Merge branch 'v4-0-test' of git://git.samba.org/samba into 4-0-abartlet
This commit is contained in:
commit c4aba5ad01
@@ -169,13 +169,12 @@ static int partition_other_callback(struct ldb_context *ldb, void *context, stru
}

static int partition_send_request(struct partition_context *ac, struct ldb_control *remove_control,
static int partition_send_request(struct partition_context *ac,
struct dsdb_control_current_partition *partition)
{
int ret;
struct ldb_module *backend;
struct ldb_request *req;
struct ldb_control **saved_controls;

if (partition) {
backend = make_module_for_next_request(ac, ac->module->ldb, partition->module);
@@ -225,12 +224,6 @@ static int partition_send_request(struct partition_context *ac, struct ldb_contr
req->context = ac;
}

/* Remove a control, so we don't confuse a backend server */
if (remove_control && !save_controls(remove_control, req, &saved_controls)) {
ldb_oom(ac->module->ldb);
return LDB_ERR_OPERATIONS_ERROR;
}

if (partition) {
ret = ldb_request_add_control(req, DSDB_CONTROL_CURRENT_PARTITION_OID, false, partition);
if (ret != LDB_SUCCESS) {
@@ -253,18 +246,17 @@ static int partition_send_request(struct partition_context *ac, struct ldb_contr
*/
static int partition_send_all(struct ldb_module *module,
struct partition_context *ac,
struct ldb_control *remove_control,
struct ldb_request *req)
{
int i;
struct partition_private_data *data = talloc_get_type(module->private_data,
struct partition_private_data);
int ret = partition_send_request(ac, remove_control, NULL);
int ret = partition_send_request(ac, NULL);
if (ret != LDB_SUCCESS) {
return ret;
}
for (i=0; data && data->partitions && data->partitions[i]; i++) {
ret = partition_send_request(ac, remove_control, data->partitions[i]);
ret = partition_send_request(ac, data->partitions[i]);
if (ret != LDB_SUCCESS) {
return ret;
}
@@ -297,7 +289,7 @@ static int partition_replicate(struct ldb_module *module, struct ldb_request *re
return LDB_ERR_OPERATIONS_ERROR;
}

return partition_send_all(module, ac, NULL, req);
return partition_send_all(module, ac, req);
}
}
}
@@ -314,6 +306,7 @@ static int partition_replicate(struct ldb_module *module, struct ldb_request *re
* TODO: we should maybe return an error here
* if it's not a special dn
*/

return ldb_next_request(module, req);
}

@@ -334,6 +327,8 @@ static int partition_replicate(struct ldb_module *module, struct ldb_request *re
/* search */
static int partition_search(struct ldb_module *module, struct ldb_request *req)
{
struct ldb_control **saved_controls;

/* Find backend */
struct partition_private_data *data = talloc_get_type(module->private_data,
struct partition_private_data);
@@ -342,19 +337,34 @@ static int partition_search(struct ldb_module *module, struct ldb_request *req)
/* (later) consider if we should be searching multiple
* partitions (for 'invisible' partition behaviour */
struct ldb_control *search_control = ldb_request_get_control(req, LDB_CONTROL_SEARCH_OPTIONS_OID);
struct ldb_control *domain_scope_control = ldb_request_get_control(req, LDB_CONTROL_DOMAIN_SCOPE_OID);

struct ldb_search_options_control *search_options = NULL;
if (search_control) {
search_options = talloc_get_type(search_control->data, struct ldb_search_options_control);
}

/* Remove the domain_scope control, so we don't confuse a backend server */
if (domain_scope_control && !save_controls(domain_scope_control, req, &saved_controls)) {
ldb_oom(module->ldb);
return LDB_ERR_OPERATIONS_ERROR;
}

/* TODO:
Generate referrals (look for a partition under this DN) if we don't have the above control specified
*/

if (search_options && (search_options->search_options & LDB_SEARCH_OPTION_PHANTOM_ROOT)) {
int ret, i;
struct partition_context *ac;
struct ldb_control *remove_control = NULL;
if ((search_options->search_options & ~LDB_SEARCH_OPTION_PHANTOM_ROOT) == 0) {
/* We have processed this flag, so we are done with this control now */
remove_control = search_control;

/* Remove search control, so we don't confuse a backend server */
if (search_control && !save_controls(search_control, req, &saved_controls)) {
ldb_oom(module->ldb);
return LDB_ERR_OPERATIONS_ERROR;
}
}
ac = partition_init_handle(req, module);
if (!ac) {
@@ -363,12 +373,12 @@ static int partition_search(struct ldb_module *module, struct ldb_request *req)

/* Search from the base DN */
if (!req->op.search.base || ldb_dn_is_null(req->op.search.base)) {
return partition_send_all(module, ac, remove_control, req);
return partition_send_all(module, ac, req);
}
for (i=0; data && data->partitions && data->partitions[i]; i++) {
/* Find all partitions under the search base */
if (ldb_dn_compare_base(req->op.search.base, data->partitions[i]->dn) == 0) {
ret = partition_send_request(ac, remove_control, data->partitions[i]);
ret = partition_send_request(ac, data->partitions[i]);
if (ret != LDB_SUCCESS) {
return ret;
}
@@ -384,6 +394,16 @@ static int partition_search(struct ldb_module *module, struct ldb_request *req)
return LDB_SUCCESS;
} else {
/* Handle this like all other requests */
if (search_control && (search_options->search_options & ~LDB_SEARCH_OPTION_PHANTOM_ROOT) == 0) {
/* We have processed this flag, so we are done with this control now */

/* Remove search control, so we don't confuse a backend server */
if (search_control && !save_controls(search_control, req, &saved_controls)) {
ldb_oom(module->ldb);
return LDB_ERR_OPERATIONS_ERROR;
}
}

return partition_replicate(module, req, req->op.search.base);
}
}
@@ -693,7 +713,7 @@ static int partition_extended(struct ldb_module *module, struct ldb_request *req
return LDB_ERR_OPERATIONS_ERROR;
}

return partition_send_all(module, ac, NULL, req);
return partition_send_all(module, ac, req);
}

static int sort_compare(void *void1,
@@ -1,3 +1,11 @@
AC_CHECK_FUNC(getpass, samba_cv_HAVE_GETPASS=yes)
AC_CHECK_FUNC(getpassphrase, samba_cv_HAVE_GETPASSPHRASE=yes)
if test x"$samba_cv_HAVE_GETPASS" = x"yes" -a x"$samba_cv_HAVE_GETPASSPHRASE" = x"yes"; then
AC_DEFINE(REPLACE_GETPASS_BY_GETPASSPHRASE, 1, [getpass returns <9 chars where getpassphrase returns <265 chars])
AC_DEFINE(REPLACE_GETPASS,1,[Whether getpass should be replaced])
LIBREPLACEOBJ="${LIBREPLACEOBJ} getpass.o"
else

AC_CACHE_CHECK([whether getpass should be replaced],samba_cv_REPLACE_GETPASS,[
SAVE_CPPFLAGS="$CPPFLAGS"
CPPFLAGS="$CPPFLAGS -I$libreplacedir/"
@@ -12,3 +20,5 @@ if test x"$samba_cv_REPLACE_GETPASS" = x"yes"; then
AC_DEFINE(REPLACE_GETPASS,1,[Whether getpass should be replaced])
LIBREPLACEOBJ="${LIBREPLACEOBJ} getpass.o"
fi

fi
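The configure logic above only switches which implementation backs getpass(); calling code is unchanged. A minimal sketch of the intended call site, assuming the libreplace headers are on the include path (the header names and prompt string here are illustrative, not taken from the commit):

```c
#include "replace.h"          /* assumed to pull in the getpass mapping below */
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* With REPLACE_GETPASS_BY_GETPASSPHRASE defined, getpass() is mapped to
	   getpassphrase(), so passphrases longer than the classic 8-character
	   getpass() limit are accepted without any change here. */
	char *pw = getpass("Password: ");
	if (pw == NULL) {
		return 1;
	}
	printf("read %zu characters\n", strlen(pw));
	return 0;
}
```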
@@ -100,6 +100,7 @@ AC_CHECK_HEADERS(sys/socket.h netinet/in.h netdb.h arpa/inet.h)
AC_CHECK_HEADERS(netinet/ip.h netinet/tcp.h netinet/in_systm.h netinet/in_ip.h)
AC_CHECK_HEADERS(sys/sockio.h sys/un.h)
AC_CHECK_HEADERS(sys/mount.h mntent.h)
AC_CHECK_HEADERS(stropts.h)

dnl we need to check that net/if.h really can be used, to cope with hpux
dnl where including it always fails
@@ -218,7 +218,7 @@ long nap(long milliseconds) {
#ifndef HAVE_MEMMOVE
/*******************************************************************
safely copies memory, ensuring no overlap problems.
this is only used if the machine does not have it's own memmove().
this is only used if the machine does not have its own memmove().
this is not the fastest algorithm in town, but it will do for our
needs.
********************************************************************/
@@ -546,4 +546,12 @@ typedef int bool;
#define QSORT_CAST (int (*)(const void *, const void *))
#endif

#ifndef PATH_MAX
#define PATH_MAX 1024
#endif

#ifndef MAX_DNS_NAME_LENGTH
#define MAX_DNS_NAME_LENGTH 256 /* Actually 255 but +1 for terminating null. */
#endif

#endif /* _LIBREPLACE_REPLACE_H */
@@ -79,6 +79,10 @@
#include <sys/ioctl.h>
#endif

#ifdef HAVE_STROPTS_H
#include <stropts.h>
#endif

#ifdef REPLACE_INET_NTOA
/* define is in "replace.h" */
char *rep_inet_ntoa(struct in_addr ip);
@@ -68,9 +68,13 @@
#endif

#ifdef REPLACE_GETPASS
#if defined(REPLACE_GETPASS_BY_GETPASSPHRASE)
#define getpass(prompt) getpassphrase(prompt)
#else
#define getpass(prompt) rep_getpass(prompt)
char *rep_getpass(const char *prompt);
#endif
#endif

#ifndef NGROUPS_MAX
#define NGROUPS_MAX 32 /* Guess... */
@@ -27,6 +27,12 @@

#include "tdb_private.h"

/* 'right' merges can involve O(n^2) cost when combined with a
traverse, so they are disabled until we find a way to do them in
O(1) time
*/
#define USE_RIGHT_MERGES 0

/* read a freelist record and check for simple errors */
int tdb_rec_free_read(struct tdb_context *tdb, tdb_off_t off, struct list_struct *rec)
{
@@ -56,7 +62,7 @@ int tdb_rec_free_read(struct tdb_context *tdb, tdb_off_t off, struct list_struct
}

#if USE_RIGHT_MERGES
/* Remove an element from the freelist. Must have alloc lock. */
static int remove_from_freelist(struct tdb_context *tdb, tdb_off_t off, tdb_off_t next)
{
@@ -75,6 +81,7 @@ static int remove_from_freelist(struct tdb_context *tdb, tdb_off_t off, tdb_off_
TDB_LOG((tdb, TDB_DEBUG_FATAL,"remove_from_freelist: not on list at off=%d\n", off));
return TDB_ERRCODE(TDB_ERR_CORRUPT, -1);
}
#endif

/* update a record tailer (must hold allocation lock) */
@@ -93,8 +100,6 @@ static int update_tailer(struct tdb_context *tdb, tdb_off_t offset,
neccessary. */
int tdb_free(struct tdb_context *tdb, tdb_off_t offset, struct list_struct *rec)
{
tdb_off_t right, left;

/* Allocation and tailer lock */
if (tdb_lock(tdb, -1, F_WRLCK) != 0)
return -1;
@@ -105,9 +110,10 @@ int tdb_free(struct tdb_context *tdb, tdb_off_t offset, struct list_struct *rec)
goto fail;
}

#if USE_RIGHT_MERGES
/* Look right first (I'm an Australian, dammit) */
right = offset + sizeof(*rec) + rec->rec_len;
if (right + sizeof(*rec) <= tdb->map_size) {
if (offset + sizeof(*rec) + rec->rec_len + sizeof(*rec) <= tdb->map_size) {
tdb_off_t right = offset + sizeof(*rec) + rec->rec_len;
struct list_struct r;

if (tdb->methods->tdb_read(tdb, right, &r, sizeof(r), DOCONV()) == -1) {
@@ -122,13 +128,18 @@ int tdb_free(struct tdb_context *tdb, tdb_off_t offset, struct list_struct *rec)
goto left;
}
rec->rec_len += sizeof(r) + r.rec_len;
if (update_tailer(tdb, offset, rec) == -1) {
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: update_tailer failed at %u\n", offset));
goto fail;
}
}
}

left:
#endif

/* Look left */
left = offset - sizeof(tdb_off_t);
if (left > TDB_DATA_START(tdb->header.hash_size)) {
if (offset - sizeof(tdb_off_t) > TDB_DATA_START(tdb->header.hash_size)) {
tdb_off_t left = offset - sizeof(tdb_off_t);
struct list_struct l;
tdb_off_t leftsize;

@@ -145,7 +156,12 @@ left:

left = offset - leftsize;

/* Now read in record */
if (leftsize > offset ||
left < TDB_DATA_START(tdb->header.hash_size)) {
goto update;
}

/* Now read in the left record */
if (tdb->methods->tdb_read(tdb, left, &l, sizeof(l), DOCONV()) == -1) {
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: left read failed at %u (%u)\n", left, leftsize));
goto update;
@@ -153,21 +169,24 @@ left:

/* If it's free, expand to include it. */
if (l.magic == TDB_FREE_MAGIC) {
if (remove_from_freelist(tdb, left, l.next) == -1) {
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: left free failed at %u\n", left));
goto update;
} else {
offset = left;
rec->rec_len += leftsize;
/* we now merge the new record into the left record, rather than the other
way around. This makes the operation O(1) instead of O(n). This change
prevents traverse from being O(n^2) after a lot of deletes */
l.rec_len += sizeof(*rec) + rec->rec_len;
if (tdb_rec_write(tdb, left, &l) == -1) {
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: update_left failed at %u\n", left));
goto fail;
}
if (update_tailer(tdb, left, &l) == -1) {
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: update_tailer failed at %u\n", offset));
goto fail;
}
tdb_unlock(tdb, -1, F_WRLCK);
return 0;
}
}

update:
if (update_tailer(tdb, offset, rec) == -1) {
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: update_tailer failed at %u\n", offset));
goto fail;
}

/* Now, prepend to free list */
rec->magic = TDB_FREE_MAGIC;
@@ -261,6 +280,7 @@ tdb_off_t tdb_allocate(struct tdb_context *tdb, tdb_len_t length, struct list_st
tdb_off_t rec_ptr, last_ptr;
tdb_len_t rec_len;
} bestfit;
float multiplier = 1.0;

if (tdb_lock(tdb, -1, F_WRLCK) == -1)
return 0;
@@ -295,18 +315,27 @@ tdb_off_t tdb_allocate(struct tdb_context *tdb, tdb_len_t length, struct list_st
bestfit.rec_len = rec->rec_len;
bestfit.rec_ptr = rec_ptr;
bestfit.last_ptr = last_ptr;
/* consider a fit to be good enough if
we aren't wasting more than half
the space */
if (bestfit.rec_len < 2*length) {
break;
}
}
}

/* move to the next record */
last_ptr = rec_ptr;
rec_ptr = rec->next;

/* if we've found a record that is big enough, then
stop searching if its also not too big. The
definition of 'too big' changes as we scan
through */
if (bestfit.rec_len > 0 &&
bestfit.rec_len < length * multiplier) {
break;
}

/* this multiplier means we only extremely rarely
search more than 50 or so records. At 50 records we
accept records up to 11 times larger than what we
want */
multiplier *= 1.05;
}

if (bestfit.rec_ptr != 0) {
@@ -328,3 +357,25 @@ tdb_off_t tdb_allocate(struct tdb_context *tdb, tdb_len_t length, struct list_st
return 0;
}

/*
return the size of the freelist - used to decide if we should repack
*/
int tdb_freelist_size(struct tdb_context *tdb)
{
tdb_off_t ptr;
int count=0;

if (tdb_lock(tdb, -1, F_RDLCK) == -1) {
return -1;
}

ptr = FREELIST_TOP;
while (tdb_ofs_read(tdb, ptr, &ptr) == 0 && ptr != 0) {
count++;
}

tdb_unlock(tdb, -1, F_RDLCK);
return count;
}
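The "up to 11 times larger" remark in the new best-fit scan can be sanity-checked with a few lines of throwaway C: after 50 free-list entries the acceptance threshold has grown to 1.05^50, roughly 11.5 times the requested length. A quick sketch (this is just the arithmetic, not code from the commit):

```c
#include <stdio.h>

int main(void)
{
	float multiplier = 1.0;
	int i;
	for (i = 0; i < 50; i++) {
		multiplier *= 1.05;	/* same update as the tdb_allocate() scan loop */
	}
	/* prints roughly 11.47: after 50 records, a free record up to ~11x the
	   wanted size is accepted and the scan stops */
	printf("threshold after 50 records: %.2f x length\n", multiplier);
	return 0;
}
```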
@@ -101,8 +101,8 @@ static int tdb_write(struct tdb_context *tdb, tdb_off_t off,
off+written);
}
if (written == -1) {
/* Ensure ecode is set for log fn. */
tdb->ecode = TDB_ERR_IO;
/* Ensure ecode is set for log fn. */
tdb->ecode = TDB_ERR_IO;
TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_write failed at %d "
"len=%d (%s)\n", off, len, strerror(errno)));
return TDB_ERRCODE(TDB_ERR_IO, -1);
@@ -111,8 +111,8 @@ static int tdb_write(struct tdb_context *tdb, tdb_off_t off,
"write %d bytes at %d in two attempts\n",
len, off));
errno = ENOSPC;
return TDB_ERRCODE(TDB_ERR_IO, -1);
}
return TDB_ERRCODE(TDB_ERR_IO, -1);
}
}
return 0;
}
@@ -230,7 +230,7 @@ void tdb_mmap(struct tdb_context *tdb)
says to use for mmap expansion */
static int tdb_expand_file(struct tdb_context *tdb, tdb_off_t size, tdb_off_t addition)
{
char buf[1024];
char buf[8192];

if (tdb->read_only || tdb->traverse_read) {
tdb->ecode = TDB_ERR_RDONLY;
@@ -294,7 +294,7 @@ static int tdb_expand_file(struct tdb_context *tdb, tdb_off_t size, tdb_off_t ad
int tdb_expand(struct tdb_context *tdb, tdb_off_t size)
{
struct list_struct rec;
tdb_off_t offset;
tdb_off_t offset, new_size;

if (tdb_lock(tdb, -1, F_WRLCK) == -1) {
TDB_LOG((tdb, TDB_DEBUG_ERROR, "lock failed in tdb_expand\n"));
@@ -304,9 +304,11 @@ int tdb_expand(struct tdb_context *tdb, tdb_off_t size)
/* must know about any previous expansions by another process */
tdb->methods->tdb_oob(tdb, tdb->map_size + 1, 1);

/* always make room for at least 10 more records, and round
the database up to a multiple of the page size */
size = TDB_ALIGN(tdb->map_size + size*10, tdb->page_size) - tdb->map_size;
/* always make room for at least 100 more records, and at
least 25% more space. Round the database up to a multiple
of the page size */
new_size = MAX(tdb->map_size + size*100, tdb->map_size * 1.25);
size = TDB_ALIGN(new_size, tdb->page_size) - tdb->map_size;

if (!(tdb->flags & TDB_INTERNAL))
tdb_munmap(tdb);
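For a sense of scale of the new growth policy: a 40 KiB database asked to grow by a 100-byte record used to gain about one 4 KiB page; under the new rule it reserves max(100 records, 25% headroom) and ends up growing by about 12 KiB. A minimal sketch of the same arithmetic, with MAX and TDB_ALIGN re-created locally as assumptions rather than copied from tdb_private.h:

```c
#include <stdio.h>
#include <stdint.h>

/* local stand-ins for the macros used in tdb_expand(); definitions assumed */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((uint32_t)(a) - 1))
#define MAX_OF(a, b)    ((a) > (b) ? (a) : (b))

int main(void)
{
	uint32_t map_size  = 40960;  /* 40 KiB database */
	uint32_t size      = 100;    /* record being added */
	uint32_t page_size = 4096;

	/* old policy: room for ~10 more records, page aligned */
	uint32_t old_growth = ALIGN_UP(map_size + size * 10, page_size) - map_size;

	/* new policy: room for ~100 more records and at least 25% more space */
	uint32_t new_size   = MAX_OF(map_size + size * 100, (uint32_t)(map_size * 1.25));
	uint32_t new_growth = ALIGN_UP(new_size, page_size) - map_size;

	/* prints: old growth: 4096 bytes, new growth: 12288 bytes */
	printf("old growth: %u bytes, new growth: %u bytes\n", old_growth, new_growth);
	return 0;
}
```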
@@ -505,6 +505,9 @@ int tdb_chainunlock_read(struct tdb_context *tdb, TDB_DATA key)
/* record lock stops delete underneath */
int tdb_lock_record(struct tdb_context *tdb, tdb_off_t off)
{
if (tdb->global_lock.count) {
return 0;
}
return off ? tdb->methods->tdb_brlock(tdb, off, F_RDLCK, F_SETLKW, 0, 1) : 0;
}

@@ -537,6 +540,10 @@ int tdb_unlock_record(struct tdb_context *tdb, tdb_off_t off)
struct tdb_traverse_lock *i;
uint32_t count = 0;

if (tdb->global_lock.count) {
return 0;
}

if (off == 0)
return 0;
for (i = &tdb->travlocks; i; i = i->next)
@@ -35,7 +35,7 @@ static struct tdb_context *tdbs = NULL;
static unsigned int default_tdb_hash(TDB_DATA *key)
{
uint32_t value; /* Used to compute the hash value. */
uint32_t i; /* Used to cycle through random values. */
uint32_t i; /* Used to cycle through random values. */

/* Set the initial value from the key size. */
for (value = 0x238F13AF * key->dsize, i=0; i < key->dsize; i++)
@@ -90,7 +90,7 @@ static int tdb_new_database(struct tdb_context *tdb, int hash_size)
size -= written;
written = write(tdb->fd, newdb+written, size);
if (written == size) {
ret = 0;
ret = 0;
} else if (written >= 0) {
/* a second incomplete write - we give up.
* guessing the errno... */
@@ -152,6 +152,7 @@ struct tdb_context *tdb_open_ex(const char *name, int hash_size, int tdb_flags,
int rev = 0, locked = 0;
unsigned char *vp;
uint32_t vertest;
unsigned v;

if (!(tdb = (struct tdb_context *)calloc(1, sizeof *tdb))) {
/* Can't log this */
@@ -215,6 +216,10 @@ struct tdb_context *tdb_open_ex(const char *name, int hash_size, int tdb_flags,
goto fail; /* errno set by open(2) */
}

/* on exec, don't inherit the fd */
v = fcntl(tdb->fd, F_GETFD, 0);
fcntl(tdb->fd, F_SETFD, v | FD_CLOEXEC);

/* ensure there is only one process initialising at once */
if (tdb->methods->tdb_brlock(tdb, GLOBAL_LOCK, F_WRLCK, F_SETLKW, 0, 1) == -1) {
TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: failed to get global lock on %s: %s\n",
@@ -242,7 +247,7 @@ struct tdb_context *tdb_open_ex(const char *name, int hash_size, int tdb_flags,
/* its not a valid database - possibly initialise it */
if (!(open_flags & O_CREAT) || tdb_new_database(tdb, hash_size) == -1) {
if (errno == 0) {
errno = EIO; /* ie bad format or something */
errno = EIO; /* ie bad format or something */
}
goto fail;
}
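The close-on-exec hunk above uses the standard F_GETFD/F_SETFD idiom and deliberately ignores fcntl() failures. A more defensive variant, shown purely as an illustration of the same idiom rather than what the commit does, would check both calls:

```c
#include <fcntl.h>

/* Set FD_CLOEXEC on an already-open descriptor.
   Returns 0 on success, -1 if either fcntl() call fails. */
static int set_cloexec(int fd)
{
	int flags = fcntl(fd, F_GETFD, 0);
	if (flags == -1) {
		return -1;
	}
	if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) == -1) {
		return -1;
	}
	return 0;
}
```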
@@ -102,8 +102,7 @@ static tdb_off_t tdb_find(struct tdb_context *tdb, TDB_DATA key, uint32_t hash,
}

/* As tdb_find, but if you succeed, keep the lock */
tdb_off_t tdb_find_lock_hash(struct tdb_context *tdb, TDB_DATA key,
uint32_t hash, int locktype,
tdb_off_t tdb_find_lock_hash(struct tdb_context *tdb, TDB_DATA key, uint32_t hash, int locktype,
struct list_struct *rec)
{
uint32_t rec_ptr;
@@ -237,14 +236,15 @@ int tdb_exists(struct tdb_context *tdb, TDB_DATA key)
}

/* actually delete an entry in the database given the offset */
int tdb_do_delete(struct tdb_context *tdb, tdb_off_t rec_ptr, struct list_struct*rec)
int tdb_do_delete(struct tdb_context *tdb, tdb_off_t rec_ptr, struct list_struct *rec)
{
tdb_off_t last_ptr, i;
struct list_struct lastrec;

if (tdb->read_only || tdb->traverse_read) return -1;

if (tdb_write_lock_record(tdb, rec_ptr) == -1) {
if (tdb->traverse_write != 0 ||
tdb_write_lock_record(tdb, rec_ptr) == -1) {
/* Someone traversing here: mark it as dead */
rec->magic = TDB_DEAD_MAGIC;
return tdb_rec_write(tdb, rec_ptr, rec);
@@ -666,6 +666,16 @@ int tdb_get_flags(struct tdb_context *tdb)
return tdb->flags;
}

void tdb_add_flags(struct tdb_context *tdb, unsigned flags)
{
tdb->flags |= flags;
}

void tdb_remove_flags(struct tdb_context *tdb, unsigned flags)
{
tdb->flags &= ~flags;
}

/*
enable sequence number handling on an open tdb
@@ -674,3 +684,104 @@ void tdb_enable_seqnum(struct tdb_context *tdb)
{
tdb->flags |= TDB_SEQNUM;
}

/*
wipe the entire database, deleting all records. This can be done
very fast by using a global lock. The entire data portion of the
file becomes a single entry in the freelist.
*/
int tdb_wipe_all(struct tdb_context *tdb)
{
int i;
tdb_off_t offset = 0;
ssize_t data_len;

if (tdb_lockall(tdb) != 0) {
return -1;
}

/* wipe the hashes */
for (i=0;i<tdb->header.hash_size;i++) {
if (tdb_ofs_write(tdb, TDB_HASH_TOP(i), &offset) == -1) {
TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_wipe_all: failed to write hash %d\n", i));
goto failed;
}
}

/* wipe the freelist */
if (tdb_ofs_write(tdb, FREELIST_TOP, &offset) == -1) {
TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_wipe_all: failed to write freelist\n"));
goto failed;
}

if (tdb_ofs_write(tdb, TDB_RECOVERY_HEAD, &offset) == -1) {
TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_wipe_all: failed to write recovery head\n"));
goto failed;
}

/* add all the rest of the file to the freelist */
data_len = (tdb->map_size - TDB_DATA_START(tdb->header.hash_size)) - sizeof(struct list_struct);
if (data_len > 0) {
struct list_struct rec;
memset(&rec,'\0',sizeof(rec));
rec.rec_len = data_len;
if (tdb_free(tdb, TDB_DATA_START(tdb->header.hash_size), &rec) == -1) {
TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_wipe_all: failed to add free record\n"));
goto failed;
}
}

if (tdb_unlockall(tdb) != 0) {
TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_wipe_all: failed to unlock\n"));
goto failed;
}

return 0;

failed:
tdb_unlockall(tdb);
return -1;
}

/*
validate the integrity of all tdb hash chains. Useful when debugging
*/
int tdb_validate(struct tdb_context *tdb)
{
int h;
for (h=-1;h<(int)tdb->header.hash_size;h++) {
tdb_off_t rec_ptr;
uint32_t count = 0;
if (tdb_ofs_read(tdb, TDB_HASH_TOP(h), &rec_ptr) == -1) {
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_validate: failed ofs_read at top of hash %d\n", h));
return -1;
}
while (rec_ptr) {
struct list_struct r;
tdb_off_t size;

if (tdb_rec_read(tdb, rec_ptr, &r) == -1) {
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_validate: failed rec_read h=%d rec_ptr=%u count=%u\n",
h, rec_ptr, count));
return -1;
}
if (tdb_ofs_read(tdb, rec_ptr + sizeof(r) + r.rec_len - sizeof(tdb_off_t), &size) == -1) {
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_validate: failed ofs_read h=%d rec_ptr=%u count=%u\n",
h, rec_ptr, count));
return -1;
}
if (size != r.rec_len + sizeof(r)) {
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_validate: failed size check size=%u h=%d rec_ptr=%u count=%u\n",
size, h, rec_ptr, count));
return -1;
}
rec_ptr = r.next;
count++;
}
}
return 0;
}
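The maintenance calls added here (and declared in the tdb.h hunk further down: tdb_wipe_all, tdb_validate, tdb_freelist_size) are ordinary public API. A hedged usage sketch, with the database path, hash size and flags chosen arbitrarily:

```c
#include <stdio.h>
#include <fcntl.h>
#include "tdb.h"

int main(void)
{
	/* open (or create) a database; parameters are illustrative only */
	struct tdb_context *tdb = tdb_open("example.tdb", 1024, TDB_DEFAULT,
					   O_RDWR | O_CREAT, 0600);
	if (tdb == NULL) {
		return 1;
	}

	if (tdb_validate(tdb) != 0) {		/* walk every hash chain */
		fprintf(stderr, "hash chains look corrupt\n");
	}
	printf("freelist entries: %d\n", tdb_freelist_size(tdb));

	if (tdb_wipe_all(tdb) != 0) {		/* delete every record in one pass */
		fprintf(stderr, "wipe failed\n");
	}

	tdb_close(tdb);
	return 0;
}
```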
@@ -38,6 +38,10 @@
typedef uint32_t tdb_len_t;
typedef uint32_t tdb_off_t;

#ifndef offsetof
#define offsetof(t,f) ((unsigned int)&((t *)0)->f)
#endif

#define TDB_MAGIC_FOOD "TDB file\n"
#define TDB_VERSION (0x26011967 + 6)
#define TDB_MAGIC (0x26011999U)
@@ -54,7 +58,7 @@ typedef uint32_t tdb_off_t;
#define TDB_BAD_MAGIC(r) ((r)->magic != TDB_MAGIC && !TDB_DEAD(r))
#define TDB_HASH_TOP(hash) (FREELIST_TOP + (BUCKET(hash)+1)*sizeof(tdb_off_t))
#define TDB_HASHTABLE_SIZE(tdb) ((tdb->header.hash_size+1)*sizeof(tdb_off_t))
#define TDB_DATA_START(hash_size) TDB_HASH_TOP(hash_size-1)
#define TDB_DATA_START(hash_size) (TDB_HASH_TOP(hash_size-1) + sizeof(tdb_off_t))
#define TDB_RECOVERY_HEAD offsetof(struct tdb_header, recovery_start)
#define TDB_SEQNUM_OFS offsetof(struct tdb_header, sequence_number)
#define TDB_PAD_BYTE 0x42
@@ -144,6 +148,7 @@ struct tdb_context {
tdb_len_t map_size; /* how much space has been mapped */
int read_only; /* opened read-only */
int traverse_read; /* read-only traversal */
int traverse_write; /* read-write traversal */
struct tdb_lock_type global_lock;
int num_lockrecs;
struct tdb_lock_type *lockrecs; /* only real locks, all with count>0 */
@@ -87,12 +87,6 @@

*/

struct tdb_transaction_el {
struct tdb_transaction_el *next, *prev;
tdb_off_t offset;
tdb_len_t length;
unsigned char *data;
};

/*
hold the context of any current transaction
@@ -105,12 +99,12 @@ struct tdb_transaction {
/* the original io methods - used to do IOs to the real db */
const struct tdb_methods *io_methods;

/* the list of transaction elements. We use a doubly linked
list with a last pointer to allow us to keep the list
ordered, with first element at the front of the list. It
needs to be doubly linked as the read/write traversals need
to be backwards, while the commit needs to be forwards */
struct tdb_transaction_el *elements, *elements_last;
/* the list of transaction blocks. When a block is first
written to, it gets created in this list */
uint8_t **blocks;
uint32_t num_blocks;
uint32_t block_size; /* bytes in each block */
uint32_t last_block_size; /* number of valid bytes in the last block */

/* non-zero when an internal transaction error has
occurred. All write operations will then fail until the
@@ -134,52 +128,48 @@ struct tdb_transaction {
static int transaction_read(struct tdb_context *tdb, tdb_off_t off, void *buf,
tdb_len_t len, int cv)
{
struct tdb_transaction_el *el;
uint32_t blk;

/* we need to walk the list backwards to get the most recent data */
for (el=tdb->transaction->elements_last;el;el=el->prev) {
tdb_len_t partial;

if (off+len <= el->offset) {
continue;
}
if (off >= el->offset + el->length) {
continue;
}

/* an overlapping read - needs to be split into up to
2 reads and a memcpy */
if (off < el->offset) {
partial = el->offset - off;
if (transaction_read(tdb, off, buf, partial, cv) != 0) {
goto fail;
}
len -= partial;
off += partial;
buf = (void *)(partial + (char *)buf);
}
if (off + len <= el->offset + el->length) {
partial = len;
} else {
partial = el->offset + el->length - off;
}
memcpy(buf, el->data + (off - el->offset), partial);
if (cv) {
tdb_convert(buf, len);
}
len -= partial;
off += partial;
buf = (void *)(partial + (char *)buf);

if (len != 0 && transaction_read(tdb, off, buf, len, cv) != 0) {
goto fail;
}
/* break it down into block sized ops */
while (len + (off % tdb->transaction->block_size) > tdb->transaction->block_size) {
tdb_len_t len2 = tdb->transaction->block_size - (off % tdb->transaction->block_size);
if (transaction_read(tdb, off, buf, len2, cv) != 0) {
return -1;
}
len -= len2;
off += len2;
buf = (void *)(len2 + (char *)buf);
}

if (len == 0) {
return 0;
}

/* its not in the transaction elements - do a real read */
return tdb->transaction->io_methods->tdb_read(tdb, off, buf, len, cv);
blk = off / tdb->transaction->block_size;

/* see if we have it in the block list */
if (tdb->transaction->num_blocks <= blk ||
tdb->transaction->blocks[blk] == NULL) {
/* nope, do a real read */
if (tdb->transaction->io_methods->tdb_read(tdb, off, buf, len, cv) != 0) {
goto fail;
}
return 0;
}

/* it is in the block list. Now check for the last block */
if (blk == tdb->transaction->num_blocks-1) {
if (len > tdb->transaction->last_block_size) {
goto fail;
}
}

/* now copy it out of this block */
memcpy(buf, tdb->transaction->blocks[blk] + (off % tdb->transaction->block_size), len);
if (cv) {
tdb_convert(buf, len);
}
return 0;

fail:
TDB_LOG((tdb, TDB_DEBUG_FATAL, "transaction_read: failed at off=%d len=%d\n", off, len));
@@ -195,12 +185,8 @@ fail:
static int transaction_write(struct tdb_context *tdb, tdb_off_t off,
const void *buf, tdb_len_t len)
{
struct tdb_transaction_el *el, *best_el=NULL;
uint32_t blk;

if (len == 0) {
return 0;
}

/* if the write is to a hash head, then update the transaction
hash heads */
if (len == sizeof(tdb_off_t) && off >= FREELIST_TOP &&
@@ -209,110 +195,149 @@ static int transaction_write(struct tdb_context *tdb, tdb_off_t off,
memcpy(&tdb->transaction->hash_heads[chain], buf, len);
}

/* first see if we can replace an existing entry */
for (el=tdb->transaction->elements_last;el;el=el->prev) {
tdb_len_t partial;

if (best_el == NULL && off == el->offset+el->length) {
best_el = el;
}

if (off+len <= el->offset) {
continue;
}
if (off >= el->offset + el->length) {
continue;
}

/* an overlapping write - needs to be split into up to
2 writes and a memcpy */
if (off < el->offset) {
partial = el->offset - off;
if (transaction_write(tdb, off, buf, partial) != 0) {
goto fail;
}
len -= partial;
off += partial;
buf = (const void *)(partial + (const char *)buf);
}
if (off + len <= el->offset + el->length) {
partial = len;
} else {
partial = el->offset + el->length - off;
}
memcpy(el->data + (off - el->offset), buf, partial);
len -= partial;
off += partial;
buf = (const void *)(partial + (const char *)buf);

if (len != 0 && transaction_write(tdb, off, buf, len) != 0) {
goto fail;
}

return 0;
}

/* see if we can append the new entry to an existing entry */
if (best_el && best_el->offset + best_el->length == off &&
(off+len < tdb->transaction->old_map_size ||
off > tdb->transaction->old_map_size)) {
unsigned char *data = best_el->data;
el = best_el;
el->data = (unsigned char *)realloc(el->data,
el->length + len);
if (el->data == NULL) {
tdb->ecode = TDB_ERR_OOM;
tdb->transaction->transaction_error = 1;
el->data = data;
/* break it up into block sized chunks */
while (len + (off % tdb->transaction->block_size) > tdb->transaction->block_size) {
tdb_len_t len2 = tdb->transaction->block_size - (off % tdb->transaction->block_size);
if (transaction_write(tdb, off, buf, len2) != 0) {
return -1;
}
if (buf) {
memcpy(el->data + el->length, buf, len);
} else {
memset(el->data + el->length, TDB_PAD_BYTE, len);
len -= len2;
off += len2;
if (buf != NULL) {
buf = (const void *)(len2 + (const char *)buf);
}
el->length += len;
}

if (len == 0) {
return 0;
}

/* add a new entry at the end of the list */
el = (struct tdb_transaction_el *)malloc(sizeof(*el));
if (el == NULL) {
tdb->ecode = TDB_ERR_OOM;
tdb->transaction->transaction_error = 1;
return -1;
blk = off / tdb->transaction->block_size;
off = off % tdb->transaction->block_size;

if (tdb->transaction->num_blocks <= blk) {
uint8_t **new_blocks;
/* expand the blocks array */
if (tdb->transaction->blocks == NULL) {
new_blocks = malloc((blk+1)*sizeof(uint8_t *));
} else {
new_blocks = realloc(tdb->transaction->blocks, (blk+1)*sizeof(uint8_t *));
}
if (new_blocks == NULL) {
tdb->ecode = TDB_ERR_OOM;
goto fail;
}
memset(&new_blocks[tdb->transaction->num_blocks], 0,
(1+(blk - tdb->transaction->num_blocks))*sizeof(uint8_t *));
tdb->transaction->blocks = new_blocks;
tdb->transaction->num_blocks = blk+1;
tdb->transaction->last_block_size = 0;
}
el->next = NULL;
el->prev = tdb->transaction->elements_last;
el->offset = off;
el->length = len;
el->data = (unsigned char *)malloc(len);
if (el->data == NULL) {
free(el);
tdb->ecode = TDB_ERR_OOM;
tdb->transaction->transaction_error = 1;
return -1;

/* allocate and fill a block? */
if (tdb->transaction->blocks[blk] == NULL) {
tdb->transaction->blocks[blk] = (uint8_t *)calloc(tdb->transaction->block_size, 1);
if (tdb->transaction->blocks[blk] == NULL) {
tdb->ecode = TDB_ERR_OOM;
tdb->transaction->transaction_error = 1;
return -1;
}
if (tdb->transaction->old_map_size > blk * tdb->transaction->block_size) {
tdb_len_t len2 = tdb->transaction->block_size;
if (len2 + (blk * tdb->transaction->block_size) > tdb->transaction->old_map_size) {
len2 = tdb->transaction->old_map_size - (blk * tdb->transaction->block_size);
}
if (tdb->transaction->io_methods->tdb_read(tdb, blk * tdb->transaction->block_size,
tdb->transaction->blocks[blk],
len2, 0) != 0) {
SAFE_FREE(tdb->transaction->blocks[blk]);
tdb->ecode = TDB_ERR_IO;
goto fail;
}
if (blk == tdb->transaction->num_blocks-1) {
tdb->transaction->last_block_size = len2;
}
}
}
if (buf) {
memcpy(el->data, buf, len);

/* overwrite part of an existing block */
if (buf == NULL) {
memset(tdb->transaction->blocks[blk] + off, 0, len);
} else {
memset(el->data, TDB_PAD_BYTE, len);
memcpy(tdb->transaction->blocks[blk] + off, buf, len);
}
if (el->prev) {
el->prev->next = el;
} else {
tdb->transaction->elements = el;
if (blk == tdb->transaction->num_blocks-1) {
if (len + off > tdb->transaction->last_block_size) {
tdb->transaction->last_block_size = len + off;
}
}
tdb->transaction->elements_last = el;

return 0;

fail:
TDB_LOG((tdb, TDB_DEBUG_FATAL, "transaction_write: failed at off=%d len=%d\n", off, len));
tdb->ecode = TDB_ERR_IO;
TDB_LOG((tdb, TDB_DEBUG_FATAL, "transaction_write: failed at off=%d len=%d\n",
(blk*tdb->transaction->block_size) + off, len));
tdb->transaction->transaction_error = 1;
return -1;
}

/*
write while in a transaction - this varient never expands the transaction blocks, it only
updates existing blocks. This means it cannot change the recovery size
*/
static int transaction_write_existing(struct tdb_context *tdb, tdb_off_t off,
const void *buf, tdb_len_t len)
{
uint32_t blk;

/* break it up into block sized chunks */
while (len + (off % tdb->transaction->block_size) > tdb->transaction->block_size) {
tdb_len_t len2 = tdb->transaction->block_size - (off % tdb->transaction->block_size);
if (transaction_write_existing(tdb, off, buf, len2) != 0) {
return -1;
}
len -= len2;
off += len2;
if (buf != NULL) {
buf = (const void *)(len2 + (const char *)buf);
}
}

if (len == 0) {
return 0;
}

blk = off / tdb->transaction->block_size;
off = off % tdb->transaction->block_size;

if (tdb->transaction->num_blocks <= blk ||
tdb->transaction->blocks[blk] == NULL) {
return 0;
}

/* overwrite part of an existing block */
if (buf == NULL) {
memset(tdb->transaction->blocks[blk] + off, 0, len);
} else {
memcpy(tdb->transaction->blocks[blk] + off, buf, len);
}
if (blk == tdb->transaction->num_blocks-1) {
if (len + off > tdb->transaction->last_block_size) {
tdb->transaction->last_block_size = len + off;
}
}

return 0;

fail:
TDB_LOG((tdb, TDB_DEBUG_FATAL, "transaction_write: failed at off=%d len=%d\n",
(blk*tdb->transaction->block_size) + off, len));
tdb->transaction->transaction_error = 1;
return -1;
}

/*
accelerated hash chain head search, using the cached hash heads
*/
@@ -419,10 +444,14 @@ int tdb_transaction_start(struct tdb_context *tdb)
return -1;
}

/* a page at a time seems like a reasonable compromise between compactness and efficiency */
tdb->transaction->block_size = tdb->page_size;

/* get the transaction write lock. This is a blocking lock. As
discussed with Volker, there are a number of ways we could
make this async, which we will probably do in the future */
if (tdb_transaction_lock(tdb, F_WRLCK) == -1) {
SAFE_FREE(tdb->transaction->blocks);
SAFE_FREE(tdb->transaction);
return -1;
}
@@ -460,21 +489,12 @@ int tdb_transaction_start(struct tdb_context *tdb)
tdb->transaction->io_methods = tdb->methods;
tdb->methods = &transaction_methods;

/* by calling this transaction write here, we ensure that we don't grow the
transaction linked list due to hash table updates */
if (transaction_write(tdb, FREELIST_TOP, tdb->transaction->hash_heads,
TDB_HASHTABLE_SIZE(tdb)) != 0) {
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_start: failed to prime hash table\n"));
tdb->ecode = TDB_ERR_IO;
tdb->methods = tdb->transaction->io_methods;
goto fail;
}

return 0;

fail:
tdb_brlock(tdb, FREELIST_TOP, F_UNLCK, F_SETLKW, 0, 0);
tdb_transaction_unlock(tdb);
SAFE_FREE(tdb->transaction->blocks);
SAFE_FREE(tdb->transaction->hash_heads);
SAFE_FREE(tdb->transaction);
return -1;
@@ -486,6 +506,8 @@ fail:
*/
int tdb_transaction_cancel(struct tdb_context *tdb)
{
int i;

if (tdb->transaction == NULL) {
TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_cancel: no transaction\n"));
return -1;
@@ -499,13 +521,13 @@ int tdb_transaction_cancel(struct tdb_context *tdb)

tdb->map_size = tdb->transaction->old_map_size;

/* free all the transaction elements */
while (tdb->transaction->elements) {
struct tdb_transaction_el *el = tdb->transaction->elements;
tdb->transaction->elements = el->next;
free(el->data);
free(el);
/* free all the transaction blocks */
for (i=0;i<tdb->transaction->num_blocks;i++) {
if (tdb->transaction->blocks[i] != NULL) {
free(tdb->transaction->blocks[i]);
}
}
SAFE_FREE(tdb->transaction->blocks);

/* remove any global lock created during the transaction */
if (tdb->global_lock.count != 0) {
@@ -515,7 +537,6 @@ int tdb_transaction_cancel(struct tdb_context *tdb)

/* remove any locks created during the transaction */
if (tdb->num_locks != 0) {
int i;
for (i=0;i<tdb->num_lockrecs;i++) {
tdb_brlock(tdb,FREELIST_TOP+4*tdb->lockrecs[i].list,
F_UNLCK,F_SETLKW, 0, 1);
@@ -567,16 +588,24 @@ static int transaction_sync(struct tdb_context *tdb, tdb_off_t offset, tdb_len_t
*/
static tdb_len_t tdb_recovery_size(struct tdb_context *tdb)
{
struct tdb_transaction_el *el;
tdb_len_t recovery_size = 0;
int i;

recovery_size = sizeof(uint32_t);
for (el=tdb->transaction->elements;el;el=el->next) {
if (el->offset >= tdb->transaction->old_map_size) {
for (i=0;i<tdb->transaction->num_blocks;i++) {
if (i * tdb->transaction->block_size >= tdb->transaction->old_map_size) {
break;
}
if (tdb->transaction->blocks[i] == NULL) {
continue;
}
recovery_size += 2*sizeof(tdb_off_t) + el->length;
}
recovery_size += 2*sizeof(tdb_off_t);
if (i == tdb->transaction->num_blocks-1) {
recovery_size += tdb->transaction->last_block_size;
} else {
recovery_size += tdb->transaction->block_size;
}
}

return recovery_size;
}
@@ -658,6 +687,10 @@ static int tdb_recovery_allocate(struct tdb_context *tdb,
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: failed to write recovery head\n"));
return -1;
}
if (transaction_write_existing(tdb, TDB_RECOVERY_HEAD, &recovery_head, sizeof(tdb_off_t)) == -1) {
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: failed to write recovery head\n"));
return -1;
}

return 0;
}
@@ -669,7 +702,6 @@ static int tdb_recovery_allocate(struct tdb_context *tdb,
static int transaction_setup_recovery(struct tdb_context *tdb,
tdb_off_t *magic_offset)
{
struct tdb_transaction_el *el;
tdb_len_t recovery_size;
unsigned char *data, *p;
const struct tdb_methods *methods = tdb->transaction->io_methods;
@@ -677,6 +709,7 @@ static int transaction_setup_recovery(struct tdb_context *tdb,
tdb_off_t recovery_offset, recovery_max_size;
tdb_off_t old_map_size = tdb->transaction->old_map_size;
uint32_t magic, tailer;
int i;

/*
check that the recovery area has enough space
@@ -704,30 +737,43 @@ static int transaction_setup_recovery(struct tdb_context *tdb,
/* build the recovery data into a single blob to allow us to do a single
large write, which should be more efficient */
p = data + sizeof(*rec);
for (el=tdb->transaction->elements;el;el=el->next) {
if (el->offset >= old_map_size) {
for (i=0;i<tdb->transaction->num_blocks;i++) {
tdb_off_t offset;
tdb_len_t length;

if (tdb->transaction->blocks[i] == NULL) {
continue;
}
if (el->offset + el->length > tdb->transaction->old_map_size) {

offset = i * tdb->transaction->block_size;
length = tdb->transaction->block_size;
if (i == tdb->transaction->num_blocks-1) {
length = tdb->transaction->last_block_size;
}

if (offset >= old_map_size) {
continue;
}
if (offset + length > tdb->transaction->old_map_size) {
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_setup_recovery: transaction data over new region boundary\n"));
free(data);
tdb->ecode = TDB_ERR_CORRUPT;
return -1;
}
memcpy(p, &el->offset, 4);
memcpy(p+4, &el->length, 4);
memcpy(p, &offset, 4);
memcpy(p+4, &length, 4);
if (DOCONV()) {
tdb_convert(p, 8);
}
/* the recovery area contains the old data, not the
new data, so we have to call the original tdb_read
method to get it */
if (methods->tdb_read(tdb, el->offset, p + 8, el->length, 0) != 0) {
if (methods->tdb_read(tdb, offset, p + 8, length, 0) != 0) {
free(data);
tdb->ecode = TDB_ERR_IO;
return -1;
}
p += 8 + el->length;
p += 8 + length;
}

/* and the tailer */
@@ -742,6 +788,12 @@ static int transaction_setup_recovery(struct tdb_context *tdb,
tdb->ecode = TDB_ERR_IO;
return -1;
}
if (transaction_write_existing(tdb, recovery_offset, data, sizeof(*rec) + recovery_size) == -1) {
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_setup_recovery: failed to write secondary recovery data\n"));
free(data);
tdb->ecode = TDB_ERR_IO;
return -1;
}

/* as we don't have ordered writes, we have to sync the recovery
data before we update the magic to indicate that the recovery
@@ -763,6 +815,11 @@ static int transaction_setup_recovery(struct tdb_context *tdb,
tdb->ecode = TDB_ERR_IO;
return -1;
}
if (transaction_write_existing(tdb, *magic_offset, &magic, sizeof(magic)) == -1) {
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_setup_recovery: failed to write secondary recovery magic\n"));
tdb->ecode = TDB_ERR_IO;
return -1;
}

/* ensure the recovery magic marker is on disk */
if (transaction_sync(tdb, *magic_offset, sizeof(magic)) == -1) {
@@ -780,6 +837,7 @@ int tdb_transaction_commit(struct tdb_context *tdb)
const struct tdb_methods *methods;
tdb_off_t magic_offset = 0;
uint32_t zero = 0;
int i;

if (tdb->transaction == NULL) {
TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_commit: no transaction\n"));
@@ -793,13 +851,14 @@ int tdb_transaction_commit(struct tdb_context *tdb)
return -1;
}

if (tdb->transaction->nesting != 0) {
tdb->transaction->nesting--;
return 0;
}

/* check for a null transaction */
if (tdb->transaction->elements == NULL) {
if (tdb->transaction->blocks == NULL) {
tdb_transaction_cancel(tdb);
return 0;
}
@@ -858,10 +917,21 @@ int tdb_transaction_commit(struct tdb_context *tdb)
}

/* perform all the writes */
while (tdb->transaction->elements) {
struct tdb_transaction_el *el = tdb->transaction->elements;
for (i=0;i<tdb->transaction->num_blocks;i++) {
tdb_off_t offset;
tdb_len_t length;

if (methods->tdb_write(tdb, el->offset, el->data, el->length) == -1) {
if (tdb->transaction->blocks[i] == NULL) {
continue;
}

offset = i * tdb->transaction->block_size;
length = tdb->transaction->block_size;
if (i == tdb->transaction->num_blocks-1) {
length = tdb->transaction->last_block_size;
}

if (methods->tdb_write(tdb, offset, tdb->transaction->blocks[i], length) == -1) {
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_commit: write failed during commit\n"));

/* we've overwritten part of the data and
@@ -876,11 +946,12 @@ int tdb_transaction_commit(struct tdb_context *tdb)
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_commit: write failed\n"));
return -1;
}
tdb->transaction->elements = el->next;
free(el->data);
free(el);
SAFE_FREE(tdb->transaction->blocks[i]);
}

SAFE_FREE(tdb->transaction->blocks);
tdb->transaction->num_blocks = 0;

if (!(tdb->flags & TDB_NOSYNC)) {
/* ensure the new data is on disk */
if (transaction_sync(tdb, 0, tdb->map_size) == -1) {
@@ -919,6 +990,7 @@ int tdb_transaction_commit(struct tdb_context *tdb)
/* use a transaction cancel to free memory and remove the
transaction locks */
tdb_transaction_cancel(tdb);

return 0;
}
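The core of this rewrite is that a transaction now caches dirty data in fixed-size blocks (one block per tdb page, as set in tdb_transaction_start) indexed by offset, instead of a sorted doubly linked list of arbitrary extents. The indexing arithmetic used throughout transaction_read() and transaction_write() reduces to the sketch below; the block size and offsets are example values, not taken from the commit:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t block_size = 4096;          /* tdb->transaction->block_size */
	uint32_t off = 10000, len = 6000;    /* an arbitrary transaction write */

	/* split the request at block boundaries, exactly as the while loops in
	   transaction_read()/transaction_write() do */
	while (len + (off % block_size) > block_size) {
		uint32_t len2 = block_size - (off % block_size);
		printf("block %u: bytes %u..%u\n",
		       off / block_size, off % block_size, off % block_size + len2 - 1);
		len -= len2;
		off += len2;
	}
	/* the remainder now lands entirely inside one block */
	printf("block %u: bytes %u..%u\n",
	       off / block_size, off % block_size, off % block_size + len - 1);
	return 0;
}
```

Reads then check tdb->transaction->blocks[blk] and fall back to the real file when the block was never dirtied; writes allocate the block on first touch and pre-fill it from the old file contents before overwriting part of it.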
@@ -238,7 +238,9 @@ int tdb_traverse(struct tdb_context *tdb,
return -1;
}

tdb->traverse_write++;
ret = tdb_traverse_internal(tdb, fn, private_data, &tl);
tdb->traverse_write--;

tdb_transaction_unlock(tdb);

@@ -330,3 +332,4 @@ TDB_DATA tdb_nextkey(struct tdb_context *tdb, TDB_DATA oldkey)
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_nextkey: WARNING tdb_unlock failed!\n"));
return key;
}
@@ -135,6 +135,8 @@ int tdb_get_seqnum(struct tdb_context *tdb);
int tdb_hash_size(struct tdb_context *tdb);
size_t tdb_map_size(struct tdb_context *tdb);
int tdb_get_flags(struct tdb_context *tdb);
void tdb_add_flags(struct tdb_context *tdb, unsigned flag);
void tdb_remove_flags(struct tdb_context *tdb, unsigned flag);
void tdb_enable_seqnum(struct tdb_context *tdb);
void tdb_increment_seqnum_nonblock(struct tdb_context *tdb);

@@ -153,6 +155,9 @@ void tdb_setalarm_sigptr(struct tdb_context *tdb, volatile sig_atomic_t *sigptr)
void tdb_dump_all(struct tdb_context *tdb);
int tdb_printfreelist(struct tdb_context *tdb);
int tdb_validate_freelist(struct tdb_context *tdb, int *pnum_entries);
int tdb_wipe_all(struct tdb_context *tdb);
int tdb_freelist_size(struct tdb_context *tdb);
int tdb_validate(struct tdb_context *tdb);

extern TDB_DATA tdb_null;
source/selftest/env/Samba4.pm
@@ -718,7 +718,7 @@ nogroup:x:65534:nobody
} elsif ($self->{ldap} eq "fedora-ds") {
($ret->{FEDORA_DS_DIR}, $ret->{FEDORA_DS_PIDFILE}) = $self->mk_fedora_ds($ldapdir, $configuration) or die("Unable to create fedora ds directories");
push (@provision_options, "--ldap-module=nsuniqueid");
push (@provision_options, "--aci=aci:: KHRhcmdldGF0dHIgPSAiKiIpICh2ZXJzaW9uIDMuMDthY2wgImZ1bGwgYWNjZXNzIHRvIGFsbCBieSBhbGwiO2FsbG93IChhbGwpKHVzZXJkbiA9ICJsZGFwOi8vL2FueW9uZSIpOykK");
push (@provision_options, "'--aci=aci:: KHRhcmdldGF0dHIgPSAiKiIpICh2ZXJzaW9uIDMuMDthY2wgImZ1bGwgYWNjZXNzIHRvIGFsbCBieSBhbGwiO2FsbG93IChhbGwpKHVzZXJkbiA9ICJsZGFwOi8vL2FueW9uZSIpOykK'");
}

$self->slapd_start($ret) or
@@ -108,5 +108,12 @@ echo "Search Options Control Query test returned 0 items"
failed=`expr $failed + 1`
fi

echo "Test Search Options Control with Domain Scope Control"
nentries=`bin/ldbsearch $options $CONFIGURATION -H $p://$SERVER --controls=search_options:1:2,domain_scope:1 '(objectclass=crossRef)' | grep crossRef | wc -l`
if [ $nentries -lt 1 ]; then
echo "Search Options Control Query test returned 0 items"
failed=`expr $failed + 1`
fi

exit $failed
@@ -30,7 +30,12 @@ function basic_tests(ldb, gc_ldb, base_dn, configuration_dn, schema_dn)
println("Running basic tests");

ldb.del("cn=ldaptestuser,cn=users," + base_dn);

ldb.del("cn=ldaptestuser2,cn=users," + base_dn);
ldb.del("cn=ldaptestuser3,cn=users," + base_dn);
ldb.del("cn=ldaptestuser4,cn=users," + base_dn);
ldb.del("cn=ldaptestuser5,cn=users," + base_dn);
ldb.del("CN=ldaptestuser4,CN=ldaptestcontainer2," + base_dn);
ldb.del("CN=ldaptestcontainer2," + base_dn);
ldb.del("cn=ldaptestgroup,cn=users," + base_dn);

println("Testing group add with invalid member");