Mirror of https://github.com/samba-team/samba.git
r2026: Simplify statcache to use an in-memory tdb. Modify tdb to use
a custom hash function for this tdb (yes, it does make a difference on benchmarks). Remove the no longer used hash.c code. Jeremy.
Parent: bf8773b094
Commit: 3fbadac85b
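For context before the diff: the core pattern this commit introduces is an in-memory tdb whose bucket hashing can be overridden per context. Below is a minimal, illustrative sketch (not part of the commit) of how a caller could combine the TDB_INTERNAL flag with the new tdb_set_hash_function() hook declared in the tdb.h changes further down. The names example_string_hash and open_example_cache are invented for illustration, the "tdb.h" include path is assumed, and the hash body mirrors the djb-style string hash the statcache registers in the diff.

/* Sketch only: exercises tdb_open(), TDB_INTERNAL and tdb_set_hash_function()
 * as declared in the tdb.h hunk below. u32 is the tdb integer typedef. */
#include <fcntl.h>
#include "tdb.h"	/* assumed include path for the tdb API in this tree */

/* djb-style string hash; keys must be NUL-terminated, which is what the
 * *_bystring helpers used by the statcache guarantee. */
static u32 example_string_hash(TDB_DATA *key)
{
	u32 n = 0;
	const char *p;

	for (p = key->dptr; *p != '\0'; p++)
		n = ((n << 5) + n) ^ (u32)(*p);
	return n;
}

static TDB_CONTEXT *open_example_cache(void)
{
	/* TDB_INTERNAL keeps the database entirely in memory; no file is created. */
	TDB_CONTEXT *tdb = tdb_open("example_cache", 0, TDB_INTERNAL, O_RDWR|O_CREAT, 0644);

	if (!tdb)
		return NULL;

	/* Register the custom hash before any store/fetch so every bucket
	 * calculation uses the same function. */
	tdb_set_hash_function(tdb, example_string_hash);
	return tdb;
}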
@@ -199,7 +199,7 @@ LIB_OBJ = $(VERSION_OBJ) lib/charcnv.o lib/debug.o lib/fault.o \
 	lib/util_str.o lib/clobber.o lib/util_sid.o lib/util_uuid.o \
 	lib/util_unistr.o lib/util_file.o lib/data_blob.o \
 	lib/util.o lib/util_sock.o lib/sock_exec.o lib/util_sec.o \
-	lib/talloc.o lib/hash.o lib/substitute.o lib/fsusage.o \
+	lib/talloc.o lib/substitute.o lib/fsusage.o \
 	lib/ms_fnmatch.o lib/select.o lib/messages.o \
 	lib/tallocmsg.o lib/dmallocmsg.o libsmb/smb_signing.o \
 	lib/md5.o lib/hmacmd5.o lib/iconv.o \
@@ -1,74 +0,0 @@
-/*
-   Unix SMB/CIFS implementation.
-
-   Copyright (C) Ying Chen 2000.
-
-   This program is free software; you can redistribute it and/or modify
-   it under the terms of the GNU General Public License as published by
-   the Free Software Foundation; either version 2 of the License, or
-   (at your option) any later version.
-
-   This program is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-   GNU General Public License for more details.
-
-   You should have received a copy of the GNU General Public License
-   along with this program; if not, write to the Free Software
-   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-#ifndef _HASH_H_
-#define _HASH_H_
-
-#define MAX_HASH_TABLE_SIZE 16384
-#define HASH_TABLE_INCREMENT 2
-
-typedef int (*compare_function)(char *, char *);
-typedef int (*hash_function)(int, char *);
-
-/*
- * lru_link: links the node to the LRU list.
- * hash_elem: the pointer to the element that is tied onto the link.
- */
-typedef struct lru_node {
-	ubi_dlNode lru_link;
-	void *hash_elem;
-} lru_node;
-
-/*
- * bucket_link: link the hash element to the bucket chain that it belongs to.
- * lru_link: this element ties the hash element to the lru list.
- * bucket: a pointer to the hash bucket that this element belongs to.
- * value: a pointer to the hash element content. It can be anything.
- * key: stores the string key. The hash_element is always allocated with
- * more memory space than the structure shown below to accomodate the space
- * used for the whole string. But the memory is always appended at the
- * end of the structure, so keep "key" at the end of the structure.
- * Don't move it.
- */
-typedef struct hash_element {
-	ubi_dlNode bucket_link;
-	lru_node lru_link;
-	ubi_dlList *bucket;
-	void *value;
-	char key[1];
-} hash_element;
-
-/*
- * buckets: a list of buckets, implemented as a dLinkList.
- * lru_chain: the lru list of all the hash elements.
- * num_elements: the # of elements in the hash table.
- * size: the hash table size.
- * comp_func: the compare function used during hash key comparisons.
- */
-
-typedef struct hash_table {
-	ubi_dlList *buckets;
-	ubi_dlList lru_chain;
-	unsigned num_elements;
-	unsigned size;
-	compare_function comp_func;
-} hash_table;
-
-#endif /* _HASH_H_ */
@@ -760,7 +760,6 @@ extern int errno;
 #include "nt_status.h"
 #include "ads.h"
 #include "interfaces.h"
-#include "hash.h"
 #include "trans2.h"
 #include "nterr.h"
 #include "ntioctl.h"
@@ -1,316 +0,0 @@
-/*
-   Unix SMB/CIFS implementation.
-
-   Copyright (C) Ying Chen 2000.
-   Copyright (C) Jeremy Allison 2000.
-	- added some defensive programming.
-
-   This program is free software; you can redistribute it and/or modify
-   it under the terms of the GNU General Public License as published by
-   the Free Software Foundation; either version 2 of the License, or
-   (at your option) any later version.
-
-   This program is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-   GNU General Public License for more details.
-
-   You should have received a copy of the GNU General Public License
-   along with this program; if not, write to the Free Software
-   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-/*
- * NB. We may end up replacing this functionality in a future 2.x
- * release to reduce the number of hashing/lookup methods we support. JRA.
- */
-
-#include "includes.h"
-
-static BOOL enlarge_hash_table(hash_table *table);
-static unsigned primes[] =
-	{17, 37, 67, 131, 257, 521, 1031, 2053, 4099, 8209, 16411};
-
-/****************************************************************************
- * This function initializes the hash table.
- * This hash function hashes on string keys.
- * This number of hash buckets is always rounded up to a power of
- * 2 first, then to a prime number that is large than the power of two.
- * Input:
- *	table -- the hash table pointer.
- *	num_buckets -- the number of buckets to be allocated. This
- * hash function can dynamically increase its size when the
- * the hash table size becomes small. There is a MAX hash table
- * size defined in hash.h.
- *	compare_func -- the function pointer to a comparison function
- * used by the hash key comparison.
- ****************************************************************************
- */
-
-BOOL hash_table_init(hash_table *table, unsigned num_buckets, compare_function compare_func)
-{
-	unsigned i;
-	ubi_dlList *bucket;
-
-	table->num_elements = 0;
-	table->size = 2;
-	table->comp_func = compare_func;
-	while (table->size < num_buckets)
-		table->size <<= 1;
-	for (i = 0; i < ARRAY_SIZE(primes); i++) {
-		if (primes[i] > table->size) {
-			table->size = primes[i];
-			break;
-		}
-	}
-
-	DEBUG(5, ("Hash size = %d.\n", table->size));
-
-	if(!(table->buckets = (ubi_dlList *) malloc(sizeof(ubi_dlList) * table->size))) {
-		DEBUG(0,("hash_table_init: malloc fail !\n"));
-		return False;
-	}
-	ubi_dlInitList(&(table->lru_chain));
-	for (i=0, bucket = table->buckets; i < table->size; i++, bucket++)
-		ubi_dlInitList(bucket);
-
-	return True;
-}
-
-/*
- **************************************************************
- * Compute a hash value based on a string key value.
- * Make the string key into an array of int's if possible.
- * For the last few chars that cannot be int'ed, use char instead.
- * The function returns the bucket index number for the hashed
- * key.
- * JRA. Use a djb-algorithm hash for speed.
- **************************************************************
- */
-
-static int string_hash(int hash_size, const char *key)
-{
-	u32 n = 0;
-	const char *p;
-	for (p = key; *p != '\0'; p++) {
-		n = ((n << 5) + n) ^ (u32)(*p);
-	}
-	return (n % hash_size);
-}
-
-/* *************************************************************************
- * Search the hash table for the entry in the hash chain.
- * The function returns the pointer to the
- * element found in the chain or NULL if none is found.
- * If the element is found, the element is also moved to
- * the head of the LRU list.
- *
- * Input:
- *	table -- The hash table where the element is stored in.
- *	hash_chain -- The pointer to the bucket that stores the
- * element to be found.
- *	key -- The hash key to be found.
- ***************************************************************************
- */
-
-static hash_element *hash_chain_find(hash_table *table, ubi_dlList *hash_chain, char *key)
-{
-	hash_element *hash_elem;
-	ubi_dlNodePtr lru_item;
-	unsigned int i = 0;
-
-	for (hash_elem = (hash_element *)(ubi_dlFirst(hash_chain)); i < hash_chain->count;
-		i++, hash_elem = (hash_element *)(ubi_dlNext(hash_elem))) {
-		if ((table->comp_func)(hash_elem->key, key) == 0) {
-			/* Move to the head of the lru List. */
-			lru_item = ubi_dlRemove(&(table->lru_chain), &(hash_elem->lru_link.lru_link));
-			ubi_dlAddHead(&(table->lru_chain), lru_item);
-			return(hash_elem);
-		}
-	}
-	return ((hash_element *) NULL);
-}
-
-/* ***************************************************************************
- *
- * Lookup a hash table for an element with key.
- * The function returns a pointer to the hash element.
- * If no element is found, the function returns NULL.
- *
- * Input:
- *	table -- The hash table to be searched on.
- *	key -- The key to be found.
- *****************************************************************************
- */
-
-hash_element *hash_lookup(hash_table *table, char *key)
-{
-	return (hash_chain_find(table, &table->buckets[string_hash(table->size, key)], key));
-}
-
-/* ***************************************************************
- *
- * This function first checks if an element with key "key"
- * exists in the hash table. If so, the function moves the
- * element to the front of the LRU list. Otherwise, a new
- * hash element corresponding to "value" and "key" is allocated
- * and inserted into the hash table. The new elements are
- * always inserted in the LRU order to the LRU list as well.
- *
- * Input:
- *	table -- The hash table to be inserted in.
- *	value -- The content of the element to be inserted.
- *	key -- The key of the new element to be inserted.
- *
- ****************************************************************
- */
-
-hash_element *hash_insert(hash_table *table, char *value, char *key)
-{
-	hash_element *hash_elem;
-	ubi_dlNodePtr lru_item;
-	ubi_dlList *bucket;
-	size_t string_length;
-
-	/*
-	 * If the hash table size has not reached the MAX_HASH_TABLE_SIZE,
-	 * the hash table may be enlarged if the current hash table is full.
-	 * If the hash table size has reached the MAX_HASH_TABLE_SIZE,
-	 * use LRU to remove the oldest element from the hash table.
-	 */
-
-	if ((table->num_elements >= table->size) &&
-		(table->num_elements < MAX_HASH_TABLE_SIZE)) {
-		if(!enlarge_hash_table(table))
-			return (hash_element *)NULL;
-		table->num_elements += 1;
-	} else if (table->num_elements >= MAX_HASH_TABLE_SIZE) {
-		/* Do an LRU replacement. */
-		lru_item = ubi_dlLast(&(table->lru_chain));
-		hash_elem = (hash_element *)(((lru_node *)lru_item)->hash_elem);
-		bucket = hash_elem->bucket;
-		ubi_dlRemThis(&(table->lru_chain), &(hash_elem->lru_link.lru_link));
-		ubi_dlRemThis(bucket, (ubi_dlNodePtr)hash_elem);
-		SAFE_FREE(hash_elem->value);
-		SAFE_FREE(hash_elem);
-	} else {
-		table->num_elements += 1;
-	}
-
-	bucket = &table->buckets[string_hash(table->size, key)];
-
-	/* Since we only have 1-byte for the key string, we need to
-	 * allocate extra space in the hash_element to store the entire key
-	 * string.
-	 */
-
-	string_length = strlen(key);
-	if(!(hash_elem = (hash_element *) malloc(sizeof(hash_element) + string_length))) {
-		DEBUG(0,("hash_insert: malloc fail !\n"));
-		return (hash_element *)NULL;
-	}
-
-	safe_strcpy((char *) hash_elem->key, key, string_length);
-
-	hash_elem->value = (char *)value;
-	hash_elem->bucket = bucket;
-	/* Insert in front of the lru list and the bucket list. */
-	ubi_dlAddHead(bucket, hash_elem);
-	hash_elem->lru_link.hash_elem = hash_elem;
-	ubi_dlAddHead(&(table->lru_chain), &(hash_elem->lru_link.lru_link));
-
-	return(hash_elem);
-}
-
-/* **************************************************************************
- *
- * Remove a hash element from the hash table. The hash element is
- * removed from both the LRU list and the hash bucket chain.
- *
- * Input:
- *	table -- the hash table to be manipulated on.
- *	hash_elem -- the element to be removed.
- **************************************************************************
- */
-
-void hash_remove(hash_table *table, hash_element *hash_elem)
-{
-	if (hash_elem) {
-		ubi_dlRemove(&(table->lru_chain), &(hash_elem->lru_link.lru_link));
-		ubi_dlRemove(hash_elem->bucket, (ubi_dlNodePtr) hash_elem);
-		SAFE_FREE(hash_elem->value);
-		SAFE_FREE(hash_elem);
-		table->num_elements--;
-	}
-}
-
-/* ******************************************************************
- * Increase the hash table size if it is too small.
- * The hash table size is increased by the HASH_TABLE_INCREMENT
- * ratio.
- * Input:
- *	table -- the hash table to be enlarged.
- ******************************************************************
- */
-
-static BOOL enlarge_hash_table(hash_table *table)
-{
-	hash_element *hash_elem;
-	int size, hash_value;
-	ubi_dlList *buckets;
-	ubi_dlList *old_bucket;
-	ubi_dlList *bucket;
-	ubi_dlList lru_chain;
-
-	buckets = table->buckets;
-	lru_chain = table->lru_chain;
-	size = table->size;
-
-	/* Reinitialize the hash table. */
-	if(!hash_table_init(table, table->size * HASH_TABLE_INCREMENT, table->comp_func))
-		return False;
-
-	for (old_bucket = buckets; size > 0; size--, old_bucket++) {
-		while (old_bucket->count != 0) {
-			hash_elem = (hash_element *) ubi_dlRemHead(old_bucket);
-			ubi_dlRemove(&lru_chain, &(hash_elem->lru_link.lru_link));
-			hash_value = string_hash(table->size, (char *) hash_elem->key);
-			bucket = &(table->buckets[hash_value]);
-			ubi_dlAddHead(bucket, hash_elem);
-			ubi_dlAddHead(&(table->lru_chain), &(hash_elem->lru_link.lru_link));
-			hash_elem->bucket = bucket;
-			hash_elem->lru_link.hash_elem = hash_elem;
-			table->num_elements++;
-		}
-	}
-	SAFE_FREE(buckets);
-
-	return True;
-}
-
-/* **********************************************************************
- *
- * Remove everything from a hash table and free up the memory it
- * occupies.
- * Input:
- *	table -- the hash table to be cleared.
- *
- *************************************************************************
- */
-
-void hash_clear(hash_table *table)
-{
-	unsigned int i;
-	ubi_dlList *bucket = table->buckets;
-	hash_element *hash_elem;
-	for (i = 0; i < table->size; bucket++, i++) {
-		while (bucket->count != 0) {
-			hash_elem = (hash_element *) ubi_dlRemHead(bucket);
-			SAFE_FREE(hash_elem->value);
-			SAFE_FREE(hash_elem);
-		}
-	}
-	table->size = 0;
-	SAFE_FREE(table->buckets);
-	table->buckets = NULL;
-}
@@ -26,15 +26,7 @@
  Stat cache code used in unix_convert.
 *****************************************************************************/
 
-typedef struct {
-	char *original_path;
-	char *translated_path;
-	size_t translated_path_length;
-	char names[2]; /* This is extended via malloc... */
-} stat_cache_entry;
-
-#define INIT_STAT_CACHE_SIZE 512
-static hash_table stat_cache;
+static TDB_CONTEXT *tdb_stat_cache;
 
 /**
  * Add an entry into the stat cache.
@@ -50,19 +42,17 @@ static hash_table stat_cache;
 
 void stat_cache_add( const char *full_orig_name, const char *orig_translated_path, BOOL case_sensitive)
 {
-	stat_cache_entry *scp;
-	stat_cache_entry *found_scp;
 	char *translated_path;
 	size_t translated_path_length;
-
+	TDB_DATA data_val;
 	char *original_path;
 	size_t original_path_length;
-
-	hash_element *hash_elem;
 
 	if (!lp_stat_cache())
 		return;
 
+	ZERO_STRUCT(data_val);
+
 	/*
 	 * Don't cache trivial valid directory entries such as . and ..
 	 */
@@ -124,7 +114,7 @@ void stat_cache_add( const char *full_orig_name, const char *orig_translated_pat
 		return;
 	}
 
-	/* we only want to store the first part of original_path,
+	/* we only want to index by the first part of original_path,
 	   up to the length of translated_path */
 
 	original_path[translated_path_length] = '\0';
@@ -132,55 +122,26 @@ void stat_cache_add( const char *full_orig_name, const char *orig_translated_pat
 	}
 
 	/*
-	 * Check this name doesn't exist in the cache before we
-	 * add it.
+	 * New entry or replace old entry.
 	 */
 
+	data_val.dsize = translated_path_length + 1;
+	data_val.dptr = translated_path;
+
-	if ((hash_elem = hash_lookup(&stat_cache, original_path))) {
-		found_scp = (stat_cache_entry *)(hash_elem->value);
-		if (strcmp((found_scp->translated_path), orig_translated_path) == 0) {
-			/* already in hash table */
-			SAFE_FREE(original_path);
-			SAFE_FREE(translated_path);
-			return;
-		}
-		/* hash collision - remove before we re-add */
-		hash_remove(&stat_cache, hash_elem);
-	}
-
-	/*
-	 * New entry.
-	 */
-
-	if((scp = (stat_cache_entry *)malloc(sizeof(stat_cache_entry)
-					+original_path_length
-					+translated_path_length)) == NULL) {
-		DEBUG(0,("stat_cache_add: Out of memory !\n"));
-		SAFE_FREE(original_path);
-		SAFE_FREE(translated_path);
-		return;
+	if (tdb_store_bystring(tdb_stat_cache, original_path, data_val, TDB_REPLACE) != 0) {
+		DEBUG(0,("stat_cache_add: Error storing entry %s -> %s\n", original_path, translated_path));
+	} else {
+		DEBUG(5,("stat_cache_add: Added entry (%x:size%x) %s -> %s\n",
+			(unsigned int)data_val.dptr, data_val.dsize, original_path, translated_path));
 	}
-
-	scp->original_path = scp->names;
-	/* pointer into the structure... */
-	scp->translated_path = scp->names + original_path_length + 1;
-	safe_strcpy(scp->original_path, original_path, original_path_length);
-	safe_strcpy(scp->translated_path, translated_path, translated_path_length);
-	scp->translated_path_length = translated_path_length;
-
-	hash_insert(&stat_cache, (char *)scp, original_path);
-
 	SAFE_FREE(original_path);
 	SAFE_FREE(translated_path);
-
-	DEBUG(5,("stat_cache_add: Added entry %s -> %s\n", scp->original_path, scp->translated_path));
 }
 
 /**
  * Look through the stat cache for an entry
  *
- * The hash-table's internals will promote it to the top if found.
  *
  * @param conn A connection struct to do the stat() with.
 * @param name The path we are attempting to cache, modified by this routine
 * to be correct as far as the cache can tell us
@@ -195,11 +156,8 @@ void stat_cache_add( const char *full_orig_name, const char *orig_translated_pat
 BOOL stat_cache_lookup(connection_struct *conn, pstring name, pstring dirpath,
 			char **start, SMB_STRUCT_STAT *pst)
 {
-	stat_cache_entry *scp;
 	char *chk_name;
 	size_t namelen;
-	hash_element *hash_elem;
-	char *sp;
 	BOOL sizechanged = False;
 	unsigned int num_components = 0;
 
@@ -244,8 +202,11 @@ BOOL stat_cache_lookup(connection_struct *conn, pstring name, pstring dirpath,
 	}
 
 	while (1) {
-		hash_elem = hash_lookup(&stat_cache, chk_name);
-		if(hash_elem == NULL) {
+		TDB_DATA data_val;
+		char *sp;
+
+		data_val = tdb_fetch_bystring(tdb_stat_cache, chk_name);
+		if(data_val.dptr == NULL || data_val.dsize == 0) {
 			DEBUG(10,("stat_cache_lookup: lookup failed for name [%s]\n", chk_name ));
 			/*
 			 * Didn't find it - remove last component for next try.
@@ -275,44 +236,71 @@ BOOL stat_cache_lookup(connection_struct *conn, pstring name, pstring dirpath,
 				return False;
 			}
 		} else {
-			scp = (stat_cache_entry *)(hash_elem->value);
-			DEBUG(10,("stat_cache_lookup: lookup succeeded for name [%s] -> [%s]\n", chk_name, scp->translated_path ));
+			BOOL retval;
+			char *translated_path = data_val.dptr;
+			size_t translated_path_length = data_val.dsize - 1;
+
+			DEBUG(10,("stat_cache_lookup: lookup succeeded for name [%s] -> [%s]\n", chk_name, translated_path ));
 			DO_PROFILE_INC(statcache_hits);
-			if(SMB_VFS_STAT(conn,scp->translated_path, pst) != 0) {
+			if(SMB_VFS_STAT(conn,translated_path, pst) != 0) {
 				/* Discard this entry - it doesn't exist in the filesystem. */
-				hash_remove(&stat_cache, hash_elem);
+				tdb_delete_bystring(tdb_stat_cache, chk_name);
 				SAFE_FREE(chk_name);
+				SAFE_FREE(data_val.dptr);
 				return False;
 			}
 
 			if (!sizechanged) {
-				memcpy(name, scp->translated_path, MIN(sizeof(pstring)-1, scp->translated_path_length));
+				memcpy(name, translated_path, MIN(sizeof(pstring)-1, translated_path_length));
 			} else if (num_components == 0) {
-				pstrcpy(name, scp->translated_path);
+				pstrcpy(name, translated_path);
 			} else {
 				sp = strnrchr_m(name, '/', num_components);
 				if (sp) {
 					pstring last_component;
 					pstrcpy(last_component, sp);
-					pstrcpy(name, scp->translated_path);
+					pstrcpy(name, translated_path);
 					pstrcat(name, last_component);
 				} else {
-					pstrcpy(name, scp->translated_path);
+					pstrcpy(name, translated_path);
 				}
 			}
 
 			/* set pointer for 'where to start' on fixing the rest of the name */
-			*start = &name[scp->translated_path_length];
+			*start = &name[translated_path_length];
 			if(**start == '/')
 				++*start;
 
-			pstrcpy(dirpath, scp->translated_path);
+			pstrcpy(dirpath, translated_path);
+			retval = (namelen == translated_path_length) ? True : False;
 			SAFE_FREE(chk_name);
-			return (namelen == scp->translated_path_length);
+			SAFE_FREE(data_val.dptr);
+			return retval;
 		}
 	}
 }
 
+/*
+ **************************************************************
+ * Compute a hash value based on a string key value.
+ * Make the string key into an array of int's if possible.
+ * For the last few chars that cannot be int'ed, use char instead.
+ * The function returns the bucket index number for the hashed
+ * key.
+ * JRA. Use a djb-algorithm hash for speed.
+ **************************************************************
+ */
+
+static u32 string_hash(TDB_DATA *key)
+{
+	u32 n = 0;
+	const char *p;
+	for (p = key->dptr; *p != '\0'; p++) {
+		n = ((n << 5) + n) ^ (u32)(*p);
+	}
+	return n;
+}
+
 /***************************************************************************
  * Initializes or clears the stat cache.
 *
@@ -323,15 +311,17 @@ BOOL stat_cache_lookup(connection_struct *conn, pstring name, pstring dirpath,
 */
 BOOL reset_stat_cache( void )
 {
-	static BOOL initialised;
 	if (!lp_stat_cache())
 		return True;
 
-	if (initialised) {
-		hash_clear(&stat_cache);
+	if (tdb_stat_cache) {
+		tdb_close(tdb_stat_cache);
 	}
 
-	initialised = hash_table_init( &stat_cache, INIT_STAT_CACHE_SIZE,
-			(compare_function)(strcmp));
-	return initialised;
+	/* Create the in-memory tdb. */
+	tdb_stat_cache = tdb_open_log("statcache", 0, TDB_INTERNAL, (O_RDWR|O_CREAT), 0644);
+	if (!tdb_stat_cache)
+		return False;
+	tdb_set_hash_function(tdb_stat_cache, string_hash);
+	return True;
 }
@@ -329,19 +329,6 @@ static int tdb_unlock(TDB_CONTEXT *tdb, int list, int ltype)
 	return ret;
 }
 
-/* This is based on the hash algorithm from gdbm */
-static u32 tdb_hash(TDB_DATA *key)
-{
-	u32 value;	/* Used to compute the hash value. */
-	u32 i;	/* Used to cycle through random values. */
-
-	/* Set the initial value from the key size. */
-	for (value = 0x238F13AF * key->dsize, i=0; i < key->dsize; i++)
-		value = (value + (key->dptr[i] << (i*5 % 24)));
-
-	return (1103515243 * value + 12345);
-}
-
 /* check for an out of bounds access - if it is out of bounds then
    see if the database has been expanded by someone else and expand
    if necessary
@@ -1121,7 +1108,7 @@ TDB_DATA tdb_fetch(TDB_CONTEXT *tdb, TDB_DATA key)
 	u32 hash;
 
 	/* find which hash bucket it is in */
-	hash = tdb_hash(&key);
+	hash = tdb->hash_fn(&key);
 	if (!(rec_ptr = tdb_find_lock_hash(tdb,key,hash,F_RDLCK,&rec)))
 		return tdb_null;
 
@@ -1153,7 +1140,7 @@ static int tdb_exists_hash(TDB_CONTEXT *tdb, TDB_DATA key, u32 hash)
 
 int tdb_exists(TDB_CONTEXT *tdb, TDB_DATA key)
 {
-	u32 hash = tdb_hash(&key);
+	u32 hash = tdb->hash_fn(&key);
 	return tdb_exists_hash(tdb, key, hash);
 }
 
@@ -1413,7 +1400,7 @@ TDB_DATA tdb_nextkey(TDB_CONTEXT *tdb, TDB_DATA oldkey)
 
 	if (!tdb->travlocks.off) {
 		/* No previous element: do normal find, and lock record */
-		tdb->travlocks.off = tdb_find_lock_hash(tdb, oldkey, tdb_hash(&oldkey), F_WRLCK, &rec);
+		tdb->travlocks.off = tdb_find_lock_hash(tdb, oldkey, tdb->hash_fn(&oldkey), F_WRLCK, &rec);
 		if (!tdb->travlocks.off)
 			return tdb_null;
 		tdb->travlocks.hash = BUCKET(rec.full_hash);
@@ -1457,7 +1444,7 @@ static int tdb_delete_hash(TDB_CONTEXT *tdb, TDB_DATA key, u32 hash)
 
 int tdb_delete(TDB_CONTEXT *tdb, TDB_DATA key)
 {
-	u32 hash = tdb_hash(&key);
+	u32 hash = tdb->hash_fn(&key);
 	return tdb_delete_hash(tdb, key, hash);
 }
 
@@ -1475,7 +1462,7 @@ int tdb_store(TDB_CONTEXT *tdb, TDB_DATA key, TDB_DATA dbuf, int flag)
 	int ret = 0;
 
 	/* find which hash bucket it is in */
-	hash = tdb_hash(&key);
+	hash = tdb->hash_fn(&key);
 	if (!tdb_keylocked(tdb, hash))
 		return -1;
 	if (tdb_lock(tdb, BUCKET(hash), F_WRLCK) == -1)
@@ -1593,7 +1580,7 @@ int tdb_append(TDB_CONTEXT *tdb, TDB_DATA key, TDB_DATA new_dbuf)
 	size_t new_data_size = 0;
 
 	/* find which hash bucket it is in */
-	hash = tdb_hash(&key);
+	hash = tdb->hash_fn(&key);
 	if (!tdb_keylocked(tdb, hash))
 		return -1;
 	if (tdb_lock(tdb, BUCKET(hash), F_WRLCK) == -1)
@@ -1689,6 +1676,24 @@ static int tdb_already_open(dev_t device,
 	return 0;
 }
 
+/* This is based on the hash algorithm from gdbm */
+static u32 default_tdb_hash(TDB_DATA *key)
+{
+	u32 value;	/* Used to compute the hash value. */
+	u32 i;	/* Used to cycle through random values. */
+
+	/* Set the initial value from the key size. */
+	for (value = 0x238F13AF * key->dsize, i=0; i < key->dsize; i++)
+		value = (value + (key->dptr[i] << (i*5 % 24)));
+
+	return (1103515243 * value + 12345);
+}
+
+void tdb_set_hash_function(TDB_CONTEXT *tdb, tdb_hash_func fn)
+{
+	tdb->hash_fn = fn;
+}
+
 /* open the database, creating it if necessary
 
    The open_flags and mode are passed straight to the open call on the
@@ -1728,7 +1733,8 @@ TDB_CONTEXT *tdb_open_ex(const char *name, int hash_size, int tdb_flags,
 	tdb->flags = tdb_flags;
 	tdb->open_flags = open_flags;
 	tdb->log_fn = log_fn;
-
+	tdb->hash_fn = default_tdb_hash;
+
 	if ((open_flags & O_ACCMODE) == O_WRONLY) {
 		TDB_LOG((tdb, 0, "tdb_open_ex: can't open tdb %s write-only\n",
 			name));
@@ -1973,7 +1979,7 @@ int tdb_lockkeys(TDB_CONTEXT *tdb, u32 number, TDB_DATA keys[])
 
 	/* Insertion sort by bucket */
 	for (i = 0; i < number; i++) {
-		hash = tdb_hash(&keys[i]);
+		hash = tdb->hash_fn(&keys[i]);
 		for (j = 0; j < i && BUCKET(tdb->lockedkeys[j+1]) < BUCKET(hash); j++);
 		memmove(&tdb->lockedkeys[j+2], &tdb->lockedkeys[j+1], sizeof(u32) * (i-j));
 		tdb->lockedkeys[j+1] = hash;
@@ -2008,22 +2014,22 @@ void tdb_unlockkeys(TDB_CONTEXT *tdb)
    contention - it cannot guarantee how many records will be locked */
 int tdb_chainlock(TDB_CONTEXT *tdb, TDB_DATA key)
 {
-	return tdb_lock(tdb, BUCKET(tdb_hash(&key)), F_WRLCK);
+	return tdb_lock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK);
 }
 
 int tdb_chainunlock(TDB_CONTEXT *tdb, TDB_DATA key)
 {
-	return tdb_unlock(tdb, BUCKET(tdb_hash(&key)), F_WRLCK);
+	return tdb_unlock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK);
 }
 
 int tdb_chainlock_read(TDB_CONTEXT *tdb, TDB_DATA key)
 {
-	return tdb_lock(tdb, BUCKET(tdb_hash(&key)), F_RDLCK);
+	return tdb_lock(tdb, BUCKET(tdb->hash_fn(&key)), F_RDLCK);
 }
 
 int tdb_chainunlock_read(TDB_CONTEXT *tdb, TDB_DATA key)
 {
-	return tdb_unlock(tdb, BUCKET(tdb_hash(&key)), F_RDLCK);
+	return tdb_unlock(tdb, BUCKET(tdb->hash_fn(&key)), F_RDLCK);
 }
 
 
@@ -2033,7 +2039,6 @@ void tdb_logging_function(TDB_CONTEXT *tdb, void (*fn)(TDB_CONTEXT *, int , cons
 	tdb->log_fn = fn;
 }
 
-
 /* reopen a tdb - this can be used after a fork to ensure that we have an independent
    seek pointer from our parent and to re-establish locks */
 int tdb_reopen(TDB_CONTEXT *tdb)
@@ -102,11 +102,13 @@ typedef struct tdb_context {
 	dev_t device;	/* uniquely identifies this tdb */
 	ino_t inode;	/* uniquely identifies this tdb */
 	void (*log_fn)(struct tdb_context *tdb, int level, const char *, ...); /* logging function */
+	u32 (*hash_fn)(TDB_DATA *key);
 	int open_flags; /* flags used in the open - needed by reopen */
 } TDB_CONTEXT;
 
 typedef int (*tdb_traverse_func)(TDB_CONTEXT *, TDB_DATA, TDB_DATA, void *);
 typedef void (*tdb_log_func)(TDB_CONTEXT *, int , const char *, ...);
+typedef u32 (*tdb_hash_func)(TDB_DATA *key);
 
 TDB_CONTEXT *tdb_open(const char *name, int hash_size, int tdb_flags,
 		      int open_flags, mode_t mode);
@@ -117,6 +119,7 @@ TDB_CONTEXT *tdb_open_ex(const char *name, int hash_size, int tdb_flags,
 int tdb_reopen(TDB_CONTEXT *tdb);
 int tdb_reopen_all(void);
 void tdb_logging_function(TDB_CONTEXT *tdb, tdb_log_func);
+void tdb_set_hash_function(TDB_CONTEXT *tdb, tdb_hash_func);
 enum TDB_ERROR tdb_error(TDB_CONTEXT *tdb);
 const char *tdb_errorstr(TDB_CONTEXT *tdb);
 TDB_DATA tdb_fetch(TDB_CONTEXT *tdb, TDB_DATA key);
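As a usage note, the statcache side of the change reduces to the bystring helpers visible in the hunks above. The following is a hedged sketch of that store/fetch/delete round trip, with invented example path strings, assuming the usual Samba includes that declare tdb_store_bystring(), tdb_fetch_bystring(), tdb_delete_bystring() and the SAFE_FREE macro; it is illustrative only, not part of the commit.

/* Sketch only: the round trip stat_cache_add() and stat_cache_lookup()
 * now perform against the in-memory tdb, with made-up example paths. */
static void example_statcache_roundtrip(TDB_CONTEXT *cache)
{
	TDB_DATA data_val, fetched;
	const char *translated = "dir/file.txt";		/* invented value */

	data_val.dptr = (char *)translated;
	data_val.dsize = strlen(translated) + 1;		/* store the trailing NUL, as the diff does */

	if (tdb_store_bystring(cache, "DIR/FILE.TXT", data_val, TDB_REPLACE) != 0)
		return;

	fetched = tdb_fetch_bystring(cache, "DIR/FILE.TXT");
	if (fetched.dptr) {
		/* fetched.dptr now holds "dir/file.txt"; the caller must free it. */
		SAFE_FREE(fetched.dptr);
	}

	/* Stale entries are simply deleted, as stat_cache_lookup() does after a failed stat(). */
	tdb_delete_bystring(cache, "DIR/FILE.TXT");
}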