Mirror of https://github.com/samba-team/samba.git

r17346: Add optimisation vl needs for the cluster code where
we don't get the chainlock when getting the byte range
lock record read-only.
Jeremy.
Author:    Jeremy Allison
Date:      2006-07-31 20:58:02 +00:00
Committer: Gerald (Jerry) Carter
Parent:    21c8fa2fc8
Commit:    fcd798ca0c

4 changed files with 42 additions and 12 deletions
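The shape of the change: struct byte_range_lock grows a read_only flag, brl_get_locks() becomes a thin wrapper around a new brl_get_locks_internal() that takes a read_only parameter, a brl_get_locks_readonly() wrapper is added, and the destructor drops the tdb chainlock only for read/write handles. Pure query paths therefore no longer serialise on the per-record chainlock, which is what matters for vl's cluster work. A minimal, self-contained sketch of that pattern (toy names and plain C; this is not Samba code):

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <stdbool.h>

/* Toy stand-in for the locked record handle. */
struct handle {
	bool read_only;	/* mirrors br_lck->read_only */
	bool modified;	/* mirrors br_lck->modified */
	bool locked;	/* stands in for holding the tdb chainlock */
};

/* Mirrors brl_get_locks_internal(): only the read/write path locks. */
static struct handle *get_handle(bool read_only)
{
	struct handle *h = calloc(1, sizeof(*h));
	if (h == NULL) {
		return NULL;
	}
	if (read_only) {
		h->read_only = true;	/* fast path: no lock taken */
	} else {
		h->locked = true;	/* tdb_chainlock() in the real code */
	}
	return h;
}

/* Mirrors byte_range_lock_destructor(): assert the invariant, then
 * unlock only if the lock was actually taken. */
static void put_handle(struct handle *h)
{
	if (h->read_only) {
		assert(!h->modified);	/* SMB_ASSERT in the real code */
	}
	if (!h->read_only) {
		h->locked = false;	/* tdb_chainunlock() in the real code */
	}
	free(h);
}

int main(void)
{
	struct handle *ro = get_handle(true);	/* brl_get_locks_readonly() */
	struct handle *rw = get_handle(false);	/* brl_get_locks() */

	printf("read-only holds lock: %d, read/write holds lock: %d\n",
	       ro->locked, rw->locked);

	put_handle(ro);
	put_handle(rw);
	return 0;
}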

File 1 of 4 (defines struct byte_range_lock):

@@ -55,6 +55,7 @@ struct byte_range_lock {
 	struct files_struct *fsp;
 	unsigned int num_locks;
 	BOOL modified;
+	BOOL read_only;
 	struct lock_key key;
 	void *lock_data;
 };

File 2 of 4 (byte-range lock implementation: byte_range_lock_destructor, brl_get_locks):

@@ -1500,6 +1500,10 @@ static int byte_range_lock_destructor(void *p)
 	key.dptr = (char *)&br_lck->key;
 	key.dsize = sizeof(struct lock_key);
 
+	if (br_lck->read_only) {
+		SMB_ASSERT(!br_lck->modified);
+	}
+
 	if (!br_lck->modified) {
 		goto done;
 	}
@@ -1521,7 +1525,9 @@ static int byte_range_lock_destructor(void *p)
 
  done:
 
-	tdb_chainunlock(tdb, key);
+	if (!br_lck->read_only) {
+		tdb_chainunlock(tdb, key);
+	}
 	SAFE_FREE(br_lck->lock_data);
 	return 0;
 }
@@ -1532,8 +1538,8 @@ static int byte_range_lock_destructor(void *p)
  TALLOC_FREE(brl) will release the lock in the destructor.
 ********************************************************************/
 
-struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
-					files_struct *fsp)
+static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx,
+					files_struct *fsp, BOOL read_only)
 {
 	TDB_DATA key;
 	TDB_DATA data;
@@ -1553,10 +1559,21 @@ struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
 	key.dptr = (char *)&br_lck->key;
 	key.dsize = sizeof(struct lock_key);
 
-	if (tdb_chainlock(tdb, key) != 0) {
-		DEBUG(3, ("Could not lock byte range lock entry\n"));
-		TALLOC_FREE(br_lck);
-		return NULL;
+	if (!fsp->lockdb_clean) {
+		/* We must be read/write to clean
+		   the dead entries. */
+		read_only = False;
+	}
+
+	if (read_only) {
+		br_lck->read_only = True;
+	} else {
+		if (tdb_chainlock(tdb, key) != 0) {
+			DEBUG(3, ("Could not lock byte range lock entry\n"));
+			TALLOC_FREE(br_lck);
+			return NULL;
+		}
+		br_lck->read_only = False;
 	}
 
 	talloc_set_destructor(br_lck, byte_range_lock_destructor);
@@ -1594,7 +1611,7 @@ struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
 	if (DEBUGLEVEL >= 10) {
 		unsigned int i;
 		struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
-		DEBUG(10,("brl_get_locks: %u current locks on dev=%.0f, inode=%.0f\n",
+		DEBUG(10,("brl_get_locks_internal: %u current locks on dev=%.0f, inode=%.0f\n",
 			br_lck->num_locks,
 			(double)fsp->dev, (double)fsp->inode ));
 		for( i = 0; i < br_lck->num_locks; i++) {
@@ -1603,3 +1620,15 @@ struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
 	}
 	return br_lck;
 }
+
+struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
+					files_struct *fsp)
+{
+	return brl_get_locks_internal(mem_ctx, fsp, False);
+}
+
+struct byte_range_lock *brl_get_locks_readonly(TALLOC_CTX *mem_ctx,
+					files_struct *fsp)
+{
+	return brl_get_locks_internal(mem_ctx, fsp, True);
+}

File 3 of 4 (locking layer: is_locked, query_lock):

@@ -102,7 +102,7 @@ BOOL is_locked(files_struct *fsp,
 		DEBUG(10,("is_locked: optimisation - level II oplock on file %s\n", fsp->fsp_name ));
 		ret = False;
 	} else {
-		struct byte_range_lock *br_lck = brl_get_locks(NULL, fsp);
+		struct byte_range_lock *br_lck = brl_get_locks_readonly(NULL, fsp);
 		if (!br_lck) {
 			return False;
 		}
@@ -116,7 +116,7 @@ BOOL is_locked(files_struct *fsp,
 			TALLOC_FREE(br_lck);
 		}
 	} else {
-		struct byte_range_lock *br_lck = brl_get_locks(NULL, fsp);
+		struct byte_range_lock *br_lck = brl_get_locks_readonly(NULL, fsp);
 		if (!br_lck) {
 			return False;
 		}
@@ -160,7 +160,7 @@ NTSTATUS query_lock(files_struct *fsp,
 		return NT_STATUS_OK;
 	}
 
-	br_lck = brl_get_locks(NULL, fsp);
+	br_lck = brl_get_locks_readonly(NULL, fsp);
 	if (!br_lck) {
 		return NT_STATUS_NO_MEMORY;
 	}
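is_locked() and query_lock() only inspect the lock database, so the read-only accessor suffices for them; anything that mutates the record must keep using brl_get_locks(), since only that path holds the chainlock and the destructor asserts a read-only handle was never modified. Schematically (illustrative contrast, not from this diff):

/* Query only: no chainlock taken; the handle must stay unmodified. */
struct byte_range_lock *q = brl_get_locks_readonly(NULL, fsp);

/* Mutation: takes the chainlock; the destructor persists changes. */
struct byte_range_lock *w = brl_get_locks(NULL, fsp);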

File 4 of 4 (lock enumeration caller: enum_file_fn):

@@ -152,7 +152,7 @@ static void enum_file_fn( const struct share_mode_entry *e,
 	fsp.dev = e->dev;
 	fsp.inode = e->inode;
 
-	if ( (brl = brl_get_locks(NULL,&fsp)) != NULL ) {
+	if ( (brl = brl_get_locks_readonly(NULL,&fsp)) != NULL ) {
 		num_locks = brl->num_locks;
 		TALLOC_FREE( brl );
 	}