
s3: Cache brlock.tdb entries for the fast read&write strict locking code path

For a netbench run this saves around 2% of user-space CPU; fetching a 100MB file
takes around 4% less time.
Volker Lendecke 2009-11-16 09:40:47 +01:00
parent bda1c701f4
commit f0a933d140
4 changed files with 74 additions and 17 deletions
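
The core idea is a seqnum-validated cache: brlock.tdb is opened with TDB_SEQNUM, so every write to the database bumps a cheap counter, and a record cached on the files_struct stays valid exactly as long as that counter is unchanged. Below is a minimal, self-contained sketch of the pattern; all names in it (db_seqnum, db_fetch_locks, struct open_file, get_locks_cached) are illustrative stand-ins, not the Samba API. In the real patch, brlock_db->get_seqnum() and brl_get_locks_internal() play these roles.

/*
 * Sketch of the seqnum cache pattern, under the assumptions named
 * above. Compile with: cc -o seqnum_cache seqnum_cache.c
 */
#include <stdio.h>
#include <stdlib.h>

struct lock_rec {
	int num_locks;			/* stand-in for the real lock data */
};

struct open_file {
	int cached_seqnum;		/* database seqnum at cache time */
	struct lock_rec *cached_rec;	/* NULL until the first fetch */
};

static int db_seqnum;	/* bumped on every write, like TDB_SEQNUM */
static int fetch_count;	/* counts the "expensive" database fetches */

static struct lock_rec *db_fetch_locks(void)
{
	fetch_count += 1;
	return calloc(1, sizeof(struct lock_rec));
}

static struct lock_rec *get_locks_cached(struct open_file *f)
{
	/* Cache hit: no write has bumped the seqnum since we cached. */
	if ((f->cached_rec != NULL) && (db_seqnum == f->cached_seqnum)) {
		return f->cached_rec;
	}

	/* Miss or stale entry: refetch and remember the seqnum. */
	free(f->cached_rec);
	f->cached_rec = db_fetch_locks();
	if (f->cached_rec != NULL) {
		f->cached_seqnum = db_seqnum;
	}
	return f->cached_rec;
}

int main(void)
{
	struct open_file f = { 0, NULL };

	get_locks_cached(&f);	/* first call fetches */
	get_locks_cached(&f);	/* served from the cache */
	db_seqnum += 1;		/* another process changed a lock */
	get_locks_cached(&f);	/* stale, fetches again */

	printf("fetches: %d\n", fetch_count);	/* prints 2 */
	free(f.cached_rec);
	return 0;
}

With clustering the trick is disabled entirely, because ctdb propagates the seqnum with a delay; see the brl_init() hunk below.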

View File

@@ -3428,8 +3428,7 @@ int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
 	       void *private_data);
 struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
 					files_struct *fsp);
-struct byte_range_lock *brl_get_locks_readonly(TALLOC_CTX *mem_ctx,
-						files_struct *fsp);
+struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp);
 void brl_register_msgs(struct messaging_context *msg_ctx);

 /* The following definitions come from locking/locking.c */

View File

@@ -461,6 +461,14 @@ typedef struct files_struct {
 	struct files_struct *base_fsp; /* placeholder for delete on close */

+	/*
+	 * Read-only cached brlock record, thrown away when the
+	 * brlock.tdb seqnum changes. This avoids fetching data from
+	 * the brlock.tdb on every read/write call.
+	 */
+	int brlock_seqnum;
+	struct byte_range_lock *brlock_rec;
+
 	struct dptr_struct *dptr;
 } files_struct;

View File

@@ -264,12 +264,25 @@ NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *lock, bool
 void brl_init(bool read_only)
 {
+	int tdb_flags;
+
 	if (brlock_db) {
 		return;
 	}
+
+	tdb_flags = TDB_DEFAULT|TDB_VOLATILE|TDB_CLEAR_IF_FIRST;
+
+	if (!lp_clustering()) {
+		/*
+		 * We can't use the SEQNUM trick to cache brlock
+		 * entries in the clustering case because ctdb seqnum
+		 * propagation has a delay.
+		 */
+		tdb_flags |= TDB_SEQNUM;
+	}
+
 	brlock_db = db_open(NULL, lock_path("brlock.tdb"),
-			    lp_open_files_db_hash_size(),
-			    TDB_DEFAULT|TDB_VOLATILE|TDB_CLEAR_IF_FIRST,
+			    lp_open_files_db_hash_size(), tdb_flags,
 			    read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644 );
 	if (!brlock_db) {
 		DEBUG(0,("Failed to open byte range locking database %s\n",
@@ -1890,10 +1903,49 @@ struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
 	return brl_get_locks_internal(mem_ctx, fsp, False);
 }

-struct byte_range_lock *brl_get_locks_readonly(TALLOC_CTX *mem_ctx,
-					       files_struct *fsp)
+struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
 {
-	return brl_get_locks_internal(mem_ctx, fsp, True);
+	struct byte_range_lock *br_lock;
+
+	if (lp_clustering()) {
+		return brl_get_locks_internal(talloc_tos(), fsp, true);
+	}
+
+	if ((fsp->brlock_rec != NULL)
+	    && (brlock_db->get_seqnum(brlock_db) == fsp->brlock_seqnum)) {
+		return fsp->brlock_rec;
+	}
+
+	TALLOC_FREE(fsp->brlock_rec);
+
+	br_lock = brl_get_locks_internal(talloc_tos(), fsp, false);
+	if (br_lock == NULL) {
+		return NULL;
+	}
+	fsp->brlock_seqnum = brlock_db->get_seqnum(brlock_db);
+
+	fsp->brlock_rec = talloc_zero(fsp, struct byte_range_lock);
+	if (fsp->brlock_rec == NULL) {
+		goto fail;
+	}
+	fsp->brlock_rec->fsp = fsp;
+	fsp->brlock_rec->num_locks = br_lock->num_locks;
+	fsp->brlock_rec->read_only = true;
+	fsp->brlock_rec->key = br_lock->key;
+	fsp->brlock_rec->lock_data = (struct lock_struct *)
+		talloc_memdup(fsp->brlock_rec, br_lock->lock_data,
+			      sizeof(struct lock_struct) * br_lock->num_locks);
+	if (fsp->brlock_rec->lock_data == NULL) {
+		goto fail;
+	}
+
+	TALLOC_FREE(br_lock);
+
+	return fsp->brlock_rec;
+fail:
+	TALLOC_FREE(br_lock);
+	TALLOC_FREE(fsp->brlock_rec);
+	return NULL;
 }

 struct brl_revalidate_state {
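
One consequence of the new signature, visible in the locking.c hunks below, is an ownership change: callers must no longer free the returned record, which is why the TALLOC_FREE(br_lck) calls disappear from strict_lock_default() and query_lock(). A hedged sketch of the caller-side contract; the declarations are stubs standing in for the Samba types, and example_caller() is hypothetical.

/* Stub declarations for illustration only, not the real headers. */
struct byte_range_lock;
typedef struct files_struct files_struct;

struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp);

int example_caller(files_struct *fsp)
{
	struct byte_range_lock *br_lck = brl_get_locks_readonly(fsp);

	if (br_lck == NULL) {
		return -1;	/* the real callers map this to an error status */
	}
	/*
	 * Use br_lck read-only and do NOT free it: without clustering
	 * it is fsp->brlock_rec, owned by the fsp and reused until the
	 * brlock.tdb seqnum changes; with clustering it lives on
	 * talloc_tos() and is freed with the current stack frame.
	 */
	return 0;
}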

View File

@@ -116,7 +116,9 @@ bool strict_lock_default(files_struct *fsp, struct lock_struct *plock)
 			DEBUG(10,("is_locked: optimisation - level II oplock on file %s\n", fsp_str_dbg(fsp)));
 			ret = True;
 		} else {
-			struct byte_range_lock *br_lck = brl_get_locks_readonly(talloc_tos(), fsp);
+			struct byte_range_lock *br_lck;
+
+			br_lck = brl_get_locks_readonly(fsp);
 			if (!br_lck) {
 				return True;
 			}
@@ -127,10 +129,11 @@ bool strict_lock_default(files_struct *fsp, struct lock_struct *plock)
 					    plock->size,
 					    plock->lock_type,
 					    plock->lock_flav);
-			TALLOC_FREE(br_lck);
 		}
 	} else {
-		struct byte_range_lock *br_lck = brl_get_locks_readonly(talloc_tos(), fsp);
+		struct byte_range_lock *br_lck;
+
+		br_lck = brl_get_locks_readonly(fsp);
 		if (!br_lck) {
 			return True;
 		}
@@ -141,7 +144,6 @@ bool strict_lock_default(files_struct *fsp, struct lock_struct *plock)
 				      plock->size,
 				      plock->lock_type,
 				      plock->lock_flav);
-		TALLOC_FREE(br_lck);
 	}

 	DEBUG(10,("strict_lock_default: flavour = %s brl start=%.0f "
@@ -170,7 +172,6 @@ NTSTATUS query_lock(files_struct *fsp,
 			enum brl_flavour lock_flav)
 {
 	struct byte_range_lock *br_lck = NULL;
-	NTSTATUS status = NT_STATUS_LOCK_NOT_GRANTED;

 	if (!fsp->can_lock) {
 		return fsp->is_directory ? NT_STATUS_INVALID_DEVICE_REQUEST : NT_STATUS_INVALID_HANDLE;
@@ -180,21 +181,18 @@ NTSTATUS query_lock(files_struct *fsp,
 		return NT_STATUS_OK;
 	}

-	br_lck = brl_get_locks_readonly(talloc_tos(), fsp);
+	br_lck = brl_get_locks_readonly(fsp);
 	if (!br_lck) {
 		return NT_STATUS_NO_MEMORY;
 	}

-	status = brl_lockquery(br_lck,
+	return brl_lockquery(br_lck,
 			psmbpid,
 			procid_self(),
 			poffset,
 			pcount,
 			plock_type,
 			lock_flav);
-
-	TALLOC_FREE(br_lck);
-	return status;
 }

 static void increment_current_lock_count(files_struct *fsp,