
Added the hard code :-).

HEAD should now map brl locks correctly into POSIX locks, including the
really nasty case of large range unlock.

There is a lot of pretty ASCII art in locking/brlock.c explaining
exactly how this code works. If it is unclear, please ask me.

Jeremy.
(This used to be commit 135855dbd3)
Jeremy Allison 2000-04-25 20:30:58 +00:00
parent 00e3fe1324
commit e4d382750c
4 changed files with 469 additions and 78 deletions
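The "really nasty case of large range unlock" exists because POSIX record locks belong to the process, not to an individual file descriptor: a single fcntl() unlock over a wide range also releases byte ranges the same smbd process still holds through other fds open on the same file. A minimal stand-alone sketch of that POSIX behaviour (hypothetical scratch path, not Samba code):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* POSIX record locks are owned by the process: an unlock issued
   through one fd also releases a lock that was set through another
   fd of the same process on the same file. */
static int set_lock(int fd, int type, off_t start, off_t len)
{
	struct flock fl;
	fl.l_type = type;          /* F_RDLCK, F_WRLCK or F_UNLCK */
	fl.l_whence = SEEK_SET;
	fl.l_start = start;
	fl.l_len = len;
	return fcntl(fd, F_SETLK, &fl);
}

int main(void)
{
	const char *path = "/tmp/brl_demo";    /* hypothetical scratch file */
	int fd1 = open(path, O_RDWR|O_CREAT, 0600);
	int fd2 = open(path, O_RDWR);

	if (fd1 == -1 || fd2 == -1) {
		perror("open");
		return 1;
	}

	/* Lock bytes 40-49 through the first descriptor... */
	set_lock(fd1, F_WRLCK, 40, 10);

	/* ...then unlock all of 0-99 through the second descriptor.
	   The kernel drops the 40-49 lock as well, because both fds
	   belong to the same process.  smbd therefore has to subtract
	   the ranges still held on other fnums before issuing the
	   POSIX unlock - which is what brl_unlock_list() below computes. */
	set_lock(fd2, F_UNLCK, 0, 100);

	close(fd2);
	close(fd1);
	unlink(path);
	return 0;
}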

View File

@@ -816,6 +816,8 @@ BOOL brl_lock(SMB_DEV_T dev, SMB_INO_T ino, int fnum,
uint16 smbpid, pid_t pid, uint16 tid,
br_off start, br_off size,
enum brl_type lock_type);
struct unlock_list *brl_unlock_list(TALLOC_CTX *ctx, struct unlock_list *ulhead,
pid_t pid, SMB_DEV_T dev, SMB_INO_T ino);
BOOL brl_unlock(SMB_DEV_T dev, SMB_INO_T ino, int fnum,
uint16 smbpid, pid_t pid, uint16 tid,
br_off start, br_off size);
@@ -824,11 +826,11 @@ BOOL brl_locktest(SMB_DEV_T dev, SMB_INO_T ino,
br_off start, br_off size,
enum brl_type lock_type);
void brl_close(SMB_DEV_T dev, SMB_INO_T ino, pid_t pid, int tid, int fnum);
struct unlock_list *brl_getlocklist( TALLOC_CTX *ctx, SMB_DEV_T dev, SMB_INO_T ino, pid_t pid, int tid, int fnum);
int brl_forall(BRLOCK_FN(fn));
/*The following definitions come from locking/locking.c */
void locking_close_file(files_struct *fsp);
BOOL is_locked(files_struct *fsp,connection_struct *conn,
SMB_BIG_UINT count,SMB_BIG_UINT offset,
enum brl_type lock_type);
@@ -838,6 +840,7 @@ BOOL do_lock(files_struct *fsp,connection_struct *conn,
BOOL do_unlock(files_struct *fsp,connection_struct *conn,
SMB_BIG_UINT count,SMB_BIG_UINT offset,
int *eclass,uint32 *ecode);
void locking_close_file(files_struct *fsp);
BOOL locking_init(int read_only);
BOOL locking_end(void);
BOOL lock_share_entry(connection_struct *conn,

View File

@@ -519,6 +519,18 @@ struct uid_cache {
uid_t list[UID_CACHE_SIZE];
};
/*
* Structure used when splitting a lock range
* into a POSIX lock range. Doubly linked list.
*/
struct unlock_list {
struct unlock_list *next;
struct unlock_list *prev;
SMB_BIG_UINT start;
SMB_BIG_UINT size;
};
typedef struct
{
char *name;

View File

@@ -2,7 +2,10 @@
Unix SMB/Netbios implementation.
Version 3.0
byte range locking code
Copyright (C) Andrew Tridgell 1992-1998
Updated to handle range splits/merges.
Copyright (C) Andrew Tridgell 1992-2000
Copyright (C) Jeremy Allison 1992-2000
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -19,7 +22,7 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* this module implements a tdb based byte range locking service,
/* This module implements a tdb based byte range locking service,
replacing the fcntl() based byte range locking previously
used. This allows us to provide the same semantics as NT */
@@ -27,18 +30,20 @@
extern int DEBUGLEVEL;
/* this contains elements that differentiate locks. The smbpid is a
/* This contains elements that differentiate locks. The smbpid is a
client supplied pid, and is essentially the locking context for
this client */
struct lock_context {
uint16 smbpid;
uint16 tid;
pid_t pid;
};
/* the data in brlock records is an unsorted linear array of these
/* The data in brlock records is an unsorted linear array of these
records. It is unnecessary to store the count as tdb provides the
size of the record */
struct lock_struct {
struct lock_context context;
br_off start;
@@ -47,19 +52,21 @@ struct lock_struct {
enum brl_type lock_type;
};
/* the key used in the brlock database */
/* The key used in the brlock database. */
struct lock_key {
SMB_DEV_T device;
SMB_INO_T inode;
};
/* the open brlock.tdb database */
/* The open brlock.tdb database. */
static TDB_CONTEXT *tdb;
/****************************************************************************
see if two locking contexts are equal
See if two locking contexts are equal.
****************************************************************************/
static BOOL brl_same_context(struct lock_context *ctx1,
struct lock_context *ctx2)
{
@@ -69,8 +76,9 @@ static BOOL brl_same_context(struct lock_context *ctx1,
}
/****************************************************************************
see if lock2 can be added when lock1 is in place
See if lock2 can be added when lock1 is in place.
****************************************************************************/
static BOOL brl_conflict(struct lock_struct *lck1,
struct lock_struct *lck2)
{
@@ -88,8 +96,9 @@ static BOOL brl_conflict(struct lock_struct *lck1,
/****************************************************************************
open up the brlock.tdb database
Open up the brlock.tdb database.
****************************************************************************/
void brl_init(int read_only)
{
if (tdb) return;
@@ -102,8 +111,9 @@ void brl_init(int read_only)
/****************************************************************************
lock a range of bytes
Lock a range of bytes.
****************************************************************************/
BOOL brl_lock(SMB_DEV_T dev, SMB_INO_T ino, int fnum,
uint16 smbpid, pid_t pid, uint16 tid,
br_off start, br_off size,
@@ -160,10 +170,238 @@ BOOL brl_lock(SMB_DEV_T dev, SMB_INO_T ino, int fnum,
return False;
}
/****************************************************************************
Create a list of lock ranges that don't overlap a given range. Used in calculating
POSIX lock unlocks. This is a difficult function that requires ASCII art to
understand it :-).
****************************************************************************/
struct unlock_list *brl_unlock_list(TALLOC_CTX *ctx, struct unlock_list *ulhead,
pid_t pid, SMB_DEV_T dev, SMB_INO_T ino)
{
struct lock_key key;
TDB_DATA kbuf, dbuf;
struct lock_struct *locks;
int num_locks, i;
/*
* Setup the key for this fetch.
*/
key.device = dev;
key.inode = ino;
kbuf.dptr = (char *)&key;
kbuf.dsize = sizeof(key);
dbuf.dptr = NULL;
tdb_lockchain(tdb, kbuf);
dbuf = tdb_fetch(tdb, kbuf);
if (!dbuf.dptr) {
tdb_unlockchain(tdb, kbuf);
return ulhead;
}
locks = (struct lock_struct *)dbuf.dptr;
num_locks = dbuf.dsize / sizeof(*locks);
/*
* Check the current lock list on this dev/inode pair.
* Quit if the list is deleted.
*/
for (i=0; i<num_locks && ulhead; i++) {
struct lock_struct *lock = &locks[i];
struct unlock_list *ul_curr;
/* If it's not this process, ignore it. */
if (lock->context.pid != pid)
continue;
/*
* Walk the unlock list, checking for overlaps. Note that
* the unlock list can expand within this loop if the current
* range being examined needs to be split.
*/
for (ul_curr = ulhead; ul_curr;) {
DEBUG(10,("brl_unlock_list: curr: start=%.0f,size=%.0f \
lock: start=%.0f,size=%.0f\n", (double)ul_curr->start, (double)ul_curr->size,
(double)lock->start, (double)lock->size ));
if ( (ul_curr->start >= (lock->start + lock->size)) ||
(lock->start > (ul_curr->start + ul_curr->size))) {
/* No overlap with this lock - leave this range alone. */
/*********************************************
+---------+
| ul_curr |
+---------+
+-------+
| lock |
+-------+
OR....
+---------+
| ul_curr |
+---------+
**********************************************/
DEBUG(10,("brl_unlock_list: no overlap case.\n" ));
ul_curr = ul_curr->next;
} else if ( (ul_curr->start >= lock->start) &&
(ul_curr->start + ul_curr->size <= lock->start + lock->size) ) {
/*
* This unlock is completely overlapped by this existing lock range
* and thus should have no effect (not be unlocked). Delete it from the list.
*/
/*********************************************
+---------+
| ul_curr |
+---------+
+---------------------------+
| lock |
+---------------------------+
**********************************************/
/* Save the next pointer */
struct unlock_list *ul_next = ul_curr->next;
DEBUG(10,("brl_unlock_list: delete case.\n" ));
DLIST_REMOVE(ulhead, ul_curr);
if(ulhead == NULL)
break; /* No more list... */
ul_curr = ul_next;
} else if ( (ul_curr->start >= lock->start) &&
(ul_curr->start < lock->start + lock->size) &&
(ul_curr->start + ul_curr->size > lock->start + lock->size) ) {
/*
* This unlock overlaps the existing lock range at the high end.
* Truncate by moving start to existing range end and reducing size.
*/
/*********************************************
+---------------+
| ul_curr |
+---------------+
+---------------+
| lock |
+---------------+
BECOMES....
+-------+
|ul_curr|
+-------+
**********************************************/
ul_curr->size = (ul_curr->start + ul_curr->size) - (lock->start + lock->size);
ul_curr->start = lock->start + lock->size;
DEBUG(10,("brl_unlock_list: truncate high case: start=%.0f,size=%.0f\n",
(double)ul_curr->start, (double)ul_curr->size ));
ul_curr = ul_curr->next;
} else if ( (ul_curr->start < lock->start) &&
(ul_curr->start + ul_curr->size > lock->start) ) {
/*
* This unlock overlaps the existing lock range at the low end.
* Truncate by reducing size.
*/
/*********************************************
+---------------+
| ul_curr |
+---------------+
+---------------+
| lock |
+---------------+
BECOMES....
+-------+
|ul_curr|
+-------+
**********************************************/
ul_curr->size = lock->start - ul_curr->start;
DEBUG(10,("brl_unlock_list: truncate low case: start=%.0f,size=%.0f\n",
(double)ul_curr->start, (double)ul_curr->size ));
ul_curr = ul_curr->next;
} else if ( (ul_curr->start < lock->start) &&
(ul_curr->start + ul_curr->size > lock->start + lock->size) ) {
/*
* Worst case scenario. Unlock request completely overlaps an existing
* lock range. Split the request into two, push the new (upper) request
* into the dlink list, and continue with the entry after ul_new (as we
* know that ul_new will not overlap with this lock).
*/
/*********************************************
+---------------------------+
| ul_curr |
+---------------------------+
+---------+
| lock |
+---------+
BECOMES.....
+-------+ +---------+
|ul_curr| |ul_new |
+-------+ +---------+
**********************************************/
struct unlock_list *ul_new = (struct unlock_list *)talloc(ctx,
sizeof(struct unlock_list));
if(ul_new == NULL) {
DEBUG(0,("brl_unlock_list: talloc fail.\n"));
return NULL; /* The talloc_destroy takes care of cleanup. */
}
ZERO_STRUCTP(ul_new);
ul_new->start = lock->start + lock->size;
ul_new->size = ul_curr->start + ul_curr->size - ul_new->start;
/* Add into the dlink list after the ul_curr point - NOT at ulhead. */
DLIST_ADD(ul_curr, ul_new);
/* Truncate the ul_curr. */
ul_curr->size = lock->start - ul_curr->start;
DEBUG(10,("brl_unlock_list: split case: curr: start=%.0f,size=%.0f \
new: start=%.0f,size=%.0f\n", (double)ul_curr->start, (double)ul_curr->size,
(double)ul_new->start, (double)ul_new->size ));
ul_curr = ul_new->next;
} else {
/*
* This logic case should never happen. Ensure this is the
* case by forcing an abort.... Remove in production.
*/
smb_panic("brl_unlock_list: logic flaw in cases...\n");
}
} /* end for ( ul_curr = ulhead; ul_curr;) */
} /* end for (i=0; i<num_locks && ul_head; i++) */
tdb_unlockchain(tdb, kbuf);
if (dbuf.dptr)
free(dbuf.dptr);
return ulhead;
}
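The ASCII-art cases above all reduce to subtracting one existing lock range from one unlock range, leaving zero, one or two surviving pieces. A minimal stand-alone model of that subtraction, assuming plain unsigned longs and a fixed output array in place of SMB_BIG_UINT and the talloc'd unlock_list (a sketch, not the Samba code):

#include <stdio.h>

struct range { unsigned long start, size; };

/* Subtract 'lock' from 'unlock'; write the surviving pieces into
   out[] and return how many there are (0, 1 or 2). */
static int subtract_range(struct range unlock, struct range lock, struct range *out)
{
	unsigned long u_end = unlock.start + unlock.size;
	unsigned long l_end = lock.start + lock.size;
	int n = 0;

	/* No overlap: the unlock range survives untouched. */
	if (unlock.start >= l_end || lock.start >= u_end) {
		out[n++] = unlock;
		return n;
	}

	/* Piece below the lock (the "truncate low" and "split" cases). */
	if (unlock.start < lock.start) {
		out[n].start = unlock.start;
		out[n].size = lock.start - unlock.start;
		n++;
	}

	/* Piece above the lock (the "truncate high" and "split" cases). */
	if (u_end > l_end) {
		out[n].start = l_end;
		out[n].size = u_end - l_end;
		n++;
	}

	/* Completely covered: n stays 0, matching the "delete" case. */
	return n;
}

int main(void)
{
	/* Unlock 0-99 while this process still holds 40-49 elsewhere:
	   the POSIX unlocks actually issued are 0-39 and 50-99. */
	struct range unlock = { 0, 100 };
	struct range lock = { 40, 10 };
	struct range out[2];
	int i, n = subtract_range(unlock, lock, out);

	for (i = 0; i < n; i++)
		printf("unlock start=%lu size=%lu\n", out[i].start, out[i].size);
	return 0;
}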
/****************************************************************************
unlock a range of bytes
Unlock a range of bytes.
****************************************************************************/
BOOL brl_unlock(SMB_DEV_T dev, SMB_INO_T ino, int fnum,
uint16 smbpid, pid_t pid, uint16 tid,
br_off start, br_off size)
@@ -194,10 +432,13 @@ BOOL brl_unlock(SMB_DEV_T dev, SMB_INO_T ino, int fnum,
locks = (struct lock_struct *)dbuf.dptr;
count = dbuf.dsize / sizeof(*locks);
for (i=0; i<count; i++) {
if (brl_same_context(&locks[i].context, &context) &&
locks[i].fnum == fnum &&
locks[i].start == start &&
locks[i].size == size) {
struct lock_struct *lock = &locks[i];
if (brl_same_context(&lock->context, &context) &&
lock->fnum == fnum &&
lock->start == start &&
lock->size == size) {
/* found it - delete it */
if (count == 1) {
tdb_delete(tdb, kbuf);
@@ -224,11 +465,10 @@ BOOL brl_unlock(SMB_DEV_T dev, SMB_INO_T ino, int fnum,
return False;
}
/****************************************************************************
test if we could add a lock if we wanted to
Test if we could add a lock if we wanted to.
****************************************************************************/
BOOL brl_locktest(SMB_DEV_T dev, SMB_INO_T ino,
uint16 smbpid, pid_t pid, uint16 tid,
br_off start, br_off size,
@@ -278,10 +518,10 @@ BOOL brl_locktest(SMB_DEV_T dev, SMB_INO_T ino,
return False;
}
/****************************************************************************
remove any locks associated with a open file
Remove any locks associated with a open file.
****************************************************************************/
void brl_close(SMB_DEV_T dev, SMB_INO_T ino, pid_t pid, int tid, int fnum)
{
struct lock_key key;
@@ -305,9 +545,11 @@ void brl_close(SMB_DEV_T dev, SMB_INO_T ino, pid_t pid, int tid, int fnum)
locks = (struct lock_struct *)dbuf.dptr;
count = dbuf.dsize / sizeof(*locks);
for (i=0; i<count; i++) {
if (locks[i].context.tid == tid &&
locks[i].context.pid == pid &&
locks[i].fnum == fnum) {
struct lock_struct *lock = &locks[i];
if (lock->context.tid == tid &&
lock->context.pid == pid &&
lock->fnum == fnum) {
/* found it - delete it */
if (count > 1 && i < count-1) {
memmove(&locks[i], &locks[i+1],
@@ -330,11 +572,73 @@ void brl_close(SMB_DEV_T dev, SMB_INO_T ino, pid_t pid, int tid, int fnum)
tdb_unlockchain(tdb, kbuf);
}
/****************************************************************************
Return a lock list associated with an open file.
****************************************************************************/
struct unlock_list *brl_getlocklist( TALLOC_CTX *ctx, SMB_DEV_T dev, SMB_INO_T ino, pid_t pid, int tid, int fnum)
{
struct lock_key key;
TDB_DATA kbuf, dbuf;
int i, count;
struct lock_struct *locks;
struct unlock_list *ulist = NULL;
key.device = dev;
key.inode = ino;
kbuf.dptr = (char *)&key;
kbuf.dsize = sizeof(key);
dbuf.dptr = NULL;
tdb_lockchain(tdb, kbuf);
dbuf = tdb_fetch(tdb, kbuf);
if (!dbuf.dptr) {
tdb_unlockchain(tdb, kbuf);
return NULL;
}
/* There are existing locks - allocate an entry for each one. */
locks = (struct lock_struct *)dbuf.dptr;
count = dbuf.dsize / sizeof(*locks);
for (i=0; i<count; i++) {
struct lock_struct *lock = &locks[i];
if (lock->context.tid == tid &&
lock->context.pid == pid &&
lock->fnum == fnum) {
struct unlock_list *ul_new = (struct unlock_list *)talloc(ctx,
sizeof(struct unlock_list));
if(ul_new == NULL) {
DEBUG(0,("brl_getlocklist: talloc fail.\n"));
return NULL; /* The talloc_destroy takes care of cleanup. */
}
ZERO_STRUCTP(ul_new);
ul_new->start = lock->start;
ul_new->size = lock->size;
DLIST_ADD(ulist, ul_new);
}
}
if (dbuf.dptr)
free(dbuf.dptr);
tdb_unlockchain(tdb, kbuf);
return ulist;
}
/****************************************************************************
traverse the whole database with this function, calling traverse_callback
on each lock
Traverse the whole database with this function, calling traverse_callback
on each lock.
****************************************************************************/
static int traverse_fn(TDB_CONTEXT *ttdb, TDB_DATA kbuf, TDB_DATA dbuf, void *state)
{
struct lock_struct *locks;
@@ -357,8 +661,9 @@ static int traverse_fn(TDB_CONTEXT *ttdb, TDB_DATA kbuf, TDB_DATA dbuf, void *st
}
/*******************************************************************
Call the specified function on each lock in the database
Call the specified function on each lock in the database.
********************************************************************/
int brl_forall(BRLOCK_FN(fn))
{
if (!tdb) return 0;

View File

@@ -380,29 +380,6 @@ static BOOL release_posix_lock(files_struct *fsp, SMB_BIG_UINT u_offset, SMB_BIG
return True;
}
/****************************************************************************
Remove any locks on this fd. Called from file_close().
****************************************************************************/
void locking_close_file(files_struct *fsp)
{
if (!lp_locking(SNUM(fsp->conn)))
return;
if(lp_posix_locking(SNUM(fsp->conn))) {
/*
* We need to release all POSIX locks we have on this
* fd.
*/
}
/*
* Now release all the tdb locks.
*/
brl_close(fsp->dev, fsp->inode, getpid(), fsp->conn->cnum, fsp->fnum);
}
/****************************************************************************
Utility function called to see if a file region is locked.
****************************************************************************/
@@ -502,51 +479,145 @@ BOOL do_unlock(files_struct *fsp,connection_struct *conn,
int *eclass,uint32 *ecode)
{
BOOL ok = False;
TALLOC_CTX *ul_ctx = NULL;
struct unlock_list *ulist = NULL;
struct unlock_list *ul = NULL;
pid_t pid;
if (!lp_locking(SNUM(conn)))
return(True);
if (!OPEN_FSP(fsp) || !fsp->can_lock || (fsp->conn != conn)) {
*eclass = ERRDOS;
*ecode = ERRlock;
return False;
}
DEBUG(10,("do_unlock: unlock start=%.0f len=%.0f requested for file %s\n",
(double)offset, (double)count, fsp->fsp_name ));
if (OPEN_FSP(fsp) && fsp->can_lock && (fsp->conn == conn)) {
if(lp_posix_locking(SNUM(conn))) {
/*
* Remove the existing lock record from the tdb lockdb
* before looking at POSIX locks. If this record doesn't
* match then don't bother looking to remove POSIX locks.
*/
#if 0
/*
* The following call calculates if there are any
* overlapping read locks held by this process on
* other fd's open on the same file and truncates
* any overlapping range and returns the value in
* the non_overlap_XXX variables. Thus the POSIX
* unlock may not be done on the same region as
* the brl_lock. JRA.
*/
pid = getpid();
brl_unlock_list(fsp->dev, fsp->inode, fsp->fnum,
#endif
/*
* Release the POSIX lock on this range.
*/
(void)release_posix_lock(fsp, offset, count);
fsp->num_posix_locks--;
}
ok = brl_unlock(fsp->dev, fsp->inode, fsp->fnum,
global_smbpid, getpid(), conn->cnum, offset, count);
}
ok = brl_unlock(fsp->dev, fsp->inode, fsp->fnum,
global_smbpid, pid, conn->cnum, offset, count);
if (!ok) {
*eclass = ERRDOS;
*ecode = ERRlock;
return False;
}
if (!lp_posix_locking(SNUM(conn)))
return True;
if ((ul_ctx = talloc_init()) == NULL) {
DEBUG(0,("do_unlock: unable to init talloc context.\n"));
return True; /* Not a fatal error. */
}
if ((ul = (struct unlock_list *)talloc(ul_ctx, sizeof(struct unlock_list))) == NULL) {
DEBUG(0,("do_unlock: unable to talloc unlock list.\n"));
talloc_destroy(ul_ctx);
return True; /* Not a fatal error. */
}
/*
* Create the initial list entry containing the
* lock we want to remove.
*/
ZERO_STRUCTP(ul);
ul->start = offset;
ul->size = count;
DLIST_ADD(ulist, ul);
/*
* The following call calculates if there are any
* overlapping read locks held by this process on
* other fd's open on the same file and creates a
* list of unlock ranges that will allow other
* POSIX lock ranges to remain on the file whilst the
* unlocks are performed.
*/
ulist = brl_unlock_list(ul_ctx, ulist, pid, fsp->dev, fsp->inode);
/*
* Release the POSIX locks on the list of ranges returned.
*/
for(; ulist; ulist = ulist->next)
(void)release_posix_lock(fsp, ulist->start, ulist->size);
talloc_destroy(ul_ctx);
/*
* We treat this as one unlock request for POSIX accounting purposes even
* if it may have been split into multiple smaller POSIX unlock ranges.
*/
fsp->num_posix_locks--;
return True; /* Did unlock */
}
/****************************************************************************
Remove any locks on this fd. Called from file_close().
****************************************************************************/
void locking_close_file(files_struct *fsp)
{
pid_t pid = getpid();
if (!lp_locking(SNUM(fsp->conn)))
return;
if(lp_posix_locking(SNUM(fsp->conn))) {
TALLOC_CTX *ul_ctx = NULL;
struct unlock_list *ul = NULL;
int eclass;
uint32 ecode;
if ((ul_ctx = talloc_init()) == NULL) {
DEBUG(0,("locking_close_file: unable to init talloc context.\n"));
return;
}
/*
* We need to release all POSIX locks we have on this
* fd. Get all our existing locks from the tdb locking database.
*/
ul = brl_getlocklist(ul_ctx, fsp->dev, fsp->inode, pid, fsp->conn->cnum, fsp->fnum);
/*
* Now unlock all of them. This will remove the brl entry also
* for each lock.
*/
for(; ul; ul = ul->next)
do_unlock(fsp,fsp->conn,ul->size,ul->start,&eclass,&ecode);
talloc_destroy(ul_ctx);
} else {
/*
* Just release all the tdb locks, no need to release individually.
*/
brl_close(fsp->dev, fsp->inode, pid, fsp->conn->cnum, fsp->fnum);
}
}
/****************************************************************************
Initialise the locking functions.
****************************************************************************/