Mirror of https://github.com/samba-team/samba.git
r3029: implemented byte range lock timeouts.
This adds a pvfs_wait_message() routine which uses the new messaging
system, event timers and talloc destructors to give a nice generic
async event handling system with an easy-to-use interface. The
extensions to pvfs_lock.c are built on calls to this
pvfs_wait_message() routine.
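To illustrate the interface, here is a minimal usage sketch (the
example_continue callback and example_state structure are illustrative
names only; the real caller is in the pvfs_lock.c changes below, and the
samba4 types are assumed from the surrounding tree):

    /* callback invoked either when a MSG_BRL_RETRY message addressed to us
       arrives (timed_out == False) or when end_time expires (timed_out == True) */
    static void example_continue(void *private, BOOL timed_out)
    {
        struct example_state *state = private;
        /* retry the operation here; once it succeeds or finally times out,
           talloc_free(state->wait_handle) removes both the message
           registration and the timer via the talloc destructor */
    }

    /* register interest in MSG_BRL_RETRY with an expiry time; the return
       value is a talloc handle - freeing it cancels the wait */
    state->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY,
                                           end_time, example_continue, state);
    if (state->wait_handle == NULL) {
        /* out of memory - fail the request */
    }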
We now pass all of our smbtorture locking tests, although while
writing this code I have thought of some additional tests that should
be added, particularly for lock cancel operations. I'll work on that
soon.
This commit also extends the smbtorture lock tests to exercise the
rather weird 0xEEFFFFFF locking semantics that I have discovered in
win2003. Win2003 treats the 0xEEFFFFFF boundary as special, and will
give different error codes on either side of it. Locks on both sides
are allowed; the only difference is which error code is given when a
lock is denied. Anyone like to hazard a guess as to why? It has me
stumped.
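For reference, the error-code selection that the new brl_lock_failed()
routine implements (condensed here from the brlock.c changes below;
same_as_last_lock_failure stands in for the full context/fnum/start/size
comparison) is roughly:

    /* a repeat of the previous failed lock request, or any denied lock at
       or beyond the 0xEF000000 boundary, gets NT_STATUS_FILE_LOCK_CONFLICT;
       a first failure below the boundary gets NT_STATUS_LOCK_NOT_GRANTED */
    if (same_as_last_lock_failure || lock->start >= 0xEF000000) {
        return NT_STATUS_FILE_LOCK_CONFLICT;
    }
    return NT_STATUS_LOCK_NOT_GRANTED;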
(This used to be commit 4395c0557a)
parent d37acd0fe7
commit d0cc571e30
@@ -26,50 +26,6 @@
#define MSG_DEBUG 1
#define MSG_PING 2
#define MSG_PONG 3
#define MSG_PROFILE 4
#define MSG_REQ_DEBUGLEVEL 5
#define MSG_DEBUGLEVEL 6
#define MSG_REQ_PROFILELEVEL 7
#define MSG_PROFILELEVEL 8
#define MSG_REQ_POOL_USAGE 9
#define MSG_POOL_USAGE 10

/* If dmalloc is included, set a steady-state mark */
#define MSG_REQ_DMALLOC_MARK 11

/* If dmalloc is included, dump to the dmalloc log a description of
 * what has changed since the last MARK */
#define MSG_REQ_DMALLOC_LOG_CHANGED 12

#define MSG_SHUTDOWN 13

/* Dump out the talloc useage. */
#define MSG_REQ_TALLOC_USAGE 14
#define MSG_TALLOC_USAGE 15

/* nmbd messages */
#define MSG_FORCE_ELECTION 1001
#define MSG_WINS_NEW_ENTRY 1002

/* printing messages */
/* #define MSG_PRINTER_NOTIFY 2001*/ /* Obsolete */
#define MSG_PRINTER_DRVUPGRADE 2002
#define MSG_PRINTER_NOTIFY2 2003
#define MSG_PRINTERDATA_INIT_RESET 2004

/* smbd messages */
#define MSG_SMB_CONF_UPDATED 3001
#define MSG_SMB_FORCE_TDIS 3002
#define MSG_SMB_SAM_SYNC 3003
#define MSG_SMB_SAM_REPL 3004
#define MSG_SMB_UNLOCK 3005

/* Flags to classify messages - used in message_send_all() */
/* Sender will filter by flag. */

#define FLAG_MSG_GENERAL 0x0001
#define FLAG_MSG_SMBD 0x0002
#define FLAG_MSG_NMBD 0x0004
#define FLAG_MSG_PRINTING 0x0008
#define MSG_BRL_RETRY 4

#endif
@@ -611,6 +611,6 @@ typedef struct nt_user_token {
#define REQ_CONTROL_ASYNC (1<<2) /* the backend will answer this one later */

/* passed to br lock code */
enum brl_type {READ_LOCK, WRITE_LOCK};
enum brl_type {READ_LOCK, WRITE_LOCK, PENDING_READ_LOCK, PENDING_WRITE_LOCK};

#endif /* _SMB_H */
@@ -250,14 +250,15 @@ void messaging_register(void *ctx, void *private,
/*
De-register the function for a particular message type.
*/
void messaging_deregister(void *ctx, uint32_t msg_type)
void messaging_deregister(void *ctx, uint32_t msg_type, void *private)
{
struct messaging_state *msg = ctx;
struct dispatch_fn *d, *next;

for (d = msg->dispatch; d; d = next) {
next = d->next;
if (d->msg_type == msg_type) {
if (d->msg_type == msg_type &&
d->private == private) {
DLIST_REMOVE(msg->dispatch, d);
talloc_free(d);
}
@@ -27,12 +27,6 @@

#include "includes.h"

struct brl_context {
struct tdb_wrap *w;
servid_t server;
uint16_t tid;
};

/*
in this module a "DATA_BLOB *file_key" is a blob that uniquely identifies
a file. For a local posix filesystem this will usually be a combination
@@ -60,13 +54,25 @@ struct lock_struct {
uint64_t size;
uint16_t fnum;
enum brl_type lock_type;
void *notify_ptr;
};

struct brl_context {
struct tdb_wrap *w;
servid_t server;
uint16_t tid;
void *messaging_ctx;
struct lock_struct last_lock_failure;
};


/*
Open up the brlock.tdb database. Close it down using
talloc_free()
talloc_free(). We need the messaging_ctx to allow for
pending lock notifications.
*/
void *brl_init(TALLOC_CTX *mem_ctx, servid_t server, uint16_t tid)
void *brl_init(TALLOC_CTX *mem_ctx, servid_t server, uint16_t tid,
void *messaging_ctx)
{
char *path;
struct brl_context *brl;
@@ -88,6 +94,8 @@ void *brl_init(TALLOC_CTX *mem_ctx, servid_t server, uint16_t tid)

brl->server = server;
brl->tid = tid;
brl->messaging_ctx = messaging_ctx;
ZERO_STRUCT(brl->last_lock_failure);

return (void *)brl;
}
@@ -103,12 +111,31 @@ static BOOL brl_same_context(struct lock_context *ctx1, struct lock_context *ctx
ctx1->tid == ctx2->tid);
}

/*
see if lck1 and lck2 overlap
*/
static BOOL brl_overlap(struct lock_struct *lck1,
struct lock_struct *lck2)
{
if (lck1->start >= (lck2->start + lck2->size) ||
lck2->start >= (lck1->start + lck1->size)) {
return False;
}
return True;
}

/*
See if lock2 can be added when lock1 is in place.
*/
static BOOL brl_conflict(struct lock_struct *lck1,
struct lock_struct *lck2)
{
/* pending locks don't conflict with anything */
if (lck1->lock_type >= PENDING_READ_LOCK ||
lck2->lock_type >= PENDING_READ_LOCK) {
return False;
}

if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
return False;
}
@@ -118,12 +145,7 @@ static BOOL brl_conflict(struct lock_struct *lck1,
return False;
}

if (lck1->start >= (lck2->start + lck2->size) ||
lck2->start >= (lck1->start + lck1->size)) {
return False;
}

return True;
return brl_overlap(lck1, lck2);
}

@@ -133,32 +155,68 @@ static BOOL brl_conflict(struct lock_struct *lck1,
*/
static BOOL brl_conflict_other(struct lock_struct *lck1, struct lock_struct *lck2)
{
if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
return False;

if (brl_same_context(&lck1->context, &lck2->context) &&
lck1->fnum == lck2->fnum) {
/* pending locks don't conflict with anything */
if (lck1->lock_type >= PENDING_READ_LOCK ||
lck2->lock_type >= PENDING_READ_LOCK) {
return False;
}

if (lck1->start >= (lck2->start + lck2->size) ||
lck2->start >= (lck1->start + lck1->size))
if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
return False;

return True;

/*
* note that incoming write calls conflict with existing READ
* locks even if the context is the same. JRA. See LOCKTEST7
* in smbtorture.
*/
if (brl_same_context(&lck1->context, &lck2->context) &&
lck1->fnum == lck2->fnum &&
(lck2->lock_type == READ_LOCK || lck1->lock_type == WRITE_LOCK)) {
return False;
}

return brl_overlap(lck1, lck2);
}


/*
amazingly enough, w2k3 "remembers" whether the last lock failure
is the same as this one and changes its error code. I wonder if any
app depends on this?
*/
static NTSTATUS brl_lock_failed(struct brl_context *brl, struct lock_struct *lock)
{
if (brl_same_context(&lock->context, &brl->last_lock_failure.context) &&
lock->fnum == brl->last_lock_failure.fnum &&
lock->start == brl->last_lock_failure.start &&
lock->size == brl->last_lock_failure.size) {
return NT_STATUS_FILE_LOCK_CONFLICT;
}
brl->last_lock_failure = *lock;
if (lock->start >= 0xEF000000) {
/* amazing the little things you learn with a test
suite. Locks beyond this offset (as a 64 bit
number!) always generate the conflict error
code. */
return NT_STATUS_FILE_LOCK_CONFLICT;
}
return NT_STATUS_LOCK_NOT_GRANTED;
}

/*
Lock a range of bytes.
Lock a range of bytes. The lock_type can be a PENDING_*_LOCK, in
which case a real lock is first tried, and if that fails then a
pending lock is created. When the pending lock is triggered (by
someone else closing an overlapping lock range) a messaging
notification is sent, identified by the notify_ptr
*/
NTSTATUS brl_lock(void *brl_ctx,
DATA_BLOB *file_key,
uint16_t smbpid,
uint16_t fnum,
uint64_t start, uint64_t size,
enum brl_type lock_type)
enum brl_type lock_type,
void *notify_ptr)
{
struct brl_context *brl = brl_ctx;
TDB_DATA kbuf, dbuf;
@@ -174,6 +232,20 @@ NTSTATUS brl_lock(void *brl_ctx,
return NT_STATUS_INTERNAL_DB_CORRUPTION;
}

/* if this is a pending lock, then with the chainlock held we
try to get the real lock. If we succeed then we don't need
to make it pending. This prevents a possible race condition
where the pending lock gets created after the lock that is
preventing the real lock gets removed */
if (lock_type >= PENDING_READ_LOCK) {
enum brl_type rw = (lock_type==PENDING_READ_LOCK? READ_LOCK : WRITE_LOCK);
status = brl_lock(brl_ctx, file_key, smbpid, fnum, start, size, rw, NULL);
if (NT_STATUS_IS_OK(status)) {
tdb_chainunlock(brl->w->tdb, kbuf);
return NT_STATUS_OK;
}
}

dbuf = tdb_fetch(brl->w->tdb, kbuf);

lock.context.smbpid = smbpid;
@@ -183,6 +255,7 @@ NTSTATUS brl_lock(void *brl_ctx,
lock.size = size;
lock.fnum = fnum;
lock.lock_type = lock_type;
lock.notify_ptr = notify_ptr;

if (dbuf.dptr) {
/* there are existing locks - make sure they don't conflict */
@@ -190,7 +263,7 @@ NTSTATUS brl_lock(void *brl_ctx,
count = dbuf.dsize / sizeof(*locks);
for (i=0; i<count; i++) {
if (brl_conflict(&locks[i], &lock)) {
status = NT_STATUS_LOCK_NOT_GRANTED;
status = brl_lock_failed(brl, &lock);
goto fail;
}
}
@@ -214,6 +287,14 @@ NTSTATUS brl_lock(void *brl_ctx,

free(dbuf.dptr);
tdb_chainunlock(brl->w->tdb, kbuf);

/* the caller needs to know if the real lock was granted. If
we have reached here then it must be a pending lock that
was granted, so tell them the lock failed */
if (lock_type >= PENDING_READ_LOCK) {
return brl_lock_failed(brl, &lock);
}

return NT_STATUS_OK;

fail:
@@ -224,6 +305,57 @@ NTSTATUS brl_lock(void *brl_ctx,
}


/*
we are removing a lock that might be holding up a pending lock. Scan for pending
locks that cover this range and if we find any then notify the server that it should
retry the lock
*/
static void brl_notify_unlock(struct brl_context *brl,
struct lock_struct *locks, int count,
struct lock_struct *removed_lock)
{
int i, last_notice;

/* the last_notice logic is to prevent stampeding on a lock
range. It prevents us sending hundreds of notifies on the
same range of bytes. It doesn't prevent all possible
stampedes, but it does prevent the most common problem */
last_notice = -1;

for (i=0;i<count;i++) {
if (locks[i].lock_type >= PENDING_READ_LOCK &&
brl_overlap(&locks[i], removed_lock)) {
DATA_BLOB data;

if (last_notice != -1 && brl_overlap(&locks[i], &locks[last_notice])) {
continue;
}
last_notice = i;
data.data = (void *)&locks[i].notify_ptr;
data.length = sizeof(void *);
messaging_send(brl->messaging_ctx, locks[i].context.server, MSG_BRL_RETRY, &data);
}
}
}


/*
send notifications for all pending locks - the file is being closed by this
user
*/
static void brl_notify_all(struct brl_context *brl,
struct lock_struct *locks, int count)
{
int i;
for (i=0;i<count;i++) {
if (locks->lock_type >= PENDING_READ_LOCK) {
brl_notify_unlock(brl, locks, count, &locks[i]);
}
}
}



/*
Unlock a range of bytes.
*/
@@ -261,15 +393,92 @@ NTSTATUS brl_unlock(void *brl_ctx,
locks = (struct lock_struct *)dbuf.dptr;
count = dbuf.dsize / sizeof(*locks);

locks = (struct lock_struct *)dbuf.dptr;
count = dbuf.dsize / sizeof(*locks);
for (i=0; i<count; i++) {
struct lock_struct *lock = &locks[i];

if (brl_same_context(&lock->context, &context) &&
lock->fnum == fnum &&
lock->start == start &&
lock->size == size) {
lock->size == size &&
lock->notify_ptr == NULL) {
/* found it - delete it */
if (count == 1) {
if (tdb_delete(brl->w->tdb, kbuf) != 0) {
status = NT_STATUS_INTERNAL_DB_CORRUPTION;
goto fail;
}
} else {
struct lock_struct removed_lock = *lock;
if (i < count-1) {
memmove(&locks[i], &locks[i+1],
sizeof(*locks)*((count-1) - i));
}
count--;

/* send notifications for any relevant pending locks */
brl_notify_unlock(brl, locks, count, &removed_lock);

dbuf.dsize = count * sizeof(*locks);

if (tdb_store(brl->w->tdb, kbuf, dbuf, TDB_REPLACE) != 0) {
status = NT_STATUS_INTERNAL_DB_CORRUPTION;
goto fail;
}
}

free(dbuf.dptr);
tdb_chainunlock(brl->w->tdb, kbuf);
return NT_STATUS_OK;
}
}

/* we didn't find it */
status = NT_STATUS_RANGE_NOT_LOCKED;

fail:
free(dbuf.dptr);
tdb_chainunlock(brl->w->tdb, kbuf);
return status;
}


/*
remove a pending lock. This is called when the caller has either
given up trying to establish a lock or when they have succeeded in
getting it. In either case they no longer need to be notified.
*/
NTSTATUS brl_remove_pending(void *brl_ctx,
DATA_BLOB *file_key,
void *notify_ptr)
{
struct brl_context *brl = brl_ctx;
TDB_DATA kbuf, dbuf;
int count, i;
struct lock_struct *locks;
NTSTATUS status;

kbuf.dptr = file_key->data;
kbuf.dsize = file_key->length;

if (tdb_chainlock(brl->w->tdb, kbuf) != 0) {
return NT_STATUS_INTERNAL_DB_CORRUPTION;
}

dbuf = tdb_fetch(brl->w->tdb, kbuf);
if (!dbuf.dptr) {
tdb_chainunlock(brl->w->tdb, kbuf);
return NT_STATUS_RANGE_NOT_LOCKED;
}

/* there are existing locks - find a match */
locks = (struct lock_struct *)dbuf.dptr;
count = dbuf.dsize / sizeof(*locks);

for (i=0; i<count; i++) {
struct lock_struct *lock = &locks[i];

if (lock->notify_ptr == notify_ptr &&
lock->context.server == brl->server) {
/* found it - delete it */
if (count == 1) {
if (tdb_delete(brl->w->tdb, kbuf) != 0) {
@@ -281,7 +490,8 @@ NTSTATUS brl_unlock(void *brl_ctx,
memmove(&locks[i], &locks[i+1],
sizeof(*locks)*((count-1) - i));
}
dbuf.dsize -= sizeof(*locks);
count--;
dbuf.dsize = count * sizeof(*locks);
if (tdb_store(brl->w->tdb, kbuf, dbuf, TDB_REPLACE) != 0) {
status = NT_STATUS_INTERNAL_DB_CORRUPTION;
goto fail;
@@ -404,7 +614,13 @@ NTSTATUS brl_close(void *brl_ctx,
status = NT_STATUS_INTERNAL_DB_CORRUPTION;
}
} else if (dcount != 0) {
dbuf.dsize -= dcount * sizeof(*locks);
/* tell all pending lock holders for this file that
they have a chance now. This is a bit indiscriminant,
but works OK */
brl_notify_all(brl, locks, count);

dbuf.dsize = count * sizeof(*locks);

if (tdb_store(brl->w->tdb, kbuf, dbuf, TDB_REPLACE) != 0) {
status = NT_STATUS_INTERNAL_DB_CORRUPTION;
}
@@ -21,6 +21,7 @@ ADD_OBJ_FILES = \
ntvfs/posix/pvfs_resolve.o \
ntvfs/posix/pvfs_shortname.o \
ntvfs/posix/pvfs_lock.o \
ntvfs/posix/pvfs_wait.o \
ntvfs/common/brlock.o
# End MODULE ntvfs_posix
################################################
@@ -44,6 +44,153 @@ NTSTATUS pvfs_check_lock(struct pvfs_state *pvfs,
offset, count, rw);
}

/* this state structure holds information about a lock we are waiting on */
struct pending_state {
struct pvfs_state *pvfs;
union smb_lock *lck;
struct pvfs_file *f;
struct smbsrv_request *req;
int pending_lock;
void *wait_handle;
time_t end_time;
};


/*
a secondary attempt to setup a lock has failed - back out
the locks we did get and send an error
*/
static void pvfs_lock_async_failed(struct pvfs_state *pvfs,
struct smbsrv_request *req,
struct pvfs_file *f,
struct smb_lock_entry *locks,
int i,
NTSTATUS status)
{
/* undo the locks we just did */
for (i=i-1;i>=0;i--) {
brl_unlock(pvfs->brl_context,
&f->locking_key,
locks[i].pid,
f->fnum,
locks[i].offset,
locks[i].count);
}
req->async.status = status;
req->async.send_fn(req);
}


/*
called when we receive a pending lock notification. It means that
either our lock timed out or somoene else has unlocked a overlapping
range, so we should try the lock again. Note that on timeout we
do retry the lock, giving it a last chance.
*/
static void pvfs_pending_lock_continue(void *private, BOOL timed_out)
{
struct pending_state *pending = private;
struct pvfs_state *pvfs = pending->pvfs;
struct pvfs_file *f = pending->f;
struct smbsrv_request *req = pending->req;
union smb_lock *lck = pending->lck;
struct smb_lock_entry *locks;
enum brl_type rw;
NTSTATUS status;
int i;

locks = lck->lockx.in.locks + lck->lockx.in.ulock_cnt;

if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
rw = READ_LOCK;
} else {
rw = WRITE_LOCK;
}

status = brl_lock(pvfs->brl_context,
&f->locking_key,
req->smbpid,
f->fnum,
locks[pending->pending_lock].offset,
locks[pending->pending_lock].count,
rw, NULL);

/* if we have failed and timed out, or succeeded, then we
don't need the pending lock any more */
if (NT_STATUS_IS_OK(status) || timed_out) {
NTSTATUS status2;
status2 = brl_remove_pending(pvfs->brl_context, &f->locking_key, pending);
if (!NT_STATUS_IS_OK(status2)) {
DEBUG(0,("pvfs_lock: failed to remove pending lock - %s\n", nt_errstr(status2)));
}
talloc_free(pending->wait_handle);
}

if (!NT_STATUS_IS_OK(status)) {
if (timed_out) {
/* no more chances */
pvfs_lock_async_failed(pvfs, req, f, locks, pending->pending_lock, status);
}
/* we can try again */
return;
}

/* if we haven't timed out yet, then we can do more pending locks */
if (timed_out) {
pending = NULL;
} else {
if (rw == READ_LOCK) {
rw = PENDING_READ_LOCK;
} else {
rw = PENDING_WRITE_LOCK;
}
}

/* we've now got the pending lock. try and get the rest, which might
lead to more pending locks */
for (i=pending->pending_lock;i<lck->lockx.in.lock_cnt;i++) {
if (pending) {
pending->pending_lock = i;
}

status = brl_lock(pvfs->brl_context,
&f->locking_key,
req->smbpid,
f->fnum,
locks[i].offset,
locks[i].count,
rw, pending);
if (!NT_STATUS_IS_OK(status)) {
if (pending) {
/* a timed lock failed - setup a wait message to handle
the pending lock notification or a timeout */
pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY,
pending->end_time,
pvfs_pending_lock_continue,
pending);
if (pending->wait_handle == NULL) {
pvfs_lock_async_failed(pvfs, req, f, locks, i, NT_STATUS_NO_MEMORY);
}
return;
}
pvfs_lock_async_failed(pvfs, req, f, locks, i, status);
return;
}
}

brl_unlock(pvfs->brl_context,
&f->locking_key,
req->smbpid,
f->fnum,
lck->lock.in.offset,
lck->lock.in.count);

/* we've managed to get all the locks. Tell the client */
req->async.status = NT_STATUS_OK;
req->async.send_fn(req);
}


/*
lock or unlock a byte range
*/
@@ -55,6 +202,7 @@ NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
struct smb_lock_entry *locks;
int i;
enum brl_type rw;
struct pending_state *pending = NULL;

f = pvfs_find_fd(pvfs, req, lck->generic.in.fnum);
if (!f) {
@@ -69,7 +217,7 @@ NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
f->fnum,
lck->lock.in.offset,
lck->lock.in.count,
WRITE_LOCK);
WRITE_LOCK, NULL);

case RAW_LOCK_UNLOCK:
return brl_unlock(pvfs->brl_context,
@@ -88,11 +236,25 @@ NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
}

/* now the lockingX case, most common and also most complex */
if (lck->lockx.in.timeout != 0) {
pending = talloc_p(req, struct pending_state);
if (pending == NULL) {
return NT_STATUS_NO_MEMORY;
}

pending->pvfs = pvfs;
pending->lck = lck;
pending->f = f;
pending->req = req;

/* round up to the nearest second */
pending->end_time = time(NULL) + ((lck->lockx.in.timeout+999)/1000);
}

if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
rw = READ_LOCK;
rw = pending? PENDING_READ_LOCK : READ_LOCK;
} else {
rw = WRITE_LOCK;
rw = pending? PENDING_WRITE_LOCK : WRITE_LOCK;
}

if (lck->lockx.in.mode &
@@ -125,14 +287,30 @@ NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
for (i=0;i<lck->lockx.in.lock_cnt;i++) {
NTSTATUS status;

if (pending) {
pending->pending_lock = i;
}

status = brl_lock(pvfs->brl_context,
&f->locking_key,
locks[i].pid,
f->fnum,
locks[i].offset,
locks[i].count,
rw);
rw, pending);
if (!NT_STATUS_IS_OK(status)) {
if (pending) {
/* a timed lock failed - setup a wait message to handle
the pending lock notification or a timeout */
pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY,
pending->end_time,
pvfs_pending_lock_continue,
pending);
if (pending->wait_handle == NULL) {
return NT_STATUS_NO_MEMORY;
}
return NT_STATUS_OK;
}
/* undo the locks we just did */
for (i=i-1;i>=0;i--) {
brl_unlock(pvfs->brl_context,
@@ -96,9 +96,11 @@ NTSTATUS pvfs_open(struct ntvfs_module_context *ntvfs,
flags = O_CREAT | O_TRUNC;
break;
case NTCREATEX_DISP_OPEN:
case NTCREATEX_DISP_OVERWRITE:
flags = 0;
break;
case NTCREATEX_DISP_OVERWRITE:
flags = O_TRUNC;
break;
case NTCREATEX_DISP_CREATE:
flags = O_CREAT | O_EXCL;
break;
@@ -222,7 +224,7 @@ NTSTATUS pvfs_close(struct ntvfs_module_context *ntvfs,
if (!NT_STATUS_IS_OK(status)) {
return status;
}


if (close(f->fd) != 0) {
status = pvfs_map_errno(pvfs, errno);
} else {
source4/ntvfs/posix/pvfs_wait.c (new file, 128 lines)
@@ -0,0 +1,128 @@
/*
Unix SMB/CIFS implementation.

POSIX NTVFS backend - async request wait routines

Copyright (C) Andrew Tridgell 2004

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include "include/includes.h"
#include "vfs_posix.h"

/* the context for a single wait instance */
struct pvfs_wait {
void (*handler)(void *, BOOL);
void *private;
struct timed_event *te;
int msg_type;
void *msg_ctx;
struct event_context *ev;
};


/*
receive a completion message for a wait
*/
static void pvfs_wait_dispatch(void *msg_ctx, void *private, uint32_t msg_type,
servid_t src, DATA_BLOB *data)
{
struct pvfs_wait *pwait = private;

/* we need to check that this one is for us. This sender sends
the private pointer as the body of the message. This might
seem a little unusual, but as the pointer is guaranteed
unique for this server, it is a good token */
if (data->length != sizeof(void *) ||
*(void **)data->data != pwait->private) {
return;
}

pwait->handler(pwait->private, False);
}


/*
receive a timeout on a message wait
*/
static void pvfs_wait_timeout(struct event_context *ev, struct timed_event *te, time_t t)
{
struct pvfs_wait *pwait = te->private;
pwait->handler(pwait->private, True);
}


/*
destroy a pending wait
*/
static int pvfs_wait_destructor(void *ptr)
{
struct pvfs_wait *pwait = ptr;
messaging_deregister(pwait->msg_ctx, pwait->msg_type, pwait->private);
event_remove_timed(pwait->ev, pwait->te);
return 0;
}

/*
setup a request to wait on a message of type msg_type, with a
timeout (given as an expiry time)

the return value is a handle. To stop waiting talloc_free this
handle.
*/
void *pvfs_wait_message(struct pvfs_state *pvfs,
struct smbsrv_request *req,
int msg_type,
time_t end_time,
void (*fn)(void *, BOOL),
void *private)
{
struct timed_event te;
struct pvfs_wait *pwait;

pwait = talloc_p(req, struct pvfs_wait);
if (pwait == NULL) {
return NULL;
}

pwait->private = private;
pwait->handler = fn;
pwait->msg_ctx = pvfs->tcon->smb_conn->connection->messaging_ctx;
pwait->ev = req->tcon->smb_conn->connection->event.ctx;
pwait->msg_type = msg_type;

/* setup a timer */
te.next_event = end_time;
te.handler = pvfs_wait_timeout;
te.private = pwait;
pwait->te = event_add_timed(pwait->ev, &te);

/* register with the messaging subsystem for this message
type */
messaging_register(pwait->msg_ctx,
pwait,
msg_type,
pvfs_wait_dispatch);

/* tell the main smb server layer that we will be replying
asynchronously */
req->control_flags |= REQ_CONTROL_ASYNC;

/* make sure we cleanup the timer and message handler */
talloc_set_destructor(pwait, pvfs_wait_destructor);

return pwait;
}
@@ -89,7 +89,8 @@ static NTSTATUS pvfs_connect(struct ntvfs_module_context *ntvfs,

pvfs->brl_context = brl_init(pvfs,
pvfs->tcon->smb_conn->connection->server_id,
pvfs->tcon->service);
pvfs->tcon->service,
pvfs->tcon->smb_conn->connection->messaging_ctx);
if (pvfs->brl_context == NULL) {
return NT_STATUS_INTERNAL_DB_CORRUPTION;
}
@@ -76,6 +76,42 @@ BOOL torture_locktest1(int dummy)
NT_STATUS_LOCK_NOT_GRANTED)) return False;
}

if (NT_STATUS_IS_OK(smbcli_lock(cli2->tree, fnum3, 0, 4, 0, WRITE_LOCK))) {
printf("lock2 succeeded! This is a locking bug\n");
return False;
} else {
if (!check_error(__location__, cli2, ERRDOS, ERRlock,
NT_STATUS_FILE_LOCK_CONFLICT)) return False;
}

if (NT_STATUS_IS_ERR(smbcli_lock(cli1->tree, fnum1, 5, 9, 0, WRITE_LOCK))) {
printf("lock1 failed (%s)\n", smbcli_errstr(cli1->tree));
return False;
}

if (NT_STATUS_IS_OK(smbcli_lock(cli2->tree, fnum3, 5, 9, 0, WRITE_LOCK))) {
printf("lock2 succeeded! This is a locking bug\n");
return False;
} else {
if (!check_error(__location__, cli2, ERRDOS, ERRlock,
NT_STATUS_LOCK_NOT_GRANTED)) return False;
}

if (NT_STATUS_IS_OK(smbcli_lock(cli2->tree, fnum3, 0, 4, 0, WRITE_LOCK))) {
printf("lock2 succeeded! This is a locking bug\n");
return False;
} else {
if (!check_error(__location__, cli2, ERRDOS, ERRlock,
NT_STATUS_LOCK_NOT_GRANTED)) return False;
}

if (NT_STATUS_IS_OK(smbcli_lock(cli2->tree, fnum3, 0, 4, 0, WRITE_LOCK))) {
printf("lock2 succeeded! This is a locking bug\n");
return False;
} else {
if (!check_error(__location__, cli2, ERRDOS, ERRlock,
NT_STATUS_FILE_LOCK_CONFLICT)) return False;
}

lock_timeout = (6 + (random() % 20));
printf("Testing lock timeout with timeout=%u\n", lock_timeout);
@@ -768,21 +804,23 @@ BOOL torture_locktest7(int dummy)
memset(buf, 0, sizeof(buf));

if (smbcli_write(cli1->tree, fnum1, 0, buf, 0, sizeof(buf)) != sizeof(buf)) {
printf("Failed to create file\n");
printf("Failed to create file (%s)\n", __location__);
goto fail;
}

cli1->session->pid = 1;

if (NT_STATUS_IS_ERR(smbcli_lock(cli1->tree, fnum1, 130, 4, 0, READ_LOCK))) {
printf("Unable to apply read lock on range 130:4, error was %s\n", smbcli_errstr(cli1->tree));
printf("Unable to apply read lock on range 130:4, error was %s (%s)\n",
smbcli_errstr(cli1->tree), __location__);
goto fail;
} else {
printf("pid1 successfully locked range 130:4 for READ\n");
}

if (smbcli_read(cli1->tree, fnum1, buf, 130, 4) != 4) {
printf("pid1 unable to read the range 130:4, error was %s\n", smbcli_errstr(cli1->tree));
printf("pid1 unable to read the range 130:4, error was %s (%s)\n",
smbcli_errstr(cli1->tree), __location__);
goto fail;
} else {
printf("pid1 successfully read the range 130:4\n");
@@ -791,11 +829,13 @@ BOOL torture_locktest7(int dummy)
if (smbcli_write(cli1->tree, fnum1, 0, buf, 130, 4) != 4) {
printf("pid1 unable to write to the range 130:4, error was %s\n", smbcli_errstr(cli1->tree));
if (NT_STATUS_V(smbcli_nt_error(cli1->tree)) != NT_STATUS_V(NT_STATUS_FILE_LOCK_CONFLICT)) {
printf("Incorrect error (should be NT_STATUS_FILE_LOCK_CONFLICT)\n");
printf("Incorrect error (should be NT_STATUS_FILE_LOCK_CONFLICT) (%s)\n",
__location__);
goto fail;
}
} else {
printf("pid1 successfully wrote to the range 130:4 (should be denied)\n");
printf("pid1 successfully wrote to the range 130:4 (should be denied) (%s)\n",
__location__);
goto fail;
}

@@ -810,11 +850,13 @@ BOOL torture_locktest7(int dummy)
if (smbcli_write(cli1->tree, fnum1, 0, buf, 130, 4) != 4) {
printf("pid2 unable to write to the range 130:4, error was %s\n", smbcli_errstr(cli1->tree));
if (NT_STATUS_V(smbcli_nt_error(cli1->tree)) != NT_STATUS_V(NT_STATUS_FILE_LOCK_CONFLICT)) {
printf("Incorrect error (should be NT_STATUS_FILE_LOCK_CONFLICT)\n");
printf("Incorrect error (should be NT_STATUS_FILE_LOCK_CONFLICT) (%s)\n",
__location__);
goto fail;
}
} else {
printf("pid2 successfully wrote to the range 130:4 (should be denied)\n");
printf("pid2 successfully wrote to the range 130:4 (should be denied) (%s)\n",
__location__);
goto fail;
}

@@ -822,21 +864,24 @@ BOOL torture_locktest7(int dummy)
smbcli_unlock(cli1->tree, fnum1, 130, 4);

if (NT_STATUS_IS_ERR(smbcli_lock(cli1->tree, fnum1, 130, 4, 0, WRITE_LOCK))) {
printf("Unable to apply write lock on range 130:4, error was %s\n", smbcli_errstr(cli1->tree));
printf("Unable to apply write lock on range 130:4, error was %s (%s)\n",
smbcli_errstr(cli1->tree), __location__);
goto fail;
} else {
printf("pid1 successfully locked range 130:4 for WRITE\n");
}

if (smbcli_read(cli1->tree, fnum1, buf, 130, 4) != 4) {
printf("pid1 unable to read the range 130:4, error was %s\n", smbcli_errstr(cli1->tree));
printf("pid1 unable to read the range 130:4, error was %s (%s)\n",
smbcli_errstr(cli1->tree), __location__);
goto fail;
} else {
printf("pid1 successfully read the range 130:4\n");
}

if (smbcli_write(cli1->tree, fnum1, 0, buf, 130, 4) != 4) {
printf("pid1 unable to write to the range 130:4, error was %s\n", smbcli_errstr(cli1->tree));
printf("pid1 unable to write to the range 130:4, error was %s (%s)\n",
smbcli_errstr(cli1->tree), __location__);
goto fail;
} else {
printf("pid1 successfully wrote to the range 130:4\n");
@@ -845,24 +890,30 @@ BOOL torture_locktest7(int dummy)
cli1->session->pid = 2;

if (smbcli_read(cli1->tree, fnum1, buf, 130, 4) != 4) {
printf("pid2 unable to read the range 130:4, error was %s\n", smbcli_errstr(cli1->tree));
printf("pid2 unable to read the range 130:4, error was %s\n",
smbcli_errstr(cli1->tree));
if (NT_STATUS_V(smbcli_nt_error(cli1->tree)) != NT_STATUS_V(NT_STATUS_FILE_LOCK_CONFLICT)) {
printf("Incorrect error (should be NT_STATUS_FILE_LOCK_CONFLICT)\n");
printf("Incorrect error (should be NT_STATUS_FILE_LOCK_CONFLICT) (%s)\n",
__location__);
goto fail;
}
} else {
printf("pid2 successfully read the range 130:4 (should be denied)\n");
printf("pid2 successfully read the range 130:4 (should be denied) (%s)\n",
__location__);
goto fail;
}

if (smbcli_write(cli1->tree, fnum1, 0, buf, 130, 4) != 4) {
printf("pid2 unable to write to the range 130:4, error was %s\n", smbcli_errstr(cli1->tree));
printf("pid2 unable to write to the range 130:4, error was %s\n",
smbcli_errstr(cli1->tree));
if (NT_STATUS_V(smbcli_nt_error(cli1->tree)) != NT_STATUS_V(NT_STATUS_FILE_LOCK_CONFLICT)) {
printf("Incorrect error (should be NT_STATUS_FILE_LOCK_CONFLICT)\n");
printf("Incorrect error (should be NT_STATUS_FILE_LOCK_CONFLICT) (%s)\n",
__location__);
goto fail;
}
} else {
printf("pid2 successfully wrote to the range 130:4 (should be denied)\n");
printf("pid2 successfully wrote to the range 130:4 (should be denied) (%s)\n",
__location__);
goto fail;
}

@@ -871,7 +922,7 @@ BOOL torture_locktest7(int dummy)
fnum2 = smbcli_open(cli1->tree, fname, O_RDWR|O_TRUNC, DENY_NONE);

if (fnum2 == -1) {
printf("Unable to truncate locked file.\n");
printf("Unable to truncate locked file (%s)\n", __location__);
correct = False;
goto fail;
} else {
@@ -879,13 +930,13 @@ BOOL torture_locktest7(int dummy)
}

if (NT_STATUS_IS_ERR(smbcli_getatr(cli1->tree, fname, NULL, &size, NULL))) {
printf("getatr failed (%s)\n", smbcli_errstr(cli1->tree));
printf("getatr failed (%s) (%s)\n", smbcli_errstr(cli1->tree), __location__);
correct = False;
goto fail;
}

if (size != 0) {
printf("Unable to truncate locked file. Size was %u\n", size);
printf("Unable to truncate locked file. Size was %u (%s)\n", size, __location__);
correct = False;
goto fail;
}
@@ -99,11 +99,47 @@ static BOOL test_lock(struct smbcli_state *cli, TALLOC_CTX *mem_ctx)
status = smb_raw_lock(cli->tree, &io);
CHECK_STATUS(status, NT_STATUS_RANGE_NOT_LOCKED);

printf("Trying 0xEEFFFFFF lock\n");
io.lock.level = RAW_LOCK_LOCK;
io.lock.in.fnum = fnum;
io.lock.in.count = 4000;
io.lock.in.offset = 0xEEFFFFFF;
status = smb_raw_lock(cli->tree, &io);
CHECK_STATUS(status, NT_STATUS_OK);
cli->session->pid++;
status = smb_raw_lock(cli->tree, &io);
CHECK_STATUS(status, NT_STATUS_LOCK_NOT_GRANTED);
cli->session->pid--;
io.lock.level = RAW_LOCK_UNLOCK;
status = smb_raw_lock(cli->tree, &io);
CHECK_STATUS(status, NT_STATUS_OK);
io.lock.level = RAW_LOCK_UNLOCK;
status = smb_raw_lock(cli->tree, &io);
CHECK_STATUS(status, NT_STATUS_RANGE_NOT_LOCKED);

printf("Trying 0xEF000000 lock\n");
io.lock.level = RAW_LOCK_LOCK;
io.lock.in.fnum = fnum;
io.lock.in.count = 4000;
io.lock.in.offset = 0xEEFFFFFF;
status = smb_raw_lock(cli->tree, &io);
CHECK_STATUS(status, NT_STATUS_OK);
cli->session->pid++;
status = smb_raw_lock(cli->tree, &io);
CHECK_STATUS(status, NT_STATUS_FILE_LOCK_CONFLICT);
cli->session->pid--;
io.lock.level = RAW_LOCK_UNLOCK;
status = smb_raw_lock(cli->tree, &io);
CHECK_STATUS(status, NT_STATUS_OK);
io.lock.level = RAW_LOCK_UNLOCK;
status = smb_raw_lock(cli->tree, &io);
CHECK_STATUS(status, NT_STATUS_RANGE_NOT_LOCKED);

printf("Trying max lock\n");
io.lock.level = RAW_LOCK_LOCK;
io.lock.in.fnum = fnum;
io.lock.in.count = 4000;
io.lock.in.offset = ~0;
io.lock.in.offset = 0xEF000000;
status = smb_raw_lock(cli->tree, &io);
CHECK_STATUS(status, NT_STATUS_OK);
cli->session->pid++;