mirror of https://github.com/samba-team/samba.git
20d17b8057
preparation for the full share modes and ntcreatex code that I am
working on.
highlights include:
- changed the way a backend determines if it is allowed to process a
request asynchronously. The previous method of looking at the
send_fn caused problems when an intermediate ntvfs module disabled
it and the caller then wanted to finish processing using this
function. The new method is a REQ_CONTROL_MAY_ASYNC flag in
req->control_flags, which is also a bit easier to read
- fixed 2 bugs in the readbraw server code. One related to trying to
answer a readbraw with SMB signing (which can't work, and crashed
our signing code); the second related to error handling, which
attempted to send a normal SMB error packet when readbraw must
send a zero-length read reply (as it has no header)
- added several more ntvfs_generic.c generic mapping functions. This
means that backends no longer need to implement such esoteric
functions as SMBwriteunlock() if they don't want to. The backend
can just request the mapping layer turn it into a write followed by
an unlock. This makes the backends considerably simpler as they
only need to implement one style of each function for lock, read,
write, open etc, rather than the full host of functions that SMB
provides. A backend can still choose to implement them
individually, of course, and the CIFS backend does that.
- simplified the generic structures to make them identical to the
principal call for several common SMB calls (such as
RAW_WRITE_GENERIC now being an alias for RAW_WRITE_WRITEX).
- started rewriting the pvfs_open() code in preparation for the full
ntcreatex semantics.
- in pvfs_open and ipc_open, initially allocate the open file
structure as a child of the request, so on error we don't need to
clean up. Then, when the open is going to succeed, we steal the
pointer into the long-term backend context. This makes for much
simpler error handling (and fixes some bugs); a sketch of this
pattern follows the commit message
- use a destructor in the ipc backend to make sure that everything is
cleaned up on receive error conditions.
- switched the ipc backend to using idtree for fnum allocation
- in the ntvfs_generic mapping routines, use an allocated secondary
structure rather than a stack structure to ensure the request pointer
remains valid even if the backend replies asynchronously.
(This used to be commit 3457c1836c)
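To illustrate the open-file allocation pattern described in the commit message, here is a minimal sketch. It is not the actual pvfs/ipc code; backend_open_file, struct backend_ctx, struct backend_file and its name member are made-up names. The structure is first parented to the request, so any early error return needs no explicit cleanup, and only once the open is certain to succeed is ownership moved to the long-lived backend context with talloc_steal().

/* hypothetical types for the sketch */
struct backend_ctx;
struct backend_file {
	const char *name;
};

static NTSTATUS backend_open_file(struct backend_ctx *ctx,
				  struct smbsrv_request *req,
				  const char *name,
				  struct backend_file **result)
{
	/* parent the new structure to the request: any early error
	   return below frees it along with the request */
	struct backend_file *f = talloc_p(req, struct backend_file);
	if (f == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	f->name = talloc_strdup(f, name);
	if (f->name == NULL) {
		/* no explicit cleanup needed - f is still owned by req */
		return NT_STATUS_NO_MEMORY;
	}

	/* ... any other checks that may fail simply return an error ... */

	/* the open is going to succeed: steal the pointer into the
	   long-term backend context so it outlives this request */
	talloc_steal(ctx, f);
	*result = f;
	return NT_STATUS_OK;
}

The destructor mentioned for the ipc backend is the complementary technique: talloc_set_destructor() attaches a cleanup function to the structure itself, so freeing it on a receive error tears down whatever else it owns.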
389 lines
9.9 KiB
C
/*
   Unix SMB/CIFS implementation.

   POSIX NTVFS backend - locking

   Copyright (C) Andrew Tridgell 2004

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include "include/includes.h"
#include "vfs_posix.h"


/*
  check if we can perform IO on a range that might be locked
*/
NTSTATUS pvfs_check_lock(struct pvfs_state *pvfs,
			 struct pvfs_file *f,
			 uint16_t smbpid,
			 uint64_t offset, uint64_t count,
			 enum brl_type rw)
{
	if (!(pvfs->flags & PVFS_FLAG_STRICT_LOCKING)) {
		return NT_STATUS_OK;
	}

	return brl_locktest(pvfs->brl_context,
			    &f->locking_key,
			    f->fnum,
			    smbpid,
			    offset, count, rw);
}

/* this state structure holds information about a lock we are waiting on */
struct pvfs_pending_lock {
	struct pvfs_pending_lock *next, *prev;
	struct pvfs_state *pvfs;
	union smb_lock *lck;
	struct pvfs_file *f;
	struct smbsrv_request *req;
	int pending_lock;
	void *wait_handle;
	time_t end_time;
};

/*
  a secondary attempt to setup a lock has failed - back out
  the locks we did get and send an error
*/
static void pvfs_lock_async_failed(struct pvfs_state *pvfs,
				   struct smbsrv_request *req,
				   struct pvfs_file *f,
				   struct smb_lock_entry *locks,
				   int i,
				   NTSTATUS status)
{
	/* undo the locks we just did */
	for (i=i-1;i>=0;i--) {
		brl_unlock(pvfs->brl_context,
			   &f->locking_key,
			   locks[i].pid,
			   f->fnum,
			   locks[i].offset,
			   locks[i].count);
		f->lock_count--;
	}
	req->async.status = status;
	req->async.send_fn(req);
}


/*
  called when we receive a pending lock notification. It means that
  either our lock timed out or someone else has unlocked an overlapping
  range, so we should try the lock again. Note that on timeout we
  do retry the lock, giving it a last chance.
*/
static void pvfs_pending_lock_continue(void *private, BOOL timed_out)
{
	struct pvfs_pending_lock *pending = private;
	struct pvfs_state *pvfs = pending->pvfs;
	struct pvfs_file *f = pending->f;
	struct smbsrv_request *req = pending->req;
	union smb_lock *lck = pending->lck;
	struct smb_lock_entry *locks;
	enum brl_type rw;
	NTSTATUS status;
	int i;
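
	/* the entries to lock follow the unlock entries in the lockingX request */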
	locks = lck->lockx.in.locks + lck->lockx.in.ulock_cnt;

	if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
		rw = READ_LOCK;
	} else {
		rw = WRITE_LOCK;
	}

	DLIST_REMOVE(f->pending_list, pending);

	status = brl_lock(pvfs->brl_context,
			  &f->locking_key,
			  req->smbpid,
			  f->fnum,
			  locks[pending->pending_lock].offset,
			  locks[pending->pending_lock].count,
			  rw, NULL);

	if (NT_STATUS_IS_OK(status)) {
		f->lock_count++;
	}

	/* if we have failed and timed out, or succeeded, then we
	   don't need the pending lock any more */
	if (NT_STATUS_IS_OK(status) || timed_out) {
		NTSTATUS status2;
		status2 = brl_remove_pending(pvfs->brl_context, &f->locking_key, pending);
		if (!NT_STATUS_IS_OK(status2)) {
			DEBUG(0,("pvfs_lock: failed to remove pending lock - %s\n", nt_errstr(status2)));
		}
		talloc_free(pending->wait_handle);
	}

	if (!NT_STATUS_IS_OK(status)) {
		if (timed_out) {
			/* no more chances */
			pvfs_lock_async_failed(pvfs, req, f, locks, pending->pending_lock, status);
		} else {
			/* we can try again */
			DLIST_ADD(f->pending_list, pending);
		}
		return;
	}

	/* if we haven't timed out yet, then we can do more pending locks */
	if (timed_out) {
		pending = NULL;
	} else {
		if (rw == READ_LOCK) {
			rw = PENDING_READ_LOCK;
		} else {
			rw = PENDING_WRITE_LOCK;
		}
	}

	/* we've now got the pending lock. try and get the rest, which might
	   lead to more pending locks */
	for (i=pending->pending_lock+1;i<lck->lockx.in.lock_cnt;i++) {
		if (pending) {
			pending->pending_lock = i;
		}

		status = brl_lock(pvfs->brl_context,
				  &f->locking_key,
				  req->smbpid,
				  f->fnum,
				  locks[i].offset,
				  locks[i].count,
				  rw, pending);
		if (!NT_STATUS_IS_OK(status)) {
			if (pending) {
				/* a timed lock failed - setup a wait message to handle
				   the pending lock notification or a timeout */
				pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY,
									 pending->end_time,
									 pvfs_pending_lock_continue,
									 pending);
				if (pending->wait_handle == NULL) {
					pvfs_lock_async_failed(pvfs, req, f, locks, i, NT_STATUS_NO_MEMORY);
				} else {
					DLIST_ADD(f->pending_list, pending);
				}
				return;
			}
			pvfs_lock_async_failed(pvfs, req, f, locks, i, status);
			return;
		}

		f->lock_count++;
	}

	/* we've managed to get all the locks. Tell the client */
	req->async.status = NT_STATUS_OK;
	req->async.send_fn(req);
}


/*
  called when we close a file that might have locks
*/
void pvfs_lock_close(struct pvfs_state *pvfs, struct pvfs_file *f)
{
	struct pvfs_pending_lock *p, *next;

	if (f->lock_count || f->pending_list) {
		DEBUG(5,("pvfs_lock: removing %.0f locks on close\n",
			 (double)f->lock_count));
		brl_close(f->pvfs->brl_context, &f->locking_key, f->fnum);
		f->lock_count = 0;
	}

	/* reply to all the pending lock requests, telling them the
	   lock failed */
	for (p=f->pending_list;p;p=next) {
		next = p->next;
		DLIST_REMOVE(f->pending_list, p);
		talloc_free(p->wait_handle);
		p->req->async.status = NT_STATUS_RANGE_NOT_LOCKED;
		p->req->async.send_fn(p->req);
	}
}


/*
  cancel a set of locks
*/
static NTSTATUS pvfs_lock_cancel(struct pvfs_state *pvfs, struct smbsrv_request *req, union smb_lock *lck,
				 struct pvfs_file *f)
{
	struct pvfs_pending_lock *p;

	for (p=f->pending_list;p;p=p->next) {
		/* check if the lock request matches exactly - you can only cancel with exact matches */
		if (p->lck->lockx.in.ulock_cnt == lck->lockx.in.ulock_cnt &&
		    p->lck->lockx.in.lock_cnt == lck->lockx.in.lock_cnt &&
		    p->lck->lockx.in.fnum == lck->lockx.in.fnum &&
		    p->lck->lockx.in.mode == (lck->lockx.in.mode & ~LOCKING_ANDX_CANCEL_LOCK)) {
			int i;

			for (i=0;i<lck->lockx.in.ulock_cnt + lck->lockx.in.lock_cnt;i++) {
				if (p->lck->lockx.in.locks[i].pid != lck->lockx.in.locks[i].pid ||
				    p->lck->lockx.in.locks[i].offset != lck->lockx.in.locks[i].offset ||
				    p->lck->lockx.in.locks[i].count != lck->lockx.in.locks[i].count) {
					break;
				}
			}
			if (i < lck->lockx.in.ulock_cnt) continue;

			/* an exact match! we can cancel it, which is equivalent
			   to triggering the timeout early */
			pvfs_pending_lock_continue(p, True);
			return NT_STATUS_OK;
		}
	}

	return NT_STATUS_UNSUCCESSFUL;
}


/*
  lock or unlock a byte range
*/
NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
		   struct smbsrv_request *req, union smb_lock *lck)
{
	struct pvfs_state *pvfs = ntvfs->private_data;
	struct pvfs_file *f;
	struct smb_lock_entry *locks;
	int i;
	enum brl_type rw;
	struct pvfs_pending_lock *pending = NULL;
	NTSTATUS status;

	if (lck->generic.level != RAW_LOCK_GENERIC) {
		return ntvfs_map_lock(req, lck, ntvfs);
	}

	f = pvfs_find_fd(pvfs, req, lck->lockx.in.fnum);
	if (!f) {
		return NT_STATUS_INVALID_HANDLE;
	}

	if (f->name->dos.attrib & FILE_ATTRIBUTE_DIRECTORY) {
		return NT_STATUS_FILE_IS_A_DIRECTORY;
	}
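
	/* only consider going async if the client supplied a timeout and the
	   front end has said we may process this request asynchronously
	   (REQ_CONTROL_MAY_ASYNC) */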
	if (lck->lockx.in.timeout != 0 &&
	    (req->control_flags & REQ_CONTROL_MAY_ASYNC)) {
		pending = talloc_p(req, struct pvfs_pending_lock);
		if (pending == NULL) {
			return NT_STATUS_NO_MEMORY;
		}

		pending->pvfs = pvfs;
		pending->lck = lck;
		pending->f = f;
		pending->req = req;

		/* round up to the nearest second */
		pending->end_time = time(NULL) + ((lck->lockx.in.timeout+999)/1000);
	}
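
	/* a pending (blocking) lock uses the pending lock types, so we can be
	   notified (MSG_BRL_RETRY) when a conflicting lock is released */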
	if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
		rw = pending? PENDING_READ_LOCK : READ_LOCK;
	} else {
		rw = pending? PENDING_WRITE_LOCK : WRITE_LOCK;
	}

	if (lck->lockx.in.mode & LOCKING_ANDX_CANCEL_LOCK) {
		return pvfs_lock_cancel(pvfs, req, lck, f);
	}

	if (lck->lockx.in.mode & LOCKING_ANDX_CHANGE_LOCKTYPE) {
		/* this seems to not be supported by any windows server,
		   or used by any clients */
		return NT_STATUS_UNSUCCESSFUL;
	}

	if (lck->lockx.in.mode & LOCKING_ANDX_OPLOCK_RELEASE) {
		DEBUG(0,("received unexpected oplock break\n"));
		return NT_STATUS_NOT_IMPLEMENTED;
	}


	/* the unlocks happen first */
	locks = lck->lockx.in.locks;

	for (i=0;i<lck->lockx.in.ulock_cnt;i++) {
		status = brl_unlock(pvfs->brl_context,
				    &f->locking_key,
				    locks[i].pid,
				    f->fnum,
				    locks[i].offset,
				    locks[i].count);
		if (!NT_STATUS_IS_OK(status)) {
			return status;
		}
		f->lock_count--;
	}
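
	/* the lock entries follow the unlock entries in the request buffer */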
	locks += i;

	for (i=0;i<lck->lockx.in.lock_cnt;i++) {
		if (pending) {
			pending->pending_lock = i;
		}

		status = brl_lock(pvfs->brl_context,
				  &f->locking_key,
				  locks[i].pid,
				  f->fnum,
				  locks[i].offset,
				  locks[i].count,
				  rw, pending);
		if (!NT_STATUS_IS_OK(status)) {
			if (pending) {
				/* a timed lock failed - setup a wait message to handle
				   the pending lock notification or a timeout */
				pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY,
									 pending->end_time,
									 pvfs_pending_lock_continue,
									 pending);
				if (pending->wait_handle == NULL) {
					return NT_STATUS_NO_MEMORY;
				}
				DLIST_ADD(f->pending_list, pending);
				return NT_STATUS_OK;
			}
			/* undo the locks we just did */
			for (i=i-1;i>=0;i--) {
				brl_unlock(pvfs->brl_context,
					   &f->locking_key,
					   locks[i].pid,
					   f->fnum,
					   locks[i].offset,
					   locks[i].count);
				f->lock_count--;
			}
			return status;
		}
		f->lock_count++;
	}

	return NT_STATUS_OK;
}