Mirror of https://github.com/samba-team/samba.git
smbd: Remove unused brlock code
No PENDING locks in brlock.tdb anymore.

Signed-off-by: Volker Lendecke <vl@samba.org>
Reviewed-by: Jeremy Allison <jra@samba.org>

parent b3284bef35
commit 871bb7562c
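In short: blocked (PENDING) byte-range lock requests are no longer recorded in brlock.tdb, so the PENDING lock types and every code path that special-cased them go away. A minimal before/after sketch of the brl_type enum, taken verbatim from the header hunk further below (the IS_PENDING_LOCK() macro built on these values is removed as well):

enum brl_type {READ_LOCK, WRITE_LOCK, PENDING_READ_LOCK, PENDING_WRITE_LOCK, UNLOCK_LOCK};  /* before */
enum brl_type {READ_LOCK, WRITE_LOCK, UNLOCK_LOCK};                                         /* after  */

With these values gone, every IS_PENDING_LOCK() test and every MSG_SMB_UNLOCK wake-up loop shown in the hunks below becomes dead code, which appears to be what most of the removals in this diff amount to.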
@@ -67,7 +67,7 @@ interface messaging
MSG_SMB_FORCE_TDIS = 0x0302,
/* MSG_SMB_SAM_SYNC = 0x0303, Obsoleted */
/* MSG_SMB_SAM_REPL = 0x0304, Obsoleted */
MSG_SMB_UNLOCK = 0x0305,
/* MSG_SMB_UNLOCK = 0x0305, Obsoleted */
MSG_SMB_BREAK_REQUEST = 0x0306,
/* MSG_SMB_BREAK_RESPONSE = 0x0307, Obsoleted */
/* MSG_SMB_ASYNC_LEVEL2_BREAK = 0x0308, Obsoleted */
@@ -83,7 +83,7 @@ interface messaging
MSG_PVFS_NOTIFY = 0x0310,

/* cluster reconfigure events */
MSG_SMB_BRL_VALIDATE = 0x0311,
/* MSG_SMB_BRL_VALIDATE = 0x0311, Oboleted */

/*Close a specific file given a share entry. */
MSG_SMB_CLOSE_FILE = 0x0313,
@@ -26,11 +26,9 @@
PENDING read and write locks to allow posix lock downgrades to trigger a lock
re-evaluation. */

enum brl_type {READ_LOCK, WRITE_LOCK, PENDING_READ_LOCK, PENDING_WRITE_LOCK, UNLOCK_LOCK};
enum brl_type {READ_LOCK, WRITE_LOCK, UNLOCK_LOCK};
enum brl_flavour {WINDOWS_LOCK = 0, POSIX_LOCK = 1};

#define IS_PENDING_LOCK(type) ((type) == PENDING_READ_LOCK || (type) == PENDING_WRITE_LOCK)

#include "librpc/gen_ndr/server_id.h"

/* This contains elements that differentiate locks. The smbpid is a
@@ -996,9 +996,6 @@ void unbecome_root(void);
/* The following definitions come from lib/smbd_shim.c */

int find_service(TALLOC_CTX *ctx, const char *service_in, char **p_service_out);
void cancel_pending_lock_requests_by_fid(files_struct *fsp,
struct byte_range_lock *br_lck,
enum file_close_type close_type);
void send_stat_cache_delete_message(struct messaging_context *msg_ctx,
const char *name);
NTSTATUS can_delete_directory_fsp(files_struct *fsp);
@@ -32,16 +32,6 @@ void set_smbd_shim(const struct smbd_shim *shim_functions)
shim = *shim_functions;
}

void cancel_pending_lock_requests_by_fid(files_struct *fsp,
struct byte_range_lock *br_lck,
enum file_close_type close_type)
{
if (shim.cancel_pending_lock_requests_by_fid) {

shim.cancel_pending_lock_requests_by_fid(fsp, br_lck, close_type);
}
}

void send_stat_cache_delete_message(struct messaging_context *msg_ctx,
const char *name)
{
@@ -29,9 +29,6 @@

struct smbd_shim
{
void (*cancel_pending_lock_requests_by_fid)(files_struct *fsp,
struct byte_range_lock *br_lck,
enum file_close_type close_type);
void (*send_stat_cache_delete_message)(struct messaging_context *msg_ctx,
const char *name);
@@ -142,10 +142,6 @@ static bool brl_overlap(const struct lock_struct *lck1,
static bool brl_conflict(const struct lock_struct *lck1,
const struct lock_struct *lck2)
{
/* Ignore PENDING locks. */
if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
return False;

/* Read locks never conflict. */
if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
return False;
@@ -176,10 +172,6 @@ static bool brl_conflict_posix(const struct lock_struct *lck1,
SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

/* Ignore PENDING locks. */
if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
return False;

/* Read locks never conflict. */
if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
return False;
@@ -199,9 +191,6 @@ static bool brl_conflict_posix(const struct lock_struct *lck1,
static bool brl_conflict1(const struct lock_struct *lck1,
const struct lock_struct *lck2)
{
if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
return False;

if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
return False;
}
@@ -233,11 +222,6 @@ static bool brl_conflict1(const struct lock_struct *lck1,
static bool brl_conflict_other(const struct lock_struct *lock,
const struct lock_struct *rw_probe)
{
if (IS_PENDING_LOCK(lock->lock_type) ||
IS_PENDING_LOCK(rw_probe->lock_type)) {
return False;
}

if (lock->lock_type == READ_LOCK && rw_probe->lock_type == READ_LOCK) {
return False;
}
@@ -291,24 +275,6 @@ static bool brl_conflict_other(const struct lock_struct *lock,
return false;
}

/****************************************************************************
Check if an unlock overlaps a pending lock.
****************************************************************************/

static bool brl_pending_overlap(const struct lock_struct *lock,
const struct lock_struct *pend_lock)
{
if ((lock->start <= pend_lock->start) &&
(lock->start + lock->size > pend_lock->start)) {
return true;
}
if ((lock->start >= pend_lock->start) &&
(lock->start < pend_lock->start + pend_lock->size)) {
return true;
}
return false;
}

/****************************************************************************
Open up the brlock.tdb database.
****************************************************************************/
@@ -418,15 +384,13 @@ NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck,
#endif
}

if (!IS_PENDING_LOCK(plock->lock_type)) {
contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
}
contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WINDOWS_BRL);

/* We can get the Windows lock, now see if it needs to
be mapped into a lower level POSIX one, and if so can
we get it ? */

if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(fsp->conn->params)) {
if (lp_posix_locking(fsp->conn->params)) {
int errno_ret;
if (!set_posix_lock_windows_flavour(fsp,
plock->start,
@@ -465,9 +429,7 @@ NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck,

return NT_STATUS_OK;
fail:
if (!IS_PENDING_LOCK(plock->lock_type)) {
contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
}
contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
return status;
}
@@ -772,7 +734,6 @@ static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx,
unsigned int i, count, posix_count;
struct lock_struct *locks = br_lck->lock_data;
struct lock_struct *tp;
bool signal_pending_read = False;
bool break_oplocks = false;
NTSTATUS status;
@@ -800,13 +761,6 @@ static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx,
for (i=0; i < br_lck->num_locks; i++) {
struct lock_struct *curr_lock = &locks[i];

/* If we have a pending read lock, a lock downgrade should
trigger a lock re-evaluation. */
if (curr_lock->lock_type == PENDING_READ_LOCK &&
brl_pending_overlap(plock, curr_lock)) {
signal_pending_read = True;
}

if (curr_lock->lock_flav == WINDOWS_LOCK) {
/* Do any Windows flavour locks conflict ? */
if (brl_conflict(curr_lock, plock)) {
@@ -857,8 +811,7 @@ static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx,
* call contend_level2_oplocks_begin if this is the first POSIX brl on
* the file.
*/
break_oplocks = (!IS_PENDING_LOCK(plock->lock_type) &&
posix_count == 0);
break_oplocks = (posix_count == 0);
if (break_oplocks) {
contend_level2_oplocks_begin(br_lck->fsp,
LEVEL2_CONTEND_POSIX_BRL);
@@ -884,7 +837,7 @@ static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx,
be mapped into a lower level POSIX one, and if so can
we get it ? */

if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(br_lck->fsp->conn->params)) {
if (lp_posix_locking(br_lck->fsp->conn->params)) {
int errno_ret;

/* The lower layer just needs to attempt to
@@ -932,31 +885,6 @@ static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx,
/* A successful downgrade from write to read lock can trigger a lock
re-evalutation where waiting readers can now proceed. */

if (signal_pending_read) {
/* Send unlock messages to any pending read waiters that overlap. */
for (i=0; i < br_lck->num_locks; i++) {
struct lock_struct *pend_lock = &locks[i];

/* Ignore non-pending locks. */
if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
continue;
}

if (pend_lock->lock_type == PENDING_READ_LOCK &&
brl_pending_overlap(plock, pend_lock)) {
struct server_id_buf tmp;

DEBUG(10, ("brl_lock_posix: sending unlock "
"message to pid %s\n",
server_id_str_buf(pend_lock->context.pid,
&tmp)));

messaging_send(msg_ctx, pend_lock->context.pid,
MSG_SMB_UNLOCK, &data_blob_null);
}
}
}

return NT_STATUS_OK;
fail:
if (break_oplocks) {
@@ -1053,7 +981,7 @@ bool brl_unlock_windows_default(struct messaging_context *msg_ctx,
struct byte_range_lock *br_lck,
const struct lock_struct *plock)
{
unsigned int i, j;
unsigned int i;
struct lock_struct *locks = br_lck->lock_data;
enum brl_type deleted_lock_type = READ_LOCK; /* shut the compiler up.... */
@@ -1088,10 +1016,6 @@ bool brl_unlock_windows_default(struct messaging_context *msg_ctx,
for (i = 0; i < br_lck->num_locks; i++) {
struct lock_struct *lock = &locks[i];

if (IS_PENDING_LOCK(lock->lock_type)) {
continue;
}

/* Only remove our own locks that match in start, size, and flavour. */
if (brl_same_context(&lock->context, &plock->context) &&
lock->fnum == plock->fnum &&
@@ -1127,29 +1051,6 @@ bool brl_unlock_windows_default(struct messaging_context *msg_ctx,
br_lck->num_locks);
}

/* Send unlock messages to any pending waiters that overlap. */
for (j=0; j < br_lck->num_locks; j++) {
struct lock_struct *pend_lock = &locks[j];

/* Ignore non-pending locks. */
if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
continue;
}

/* We could send specific lock info here... */
if (brl_pending_overlap(plock, pend_lock)) {
struct server_id_buf tmp;

DEBUG(10, ("brl_unlock: sending unlock message to "
"pid %s\n",
server_id_str_buf(pend_lock->context.pid,
&tmp)));

messaging_send(msg_ctx, pend_lock->context.pid,
MSG_SMB_UNLOCK, &data_blob_null);
}
}

contend_level2_oplocks_end(br_lck->fsp, LEVEL2_CONTEND_WINDOWS_BRL);
return True;
}
@@ -1162,7 +1063,7 @@ static bool brl_unlock_posix(struct messaging_context *msg_ctx,
struct byte_range_lock *br_lck,
struct lock_struct *plock)
{
unsigned int i, j, count;
unsigned int i, count;
struct lock_struct *tp;
struct lock_struct *locks = br_lck->lock_data;
bool overlap_found = False;
@@ -1195,8 +1096,7 @@ static bool brl_unlock_posix(struct messaging_context *msg_ctx,
unsigned int tmp_count;

/* Only remove our own locks - ignore fnum. */
if (IS_PENDING_LOCK(lock->lock_type) ||
!brl_same_context(&lock->context, &plock->context)) {
if (!brl_same_context(&lock->context, &plock->context)) {
memcpy(&tp[count], lock, sizeof(struct lock_struct));
count++;
continue;
@@ -1287,30 +1187,6 @@ static bool brl_unlock_posix(struct messaging_context *msg_ctx,
br_lck->lock_data = tp;
br_lck->modified = True;

/* Send unlock messages to any pending waiters that overlap. */

for (j=0; j < br_lck->num_locks; j++) {
struct lock_struct *pend_lock = &locks[j];

/* Ignore non-pending locks. */
if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
continue;
}

/* We could send specific lock info here... */
if (brl_pending_overlap(plock, pend_lock)) {
struct server_id_buf tmp;

DEBUG(10, ("brl_unlock: sending unlock message to "
"pid %s\n",
server_id_str_buf(pend_lock->context.pid,
&tmp)));

messaging_send(msg_ctx, pend_lock->context.pid,
MSG_SMB_UNLOCK, &data_blob_null);
}
}

return True;
}
@ -1498,69 +1374,10 @@ bool smb_vfs_call_brl_cancel_windows(struct vfs_handle_struct *handle,
|
||||
return handle->fns->brl_cancel_windows_fn(handle, br_lck, plock);
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
Remove a particular pending lock.
|
||||
****************************************************************************/
|
||||
bool brl_lock_cancel(struct byte_range_lock *br_lck,
|
||||
uint64_t smblctx,
|
||||
struct server_id pid,
|
||||
br_off start,
|
||||
br_off size,
|
||||
enum brl_flavour lock_flav)
|
||||
{
|
||||
bool ret;
|
||||
struct lock_struct lock;
|
||||
|
||||
lock.context.smblctx = smblctx;
|
||||
lock.context.pid = pid;
|
||||
lock.context.tid = br_lck->fsp->conn->cnum;
|
||||
lock.start = start;
|
||||
lock.size = size;
|
||||
lock.fnum = br_lck->fsp->fnum;
|
||||
lock.lock_flav = lock_flav;
|
||||
/* lock.lock_type doesn't matter */
|
||||
|
||||
if (lock_flav == WINDOWS_LOCK) {
|
||||
ret = SMB_VFS_BRL_CANCEL_WINDOWS(br_lck->fsp->conn, br_lck,
|
||||
&lock);
|
||||
} else {
|
||||
ret = brl_lock_cancel_default(br_lck, &lock);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool brl_lock_cancel_default(struct byte_range_lock *br_lck,
|
||||
struct lock_struct *plock)
|
||||
{
|
||||
unsigned int i;
|
||||
struct lock_struct *locks = br_lck->lock_data;
|
||||
|
||||
SMB_ASSERT(plock);
|
||||
|
||||
for (i = 0; i < br_lck->num_locks; i++) {
|
||||
struct lock_struct *lock = &locks[i];
|
||||
|
||||
/* For pending locks we *always* care about the fnum. */
|
||||
if (brl_same_context(&lock->context, &plock->context) &&
|
||||
lock->fnum == plock->fnum &&
|
||||
IS_PENDING_LOCK(lock->lock_type) &&
|
||||
lock->lock_flav == plock->lock_flav &&
|
||||
lock->start == plock->start &&
|
||||
lock->size == plock->size) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (i == br_lck->num_locks) {
|
||||
/* Didn't find it. */
|
||||
return False;
|
||||
}
|
||||
|
||||
brl_delete_lock_struct(locks, br_lck->num_locks, i);
|
||||
br_lck->num_locks -= 1;
|
||||
br_lck->modified = True;
|
||||
return True;
|
||||
return false;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
@ -2083,105 +1900,6 @@ struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
|
||||
return br_lock;
|
||||
}
|
||||
|
||||
struct brl_revalidate_state {
|
||||
ssize_t array_size;
|
||||
uint32_t num_pids;
|
||||
struct server_id *pids;
|
||||
};
|
||||
|
||||
/*
|
||||
* Collect PIDs of all processes with pending entries
|
||||
*/
|
||||
|
||||
static void brl_revalidate_collect(struct file_id id, struct server_id pid,
|
||||
enum brl_type lock_type,
|
||||
enum brl_flavour lock_flav,
|
||||
br_off start, br_off size,
|
||||
void *private_data)
|
||||
{
|
||||
struct brl_revalidate_state *state =
|
||||
(struct brl_revalidate_state *)private_data;
|
||||
|
||||
if (!IS_PENDING_LOCK(lock_type)) {
|
||||
return;
|
||||
}
|
||||
|
||||
add_to_large_array(state, sizeof(pid), (void *)&pid,
|
||||
&state->pids, &state->num_pids,
|
||||
&state->array_size);
|
||||
}
|
||||
|
||||
/*
|
||||
* qsort callback to sort the processes
|
||||
*/
|
||||
|
||||
static int compare_procids(const void *p1, const void *p2)
|
||||
{
|
||||
const struct server_id *i1 = (const struct server_id *)p1;
|
||||
const struct server_id *i2 = (const struct server_id *)p2;
|
||||
|
||||
if (i1->pid < i2->pid) return -1;
|
||||
if (i1->pid > i2->pid) return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Send a MSG_SMB_UNLOCK message to all processes with pending byte range
|
||||
* locks so that they retry. Mainly used in the cluster code after a node has
|
||||
* died.
|
||||
*
|
||||
* Done in two steps to avoid double-sends: First we collect all entries in an
|
||||
* array, then qsort that array and only send to non-dupes.
|
||||
*/
|
||||
|
||||
void brl_revalidate(struct messaging_context *msg_ctx,
|
||||
void *private_data,
|
||||
uint32_t msg_type,
|
||||
struct server_id server_id,
|
||||
DATA_BLOB *data)
|
||||
{
|
||||
struct brl_revalidate_state *state;
|
||||
uint32_t i;
|
||||
struct server_id last_pid;
|
||||
|
||||
if (!(state = talloc_zero(NULL, struct brl_revalidate_state))) {
|
||||
DEBUG(0, ("talloc failed\n"));
|
||||
return;
|
||||
}
|
||||
|
||||
brl_forall(brl_revalidate_collect, state);
|
||||
|
||||
if (state->array_size == -1) {
|
||||
DEBUG(0, ("talloc failed\n"));
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (state->num_pids == 0) {
|
||||
goto done;
|
||||
}
|
||||
|
||||
TYPESAFE_QSORT(state->pids, state->num_pids, compare_procids);
|
||||
|
||||
ZERO_STRUCT(last_pid);
|
||||
|
||||
for (i=0; i<state->num_pids; i++) {
|
||||
if (serverid_equal(&last_pid, &state->pids[i])) {
|
||||
/*
|
||||
* We've seen that one already
|
||||
*/
|
||||
continue;
|
||||
}
|
||||
|
||||
messaging_send(msg_ctx, state->pids[i], MSG_SMB_UNLOCK,
|
||||
&data_blob_null);
|
||||
last_pid = state->pids[i];
|
||||
}
|
||||
|
||||
done:
|
||||
TALLOC_FREE(state);
|
||||
return;
|
||||
}
|
||||
|
||||
bool brl_cleanup_disconnected(struct file_id fid, uint64_t open_persistent_id)
|
||||
{
|
||||
bool ret = false;
|
||||
|
@ -66,10 +66,6 @@ const char *lock_type_name(enum brl_type lock_type)
|
||||
return "READ";
|
||||
case WRITE_LOCK:
|
||||
return "WRITE";
|
||||
case PENDING_READ_LOCK:
|
||||
return "PENDING_READ";
|
||||
case PENDING_WRITE_LOCK:
|
||||
return "PENDING_WRITE";
|
||||
default:
|
||||
return "other";
|
||||
}
|
||||
@ -358,55 +354,6 @@ NTSTATUS do_unlock(struct messaging_context *msg_ctx,
|
||||
return NT_STATUS_OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
Cancel any pending blocked locks.
|
||||
****************************************************************************/
|
||||
|
||||
NTSTATUS do_lock_cancel(files_struct *fsp,
|
||||
uint64_t smblctx,
|
||||
uint64_t count,
|
||||
uint64_t offset,
|
||||
enum brl_flavour lock_flav)
|
||||
{
|
||||
bool ok = False;
|
||||
struct byte_range_lock *br_lck = NULL;
|
||||
|
||||
if (!fsp->can_lock) {
|
||||
return fsp->is_directory ?
|
||||
NT_STATUS_INVALID_DEVICE_REQUEST : NT_STATUS_INVALID_HANDLE;
|
||||
}
|
||||
|
||||
if (!lp_locking(fsp->conn->params)) {
|
||||
return NT_STATUS_DOS(ERRDOS, ERRcancelviolation);
|
||||
}
|
||||
|
||||
DEBUG(10, ("do_lock_cancel: cancel start=%ju len=%ju requested for "
|
||||
"%s file %s\n", (uintmax_t)offset, (uintmax_t)count,
|
||||
fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));
|
||||
|
||||
br_lck = brl_get_locks(talloc_tos(), fsp);
|
||||
if (!br_lck) {
|
||||
return NT_STATUS_NO_MEMORY;
|
||||
}
|
||||
|
||||
ok = brl_lock_cancel(br_lck,
|
||||
smblctx,
|
||||
messaging_server_id(fsp->conn->sconn->msg_ctx),
|
||||
offset,
|
||||
count,
|
||||
lock_flav);
|
||||
|
||||
TALLOC_FREE(br_lck);
|
||||
|
||||
if (!ok) {
|
||||
DEBUG(10,("do_lock_cancel: returning ERRcancelviolation.\n" ));
|
||||
return NT_STATUS_DOS(ERRDOS, ERRcancelviolation);
|
||||
}
|
||||
|
||||
decrement_current_lock_count(fsp, lock_flav);
|
||||
return NT_STATUS_OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
Remove any locks on this fd. Called from file_close().
|
||||
****************************************************************************/
|
||||
|
@ -68,12 +68,6 @@ NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
|
||||
br_off *psize,
|
||||
enum brl_type *plock_type,
|
||||
enum brl_flavour lock_flav);
|
||||
bool brl_lock_cancel(struct byte_range_lock *br_lck,
|
||||
uint64_t smblctx,
|
||||
struct server_id pid,
|
||||
br_off start,
|
||||
br_off size,
|
||||
enum brl_flavour lock_flav);
|
||||
bool brl_lock_cancel_default(struct byte_range_lock *br_lck,
|
||||
struct lock_struct *plock);
|
||||
bool brl_mark_disconnected(struct files_struct *fsp);
|
||||
@ -89,11 +83,6 @@ int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
|
||||
struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
|
||||
files_struct *fsp);
|
||||
struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp);
|
||||
void brl_revalidate(struct messaging_context *msg_ctx,
|
||||
void *private_data,
|
||||
uint32_t msg_type,
|
||||
struct server_id server_id,
|
||||
DATA_BLOB *data);
|
||||
bool brl_cleanup_disconnected(struct file_id fid, uint64_t open_persistent_id);
|
||||
|
||||
/* The following definitions come from locking/locking.c */
|
||||
@ -131,11 +120,6 @@ NTSTATUS do_unlock(struct messaging_context *msg_ctx,
|
||||
uint64_t count,
|
||||
uint64_t offset,
|
||||
enum brl_flavour lock_flav);
|
||||
NTSTATUS do_lock_cancel(files_struct *fsp,
|
||||
uint64_t smblctx,
|
||||
uint64_t count,
|
||||
uint64_t offset,
|
||||
enum brl_flavour lock_flav);
|
||||
void locking_close_file(struct messaging_context *msg_ctx,
|
||||
files_struct *fsp,
|
||||
enum file_close_type close_type);
|
||||
|
@ -28,34 +28,6 @@
|
||||
#undef DBGC_CLASS
|
||||
#define DBGC_CLASS DBGC_LOCKING
|
||||
|
||||
static void received_unlock_msg(struct messaging_context *msg,
|
||||
void *private_data,
|
||||
uint32_t msg_type,
|
||||
struct server_id server_id,
|
||||
DATA_BLOB *data);
|
||||
|
||||
void brl_timeout_fn(struct tevent_context *event_ctx,
|
||||
struct tevent_timer *te,
|
||||
struct timeval now,
|
||||
void *private_data)
|
||||
{
|
||||
struct smbd_server_connection *sconn = talloc_get_type_abort(
|
||||
private_data, struct smbd_server_connection);
|
||||
|
||||
if (sconn->using_smb2) {
|
||||
SMB_ASSERT(sconn->smb2.locks.brl_timeout == te);
|
||||
TALLOC_FREE(sconn->smb2.locks.brl_timeout);
|
||||
} else {
|
||||
SMB_ASSERT(sconn->smb1.locks.brl_timeout == te);
|
||||
TALLOC_FREE(sconn->smb1.locks.brl_timeout);
|
||||
}
|
||||
|
||||
change_to_root_user(); /* TODO: Possibly run all timed events as
|
||||
* root */
|
||||
|
||||
process_blocking_lock_queue(sconn);
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
We need a version of timeval_min that treats zero timval as infinite.
|
||||
****************************************************************************/
|
||||
@ -72,928 +44,6 @@ struct timeval timeval_brl_min(const struct timeval *tv1,
|
||||
return timeval_min(tv1, tv2);
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
After a change to blocking_lock_queue, recalculate the timed_event for the
|
||||
next processing.
|
||||
****************************************************************************/
|
||||
|
||||
static bool recalc_brl_timeout(struct smbd_server_connection *sconn)
|
||||
{
|
||||
struct blocking_lock_record *blr;
|
||||
struct timeval next_timeout;
|
||||
int max_brl_timeout = lp_parm_int(-1, "brl", "recalctime", 5);
|
||||
|
||||
TALLOC_FREE(sconn->smb1.locks.brl_timeout);
|
||||
|
||||
next_timeout = timeval_zero();
|
||||
|
||||
for (blr = sconn->smb1.locks.blocking_lock_queue; blr; blr = blr->next) {
|
||||
if (timeval_is_zero(&blr->expire_time)) {
|
||||
/*
|
||||
* If we're blocked on pid 0xFFFFFFFFFFFFFFFFLL this is
|
||||
* a POSIX lock, so calculate a timeout of
|
||||
* 10 seconds into the future.
|
||||
*/
|
||||
if (blr->blocking_smblctx == 0xFFFFFFFFFFFFFFFFLL) {
|
||||
struct timeval psx_to = timeval_current_ofs(10, 0);
|
||||
next_timeout = timeval_brl_min(&next_timeout, &psx_to);
|
||||
}
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
next_timeout = timeval_brl_min(&next_timeout, &blr->expire_time);
|
||||
}
|
||||
|
||||
if (timeval_is_zero(&next_timeout)) {
|
||||
DEBUG(10, ("Next timeout = Infinite.\n"));
|
||||
return True;
|
||||
}
|
||||
|
||||
/*
|
||||
to account for unclean shutdowns by clients we need a
|
||||
maximum timeout that we use for checking pending locks. If
|
||||
we have any pending locks at all, then check if the pending
|
||||
lock can continue at least every brl:recalctime seconds
|
||||
(default 5 seconds).
|
||||
|
||||
This saves us needing to do a message_send_all() in the
|
||||
SIGCHLD handler in the parent daemon. That
|
||||
message_send_all() caused O(n^2) work to be done when IP
|
||||
failovers happened in clustered Samba, which could make the
|
||||
entire system unusable for many minutes.
|
||||
*/
|
||||
|
||||
if (max_brl_timeout > 0) {
|
||||
struct timeval min_to = timeval_current_ofs(max_brl_timeout, 0);
|
||||
next_timeout = timeval_min(&next_timeout, &min_to);
|
||||
}
|
||||
|
||||
if (DEBUGLVL(10)) {
|
||||
struct timeval cur, from_now;
|
||||
|
||||
cur = timeval_current();
|
||||
from_now = timeval_until(&cur, &next_timeout);
|
||||
DEBUG(10, ("Next timeout = %d.%d seconds from now.\n",
|
||||
(int)from_now.tv_sec, (int)from_now.tv_usec));
|
||||
}
|
||||
|
||||
sconn->smb1.locks.brl_timeout = tevent_add_timer(sconn->ev_ctx,
|
||||
NULL, next_timeout,
|
||||
brl_timeout_fn, sconn);
|
||||
if (sconn->smb1.locks.brl_timeout == NULL) {
|
||||
return False;
|
||||
}
|
||||
|
||||
return True;
|
||||
}
|
||||
|
||||
|
||||
/****************************************************************************
|
||||
Function to push a blocking lock request onto the lock queue.
|
||||
****************************************************************************/
|
||||
|
||||
bool push_blocking_lock_request( struct byte_range_lock *br_lck,
|
||||
struct smb_request *req,
|
||||
files_struct *fsp,
|
||||
int lock_timeout,
|
||||
int lock_num,
|
||||
uint64_t smblctx,
|
||||
enum brl_type lock_type,
|
||||
enum brl_flavour lock_flav,
|
||||
uint64_t offset,
|
||||
uint64_t count,
|
||||
uint64_t blocking_smblctx)
|
||||
{
|
||||
struct smbd_server_connection *sconn = req->sconn;
|
||||
struct blocking_lock_record *blr;
|
||||
struct server_id blocker_pid;
|
||||
NTSTATUS status;
|
||||
|
||||
if (req->smb2req) {
|
||||
return push_blocking_lock_request_smb2(br_lck,
|
||||
req,
|
||||
fsp,
|
||||
lock_timeout,
|
||||
lock_num,
|
||||
smblctx,
|
||||
lock_type,
|
||||
lock_flav,
|
||||
offset,
|
||||
count,
|
||||
blocking_smblctx);
|
||||
}
|
||||
|
||||
if(req_is_in_chain(req)) {
|
||||
DEBUG(0,("push_blocking_lock_request: cannot queue a chained request (currently).\n"));
|
||||
return False;
|
||||
}
|
||||
|
||||
/*
|
||||
* Now queue an entry on the blocking lock queue. We setup
|
||||
* the expiration time here.
|
||||
*/
|
||||
|
||||
blr = talloc(NULL, struct blocking_lock_record);
|
||||
if (blr == NULL) {
|
||||
DEBUG(0,("push_blocking_lock_request: Malloc fail !\n" ));
|
||||
return False;
|
||||
}
|
||||
|
||||
blr->next = NULL;
|
||||
blr->prev = NULL;
|
||||
|
||||
blr->fsp = fsp;
|
||||
if (lock_timeout == -1) {
|
||||
blr->expire_time.tv_sec = 0;
|
||||
blr->expire_time.tv_usec = 0; /* Never expire. */
|
||||
} else {
|
||||
blr->expire_time = timeval_current_ofs_msec(lock_timeout);
|
||||
}
|
||||
blr->lock_num = lock_num;
|
||||
blr->smblctx = smblctx;
|
||||
blr->blocking_smblctx = blocking_smblctx;
|
||||
blr->lock_flav = lock_flav;
|
||||
blr->lock_type = lock_type;
|
||||
blr->offset = offset;
|
||||
blr->count = count;
|
||||
|
||||
/* Specific brl_lock() implementations can fill this in. */
|
||||
blr->blr_private = NULL;
|
||||
|
||||
/* Add a pending lock record for this. */
|
||||
status = brl_lock(req->sconn->msg_ctx,
|
||||
br_lck,
|
||||
smblctx,
|
||||
messaging_server_id(req->sconn->msg_ctx),
|
||||
offset,
|
||||
count,
|
||||
lock_type == READ_LOCK ? PENDING_READ_LOCK : PENDING_WRITE_LOCK,
|
||||
blr->lock_flav,
|
||||
True,
|
||||
&blocker_pid,
|
||||
NULL);
|
||||
|
||||
if (!NT_STATUS_IS_OK(status)) {
|
||||
DEBUG(0,("push_blocking_lock_request: failed to add PENDING_LOCK record.\n"));
|
||||
TALLOC_FREE(blr);
|
||||
return False;
|
||||
}
|
||||
|
||||
SMB_PERFCOUNT_DEFER_OP(&req->pcd, &req->pcd);
|
||||
blr->req = talloc_move(blr, &req);
|
||||
|
||||
DLIST_ADD_END(sconn->smb1.locks.blocking_lock_queue, blr);
|
||||
recalc_brl_timeout(sconn);
|
||||
|
||||
/* Ensure we'll receive messages when this is unlocked. */
|
||||
if (!sconn->smb1.locks.blocking_lock_unlock_state) {
|
||||
messaging_register(sconn->msg_ctx, sconn,
|
||||
MSG_SMB_UNLOCK, received_unlock_msg);
|
||||
sconn->smb1.locks.blocking_lock_unlock_state = true;
|
||||
}
|
||||
|
||||
DEBUG(3,("push_blocking_lock_request: lock request blocked with "
|
||||
"expiry time (%u sec. %u usec) (+%d msec) for %s, name = %s\n",
|
||||
(unsigned int)blr->expire_time.tv_sec,
|
||||
(unsigned int)blr->expire_time.tv_usec, lock_timeout,
|
||||
fsp_fnum_dbg(blr->fsp), fsp_str_dbg(blr->fsp)));
|
||||
|
||||
return True;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
Return a lockingX success SMB.
|
||||
*****************************************************************************/
|
||||
|
||||
static void reply_lockingX_success(struct blocking_lock_record *blr)
|
||||
{
|
||||
struct smb_request *req = blr->req;
|
||||
|
||||
reply_outbuf(req, 2, 0);
|
||||
SSVAL(req->outbuf, smb_vwv0, 0xff); /* andx chain ends */
|
||||
SSVAL(req->outbuf, smb_vwv1, 0); /* no andx offset */
|
||||
|
||||
/*
|
||||
* As this message is a lockingX call we must handle
|
||||
* any following chained message correctly.
|
||||
* This is normally handled in construct_reply(),
|
||||
* but as that calls switch_message, we can't use
|
||||
* that here and must set up the chain info manually.
|
||||
*/
|
||||
|
||||
if (!srv_send_smb(req->xconn,
|
||||
(char *)req->outbuf,
|
||||
true, req->seqnum+1,
|
||||
IS_CONN_ENCRYPTED(req->conn)||req->encrypted,
|
||||
&req->pcd)) {
|
||||
exit_server_cleanly("reply_lockingX_success: srv_send_smb "
|
||||
"failed.");
|
||||
}
|
||||
|
||||
TALLOC_FREE(req->outbuf);
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
Return a generic lock fail error blocking call.
|
||||
*****************************************************************************/
|
||||
|
||||
static void generic_blocking_lock_error(struct blocking_lock_record *blr, NTSTATUS status)
|
||||
{
|
||||
/* whenever a timeout is given w2k maps LOCK_NOT_GRANTED to
|
||||
FILE_LOCK_CONFLICT! (tridge) */
|
||||
if (NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED)) {
|
||||
status = NT_STATUS_FILE_LOCK_CONFLICT;
|
||||
}
|
||||
|
||||
if (NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
|
||||
/* Store the last lock error. */
|
||||
files_struct *fsp = blr->fsp;
|
||||
|
||||
if (fsp) {
|
||||
fsp->last_lock_failure.context.smblctx = blr->smblctx;
|
||||
fsp->last_lock_failure.context.tid = fsp->conn->cnum;
|
||||
fsp->last_lock_failure.context.pid =
|
||||
messaging_server_id(fsp->conn->sconn->msg_ctx);
|
||||
fsp->last_lock_failure.start = blr->offset;
|
||||
fsp->last_lock_failure.size = blr->count;
|
||||
fsp->last_lock_failure.fnum = fsp->fnum;
|
||||
fsp->last_lock_failure.lock_type = READ_LOCK; /* Don't care. */
|
||||
fsp->last_lock_failure.lock_flav = blr->lock_flav;
|
||||
}
|
||||
}
|
||||
|
||||
reply_nterror(blr->req, status);
|
||||
if (!srv_send_smb(blr->req->xconn, (char *)blr->req->outbuf,
|
||||
true, blr->req->seqnum+1,
|
||||
blr->req->encrypted, NULL)) {
|
||||
exit_server_cleanly("generic_blocking_lock_error: srv_send_smb failed.");
|
||||
}
|
||||
TALLOC_FREE(blr->req->outbuf);
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
Return a lock fail error for a lockingX call. Undo all the locks we have
|
||||
obtained first.
|
||||
*****************************************************************************/
|
||||
|
||||
static void undo_locks_obtained(struct blocking_lock_record *blr)
|
||||
{
|
||||
files_struct *fsp = blr->fsp;
|
||||
uint16_t num_ulocks = SVAL(blr->req->vwv+6, 0);
|
||||
uint64_t count = (uint64_t)0, offset = (uint64_t) 0;
|
||||
uint64_t smblctx;
|
||||
unsigned char locktype = CVAL(blr->req->vwv+3, 0);
|
||||
bool large_file_format = (locktype & LOCKING_ANDX_LARGE_FILES);
|
||||
uint8_t *data;
|
||||
int i;
|
||||
|
||||
data = discard_const_p(uint8_t, blr->req->buf)
|
||||
+ ((large_file_format ? 20 : 10)*num_ulocks);
|
||||
|
||||
/*
|
||||
* Data now points at the beginning of the list
|
||||
* of smb_lkrng structs.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Ensure we don't do a remove on the lock that just failed,
|
||||
* as under POSIX rules, if we have a lock already there, we
|
||||
* will delete it (and we shouldn't) .....
|
||||
*/
|
||||
|
||||
for(i = blr->lock_num - 1; i >= 0; i--) {
|
||||
|
||||
smblctx = get_lock_pid( data, i, large_file_format);
|
||||
count = get_lock_count( data, i, large_file_format);
|
||||
offset = get_lock_offset( data, i, large_file_format);
|
||||
|
||||
/*
|
||||
* We know err cannot be set as if it was the lock
|
||||
* request would never have been queued. JRA.
|
||||
*/
|
||||
|
||||
do_unlock(fsp->conn->sconn->msg_ctx,
|
||||
fsp,
|
||||
smblctx,
|
||||
count,
|
||||
offset,
|
||||
WINDOWS_LOCK);
|
||||
}
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
Return a lock fail error.
|
||||
*****************************************************************************/
|
||||
|
||||
static void blocking_lock_reply_error(struct blocking_lock_record *blr, NTSTATUS status)
|
||||
{
|
||||
DEBUG(10, ("Replying with error=%s. BLR = %p\n", nt_errstr(status), blr));
|
||||
|
||||
switch(blr->req->cmd) {
|
||||
case SMBlockingX:
|
||||
/*
|
||||
* This code can be called during the rundown of a
|
||||
* file after it was already closed. In that case,
|
||||
* blr->fsp==NULL and we do not need to undo any
|
||||
* locks, they are already gone.
|
||||
*/
|
||||
if (blr->fsp != NULL) {
|
||||
undo_locks_obtained(blr);
|
||||
}
|
||||
generic_blocking_lock_error(blr, status);
|
||||
break;
|
||||
case SMBtrans2:
|
||||
case SMBtranss2:
|
||||
reply_nterror(blr->req, status);
|
||||
|
||||
/*
|
||||
* construct_reply_common has done us the favor to pre-fill
|
||||
* the command field with SMBtranss2 which is wrong :-)
|
||||
*/
|
||||
SCVAL(blr->req->outbuf,smb_com,SMBtrans2);
|
||||
|
||||
if (!srv_send_smb(blr->req->xconn,
|
||||
(char *)blr->req->outbuf,
|
||||
true, blr->req->seqnum+1,
|
||||
IS_CONN_ENCRYPTED(blr->fsp->conn),
|
||||
NULL)) {
|
||||
exit_server_cleanly("blocking_lock_reply_error: "
|
||||
"srv_send_smb failed.");
|
||||
}
|
||||
TALLOC_FREE(blr->req->outbuf);
|
||||
break;
|
||||
default:
|
||||
DEBUG(0,("blocking_lock_reply_error: PANIC - unknown type on blocking lock queue - exiting.!\n"));
|
||||
exit_server("PANIC - unknown type on blocking lock queue");
|
||||
}
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
Utility function that returns true if a lock timed out.
|
||||
*****************************************************************************/
|
||||
|
||||
static bool lock_timed_out(const struct blocking_lock_record *blr)
|
||||
{
|
||||
struct timeval tv_curr;
|
||||
|
||||
if (timeval_is_zero(&blr->expire_time)) {
|
||||
return false; /* Never times out. */
|
||||
}
|
||||
|
||||
tv_curr = timeval_current();
|
||||
if (timeval_compare(&blr->expire_time, &tv_curr) <= 0) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
Attempt to finish off getting all pending blocking locks for a lockingX call.
|
||||
Returns True if we want to be removed from the list.
|
||||
*****************************************************************************/
|
||||
|
||||
static bool process_lockingX(struct blocking_lock_record *blr)
|
||||
{
|
||||
unsigned char locktype = CVAL(blr->req->vwv+3, 0);
|
||||
files_struct *fsp = blr->fsp;
|
||||
uint16_t num_ulocks = SVAL(blr->req->vwv+6, 0);
|
||||
uint16_t num_locks = SVAL(blr->req->vwv+7, 0);
|
||||
bool large_file_format = (locktype & LOCKING_ANDX_LARGE_FILES);
|
||||
uint8_t *data;
|
||||
NTSTATUS status = NT_STATUS_OK;
|
||||
bool lock_timeout = lock_timed_out(blr);
|
||||
|
||||
data = discard_const_p(uint8_t, blr->req->buf)
|
||||
+ ((large_file_format ? 20 : 10)*num_ulocks);
|
||||
|
||||
/*
|
||||
* Data now points at the beginning of the list
|
||||
* of smb_lkrng structs.
|
||||
*/
|
||||
|
||||
for(; blr->lock_num < num_locks; blr->lock_num++) {
|
||||
struct byte_range_lock *br_lck = NULL;
|
||||
|
||||
/*
|
||||
* Ensure the blr record gets updated with
|
||||
* any lock we might end up blocked on.
|
||||
*/
|
||||
|
||||
blr->smblctx = get_lock_pid( data, blr->lock_num, large_file_format);
|
||||
blr->count = get_lock_count( data, blr->lock_num, large_file_format);
|
||||
blr->offset = get_lock_offset( data, blr->lock_num, large_file_format);
|
||||
|
||||
/*
|
||||
* We know err cannot be set as if it was the lock
|
||||
* request would never have been queued. JRA.
|
||||
*/
|
||||
errno = 0;
|
||||
br_lck = do_lock(fsp->conn->sconn->msg_ctx,
|
||||
fsp,
|
||||
blr->smblctx,
|
||||
blr->count,
|
||||
blr->offset,
|
||||
((locktype & LOCKING_ANDX_SHARED_LOCK) ?
|
||||
READ_LOCK : WRITE_LOCK),
|
||||
WINDOWS_LOCK,
|
||||
True,
|
||||
&status,
|
||||
NULL,
|
||||
&blr->blocking_smblctx);
|
||||
|
||||
if (ERROR_WAS_LOCK_DENIED(status) && !lock_timeout) {
|
||||
struct server_id blocker_pid;
|
||||
/*
|
||||
* If we didn't timeout, but still need to wait,
|
||||
* re-add the pending lock entry whilst holding
|
||||
* the brlock db lock.
|
||||
*/
|
||||
NTSTATUS status1 =
|
||||
brl_lock(blr->fsp->conn->sconn->msg_ctx,
|
||||
br_lck,
|
||||
blr->smblctx,
|
||||
messaging_server_id(
|
||||
blr->fsp->conn->sconn->msg_ctx),
|
||||
blr->offset,
|
||||
blr->count,
|
||||
blr->lock_type == READ_LOCK ?
|
||||
PENDING_READ_LOCK :
|
||||
PENDING_WRITE_LOCK,
|
||||
blr->lock_flav,
|
||||
true, /* Blocking lock. */
|
||||
&blocker_pid,
|
||||
NULL);
|
||||
|
||||
if (!NT_STATUS_IS_OK(status1)) {
|
||||
DEBUG(0,("failed to add PENDING_LOCK "
|
||||
"record.\n"));
|
||||
}
|
||||
}
|
||||
|
||||
TALLOC_FREE(br_lck);
|
||||
|
||||
if (NT_STATUS_IS_ERR(status)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if(blr->lock_num == num_locks) {
|
||||
/*
|
||||
* Success - we got all the locks.
|
||||
*/
|
||||
|
||||
DEBUG(3,("process_lockingX file = %s, %s, type=%d "
|
||||
"num_locks=%d\n", fsp_str_dbg(fsp), fsp_fnum_dbg(fsp),
|
||||
(unsigned int)locktype, num_locks));
|
||||
|
||||
reply_lockingX_success(blr);
|
||||
return True;
|
||||
}
|
||||
|
||||
if (!ERROR_WAS_LOCK_DENIED(status)) {
|
||||
/*
|
||||
* We have other than a "can't get lock"
|
||||
* error. Free any locks we had and return an error.
|
||||
* Return True so we get dequeued.
|
||||
*/
|
||||
blocking_lock_reply_error(blr, status);
|
||||
return True;
|
||||
}
|
||||
|
||||
/*
|
||||
* Return an error to the client if we timed out.
|
||||
*/
|
||||
if (lock_timeout) {
|
||||
blocking_lock_reply_error(blr,NT_STATUS_FILE_LOCK_CONFLICT);
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Still can't get all the locks - keep waiting.
|
||||
*/
|
||||
|
||||
DEBUG(10, ("process_lockingX: only got %d locks of %d needed for "
|
||||
"file %s, %s. Waiting....\n",
|
||||
blr->lock_num, num_locks, fsp_str_dbg(fsp),
|
||||
fsp_fnum_dbg(fsp)));
|
||||
|
||||
return False;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
Attempt to get the posix lock request from a SMBtrans2 call.
|
||||
Returns True if we want to be removed from the list.
|
||||
*****************************************************************************/
|
||||
|
||||
static bool process_trans2(struct blocking_lock_record *blr)
|
||||
{
|
||||
char params[2];
|
||||
NTSTATUS status;
|
||||
bool lock_timeout = lock_timed_out(blr);
|
||||
|
||||
struct byte_range_lock *br_lck = do_lock(
|
||||
blr->fsp->conn->sconn->msg_ctx,
|
||||
blr->fsp,
|
||||
blr->smblctx,
|
||||
blr->count,
|
||||
blr->offset,
|
||||
blr->lock_type,
|
||||
blr->lock_flav,
|
||||
True,
|
||||
&status,
|
||||
NULL,
|
||||
&blr->blocking_smblctx);
|
||||
if (ERROR_WAS_LOCK_DENIED(status) && !lock_timeout) {
|
||||
struct server_id blocker_pid;
|
||||
/*
|
||||
* If we didn't timeout, but still need to wait,
|
||||
* re-add the pending lock entry whilst holding
|
||||
* the brlock db lock.
|
||||
*/
|
||||
NTSTATUS status1 =
|
||||
brl_lock(blr->fsp->conn->sconn->msg_ctx,
|
||||
br_lck,
|
||||
blr->smblctx,
|
||||
messaging_server_id(
|
||||
blr->fsp->conn->sconn->msg_ctx),
|
||||
blr->offset,
|
||||
blr->count,
|
||||
blr->lock_type == READ_LOCK ?
|
||||
PENDING_READ_LOCK :
|
||||
PENDING_WRITE_LOCK,
|
||||
blr->lock_flav,
|
||||
true, /* Blocking lock. */
|
||||
&blocker_pid,
|
||||
NULL);
|
||||
|
||||
if (!NT_STATUS_IS_OK(status1)) {
|
||||
DEBUG(0,("failed to add PENDING_LOCK record.\n"));
|
||||
}
|
||||
}
|
||||
|
||||
TALLOC_FREE(br_lck);
|
||||
|
||||
if (!NT_STATUS_IS_OK(status)) {
|
||||
if (ERROR_WAS_LOCK_DENIED(status)) {
|
||||
if (lock_timeout) {
|
||||
/*
|
||||
* Return an error if we timed out
|
||||
* and return true to get dequeued.
|
||||
*/
|
||||
blocking_lock_reply_error(blr,
|
||||
NT_STATUS_FILE_LOCK_CONFLICT);
|
||||
return true;
|
||||
}
|
||||
/* Still can't get the lock, just keep waiting. */
|
||||
return False;
|
||||
}
|
||||
/*
|
||||
* We have other than a "can't get lock"
|
||||
* error. Send an error and return True so we get dequeued.
|
||||
*/
|
||||
blocking_lock_reply_error(blr, status);
|
||||
return True;
|
||||
}
|
||||
|
||||
/* We finally got the lock, return success. */
|
||||
|
||||
SSVAL(params,0,0);
|
||||
/* Fake up max_data_bytes here - we know it fits. */
|
||||
send_trans2_replies(blr->fsp->conn, blr->req, NT_STATUS_OK, params, 2, NULL, 0, 0xffff);
|
||||
return True;
|
||||
}
|
||||
|
||||
|
||||
/****************************************************************************
|
||||
Process a blocking lock SMB.
|
||||
Returns True if we want to be removed from the list.
|
||||
*****************************************************************************/
|
||||
|
||||
static bool blocking_lock_record_process(struct blocking_lock_record *blr)
|
||||
{
|
||||
switch(blr->req->cmd) {
|
||||
case SMBlockingX:
|
||||
return process_lockingX(blr);
|
||||
case SMBtrans2:
|
||||
case SMBtranss2:
|
||||
return process_trans2(blr);
|
||||
default:
|
||||
DEBUG(0,("blocking_lock_record_process: PANIC - unknown type on blocking lock queue - exiting.!\n"));
|
||||
exit_server("PANIC - unknown type on blocking lock queue");
|
||||
}
|
||||
return False; /* Keep compiler happy. */
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
Cancel entries by fnum from the blocking lock pending queue.
|
||||
Called when a file is closed.
|
||||
*****************************************************************************/
|
||||
|
||||
void smbd_cancel_pending_lock_requests_by_fid(files_struct *fsp,
|
||||
struct byte_range_lock *br_lck,
|
||||
enum file_close_type close_type)
|
||||
{
|
||||
struct smbd_server_connection *sconn = fsp->conn->sconn;
|
||||
struct blocking_lock_record *blr, *blr_cancelled, *next = NULL;
|
||||
|
||||
if (sconn->using_smb2) {
|
||||
cancel_pending_lock_requests_by_fid_smb2(fsp,
|
||||
br_lck,
|
||||
close_type);
|
||||
return;
|
||||
}
|
||||
|
||||
for(blr = sconn->smb1.locks.blocking_lock_queue; blr; blr = next) {
|
||||
unsigned char locktype = 0;
|
||||
|
||||
next = blr->next;
|
||||
if (blr->fsp->fnum != fsp->fnum) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (blr->req->cmd == SMBlockingX) {
|
||||
locktype = CVAL(blr->req->vwv+3, 0);
|
||||
}
|
||||
|
||||
DEBUG(10, ("remove_pending_lock_requests_by_fid - removing "
|
||||
"request type %d for file %s, %s\n",
|
||||
blr->req->cmd, fsp_str_dbg(fsp), fsp_fnum_dbg(fsp)));
|
||||
|
||||
blr_cancelled = blocking_lock_cancel_smb1(fsp,
|
||||
blr->smblctx,
|
||||
blr->offset,
|
||||
blr->count,
|
||||
blr->lock_flav,
|
||||
locktype,
|
||||
NT_STATUS_RANGE_NOT_LOCKED);
|
||||
|
||||
SMB_ASSERT(blr_cancelled == blr);
|
||||
|
||||
brl_lock_cancel(br_lck,
|
||||
blr->smblctx,
|
||||
messaging_server_id(sconn->msg_ctx),
|
||||
blr->offset,
|
||||
blr->count,
|
||||
blr->lock_flav);
|
||||
|
||||
/* We're closing the file fsp here, so ensure
|
||||
* we don't have a dangling pointer. */
|
||||
blr->fsp = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
Delete entries by mid from the blocking lock pending queue. Always send reply.
|
||||
Only called from the SMB1 cancel code.
|
||||
*****************************************************************************/
|
||||
|
||||
void remove_pending_lock_requests_by_mid_smb1(
|
||||
struct smbd_server_connection *sconn, uint64_t mid)
|
||||
{
|
||||
struct blocking_lock_record *blr, *next = NULL;
|
||||
|
||||
for(blr = sconn->smb1.locks.blocking_lock_queue; blr; blr = next) {
|
||||
files_struct *fsp;
|
||||
struct byte_range_lock *br_lck;
|
||||
|
||||
next = blr->next;
|
||||
|
||||
if (blr->req->mid != mid) {
|
||||
continue;
|
||||
}
|
||||
|
||||
fsp = blr->fsp;
|
||||
br_lck = brl_get_locks(talloc_tos(), fsp);
|
||||
|
||||
if (br_lck) {
|
||||
DEBUG(10, ("remove_pending_lock_requests_by_mid_smb1 - "
|
||||
"removing request type %d for file %s, %s\n",
|
||||
blr->req->cmd, fsp_str_dbg(fsp),
|
||||
fsp_fnum_dbg(fsp)));
|
||||
|
||||
brl_lock_cancel(br_lck,
|
||||
blr->smblctx,
|
||||
messaging_server_id(sconn->msg_ctx),
|
||||
blr->offset,
|
||||
blr->count,
|
||||
blr->lock_flav);
|
||||
TALLOC_FREE(br_lck);
|
||||
}
|
||||
|
||||
blocking_lock_reply_error(blr,NT_STATUS_FILE_LOCK_CONFLICT);
|
||||
DLIST_REMOVE(sconn->smb1.locks.blocking_lock_queue, blr);
|
||||
TALLOC_FREE(blr);
|
||||
}
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
Is this mid a blocking lock request on the queue ?
|
||||
Currently only called from the SMB1 unix extensions POSIX lock code.
|
||||
*****************************************************************************/
|
||||
|
||||
bool blocking_lock_was_deferred_smb1(
|
||||
struct smbd_server_connection *sconn, uint64_t mid)
|
||||
{
|
||||
struct blocking_lock_record *blr, *next = NULL;
|
||||
|
||||
for(blr = sconn->smb1.locks.blocking_lock_queue; blr; blr = next) {
|
||||
next = blr->next;
|
||||
if(blr->req->mid == mid) {
|
||||
return True;
|
||||
}
|
||||
}
|
||||
return False;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
Set a flag as an unlock request affects one of our pending locks.
|
||||
*****************************************************************************/
|
||||
|
||||
static void received_unlock_msg(struct messaging_context *msg,
|
||||
void *private_data,
|
||||
uint32_t msg_type,
|
||||
struct server_id server_id,
|
||||
DATA_BLOB *data)
|
||||
{
|
||||
struct smbd_server_connection *sconn =
|
||||
talloc_get_type_abort(private_data,
|
||||
struct smbd_server_connection);
|
||||
|
||||
DEBUG(10,("received_unlock_msg\n"));
|
||||
process_blocking_lock_queue(sconn);
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
Process the blocking lock queue. Note that this is only called as root.
|
||||
*****************************************************************************/
|
||||
|
||||
void process_blocking_lock_queue(struct smbd_server_connection *sconn)
|
||||
{
|
||||
struct blocking_lock_record *blr, *next = NULL;
|
||||
|
||||
if (sconn->using_smb2) {
|
||||
process_blocking_lock_queue_smb2(sconn, timeval_current());
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Go through the queue and see if we can get any of the locks.
|
||||
*/
|
||||
|
||||
for (blr = sconn->smb1.locks.blocking_lock_queue; blr; blr = next) {
|
||||
struct byte_range_lock *br_lck = NULL;
|
||||
|
||||
next = blr->next;
|
||||
|
||||
/*
|
||||
* Go through the remaining locks and try and obtain them.
|
||||
* The call returns True if all locks were obtained successfully
|
||||
* and False if we still need to wait.
|
||||
*/
|
||||
|
||||
DEBUG(10, ("Processing BLR = %p\n", blr));
|
||||
|
||||
/*
|
||||
* Connections with pending locks are not marked as idle.
|
||||
*/
|
||||
blr->fsp->conn->lastused_count++;
|
||||
|
||||
/*
|
||||
* Remove the pending lock we're waiting on.
|
||||
* If we need to keep waiting blocking_lock_record_process()
|
||||
* will re-add it.
|
||||
*/
|
||||
|
||||
br_lck = brl_get_locks(talloc_tos(), blr->fsp);
|
||||
if (br_lck) {
|
||||
brl_lock_cancel(br_lck,
|
||||
blr->smblctx,
|
||||
messaging_server_id(sconn->msg_ctx),
|
||||
blr->offset,
|
||||
blr->count,
|
||||
blr->lock_flav);
|
||||
}
|
||||
TALLOC_FREE(br_lck);
|
||||
|
||||
if(!blocking_lock_record_process(blr)) {
|
||||
DEBUG(10, ("still waiting for lock. BLR = %p\n", blr));
|
||||
continue;
|
||||
}
|
||||
|
||||
DEBUG(10, ("BLR_process returned true: removing BLR = %p\n",
|
||||
blr));
|
||||
|
||||
DLIST_REMOVE(sconn->smb1.locks.blocking_lock_queue, blr);
|
||||
TALLOC_FREE(blr);
|
||||
}
|
||||
|
||||
recalc_brl_timeout(sconn);
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
Handle a cancel message. Lock already moved onto the cancel queue.
|
||||
*****************************************************************************/
|
||||
|
||||
#define MSG_BLOCKING_LOCK_CANCEL_SIZE (sizeof(struct blocking_lock_record *) + sizeof(NTSTATUS))
|
||||
|
||||
static void process_blocking_lock_cancel_message(struct messaging_context *ctx,
|
||||
void *private_data,
|
||||
uint32_t msg_type,
|
||||
struct server_id server_id,
|
||||
DATA_BLOB *data)
|
||||
{
|
||||
NTSTATUS err;
|
||||
const char *msg = (const char *)data->data;
|
||||
struct blocking_lock_record *blr;
|
||||
struct smbd_server_connection *sconn =
|
||||
talloc_get_type_abort(private_data,
|
||||
struct smbd_server_connection);
|
||||
|
||||
if (data->data == NULL) {
|
||||
smb_panic("process_blocking_lock_cancel_message: null msg");
|
||||
}
|
||||
|
||||
if (data->length != MSG_BLOCKING_LOCK_CANCEL_SIZE) {
|
||||
DEBUG(0, ("process_blocking_lock_cancel_message: "
|
||||
"Got invalid msg len %d\n", (int)data->length));
|
||||
smb_panic("process_blocking_lock_cancel_message: bad msg");
|
||||
}
|
||||
|
||||
memcpy(&blr, msg, sizeof(blr));
|
||||
memcpy(&err, &msg[sizeof(blr)], sizeof(NTSTATUS));
|
||||
|
||||
DEBUG(10,("process_blocking_lock_cancel_message: returning error %s\n",
|
||||
nt_errstr(err) ));
|
||||
|
||||
blocking_lock_reply_error(blr, err);
|
||||
DLIST_REMOVE(sconn->smb1.locks.blocking_lock_cancelled_queue, blr);
|
||||
TALLOC_FREE(blr);
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
Send ourselves a blocking lock cancelled message. Handled asynchronously above.
|
||||
Returns the blocking_lock_record that is being cancelled.
|
||||
Only called from the SMB1 code.
|
||||
*****************************************************************************/
|
||||
|
||||
struct blocking_lock_record *blocking_lock_cancel_smb1(files_struct *fsp,
|
||||
uint64_t smblctx,
|
||||
uint64_t offset,
|
||||
uint64_t count,
|
||||
enum brl_flavour lock_flav,
|
||||
unsigned char locktype,
|
||||
NTSTATUS err)
|
||||
{
|
||||
struct smbd_server_connection *sconn = fsp->conn->sconn;
|
||||
char msg[MSG_BLOCKING_LOCK_CANCEL_SIZE];
|
||||
struct blocking_lock_record *blr;
|
||||
|
||||
if (!sconn->smb1.locks.blocking_lock_cancel_state) {
|
||||
/* Register our message. */
|
||||
messaging_register(sconn->msg_ctx, sconn,
|
||||
MSG_SMB_BLOCKING_LOCK_CANCEL,
|
||||
process_blocking_lock_cancel_message);
|
||||
|
||||
sconn->smb1.locks.blocking_lock_cancel_state = True;
|
||||
}
|
||||
|
||||
for (blr = sconn->smb1.locks.blocking_lock_queue; blr; blr = blr->next) {
|
||||
if (fsp == blr->fsp &&
|
||||
smblctx == blr->smblctx &&
|
||||
offset == blr->offset &&
|
||||
count == blr->count &&
|
||||
lock_flav == blr->lock_flav) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!blr) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Check the flags are right. */
|
||||
if (blr->req->cmd == SMBlockingX &&
|
||||
(locktype & LOCKING_ANDX_LARGE_FILES) !=
|
||||
(CVAL(blr->req->vwv+3, 0) & LOCKING_ANDX_LARGE_FILES)) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Move to cancelled queue. */
|
||||
DLIST_REMOVE(sconn->smb1.locks.blocking_lock_queue, blr);
|
||||
DLIST_ADD(sconn->smb1.locks.blocking_lock_cancelled_queue, blr);
|
||||
|
||||
/* Create the message. */
|
||||
memcpy(msg, &blr, sizeof(blr));
|
||||
memcpy(&msg[sizeof(blr)], &err, sizeof(NTSTATUS));
|
||||
|
||||
messaging_send_buf(sconn->msg_ctx, messaging_server_id(sconn->msg_ctx),
|
||||
MSG_SMB_BLOCKING_LOCK_CANCEL,
|
||||
(uint8_t *)&msg, sizeof(msg));
|
||||
|
||||
return blr;
|
||||
}
|
||||
|
||||
NTSTATUS smbd_do_locks_try(
|
||||
struct messaging_context *msg_ctx,
|
||||
struct files_struct *fsp,
|
||||
|
@ -115,12 +115,6 @@ DATA_BLOB negprot_spnego(TALLOC_CTX *ctx, struct smbXsrv_connection *xconn);
|
||||
void smbd_lock_socket(struct smbXsrv_connection *xconn);
|
||||
void smbd_unlock_socket(struct smbXsrv_connection *xconn);
|
||||
|
||||
NTSTATUS smbd_do_locking(struct smb_request *req,
|
||||
files_struct *fsp,
|
||||
int32_t timeout,
|
||||
uint16_t num_locks,
|
||||
struct smbd_lock_element *locks,
|
||||
bool *async);
|
||||
NTSTATUS smbd_do_unlocking(struct smb_request *req,
|
||||
files_struct *fsp,
|
||||
uint16_t num_ulocks,
|
||||
@ -323,23 +317,6 @@ struct deferred_open_record;
|
||||
void send_break_message_smb2(files_struct *fsp,
|
||||
uint32_t break_from,
|
||||
uint32_t break_to);
|
||||
struct blocking_lock_record *get_pending_smb2req_blr(struct smbd_smb2_request *smb2req);
|
||||
bool push_blocking_lock_request_smb2( struct byte_range_lock *br_lck,
|
||||
struct smb_request *req,
|
||||
files_struct *fsp,
|
||||
int lock_timeout,
|
||||
int lock_num,
|
||||
uint64_t smblctx,
|
||||
enum brl_type lock_type,
|
||||
enum brl_flavour lock_flav,
|
||||
uint64_t offset,
|
||||
uint64_t count,
|
||||
uint64_t blocking_smblctx);
|
||||
void process_blocking_lock_queue_smb2(
|
||||
struct smbd_server_connection *sconn, struct timeval tv_curr);
|
||||
void cancel_pending_lock_requests_by_fid_smb2(files_struct *fsp,
|
||||
struct byte_range_lock *br_lck,
|
||||
enum file_close_type close_type);
|
||||
/* From smbd/smb2_create.c */
|
||||
int map_smb2_oplock_levels_to_samba(uint8_t in_oplock_level);
|
||||
bool get_deferred_open_message_state_smb2(struct smbd_smb2_request *smb2req,
|
||||
@ -916,27 +893,7 @@ struct smbd_server_connection {

        struct {
                struct notify_mid_map *notify_mid_maps;

                struct {
                        /* dlink list we store pending lock records on. */
                        struct blocking_lock_record *blocking_lock_queue;
                        /* dlink list we move cancelled lock records onto. */
                        struct blocking_lock_record *blocking_lock_cancelled_queue;

                        /* The event that makes us process our blocking lock queue */
                        struct tevent_timer *brl_timeout;

                        bool blocking_lock_unlock_state;
                        bool blocking_lock_cancel_state;
                } locks;
        } smb1;
        struct {
                struct {
                        /* The event that makes us process our blocking lock queue */
                        struct tevent_timer *brl_timeout;
                        bool blocking_lock_unlock_state;
                } locks;
        } smb2;

        struct pthreadpool_tevent *pool;

@ -95,41 +95,8 @@ bool aio_add_req_to_fsp(files_struct *fsp, struct tevent_req *req);

/* The following definitions come from smbd/blocking.c */

void brl_timeout_fn(struct tevent_context *event_ctx,
        struct tevent_timer *te,
        struct timeval now,
        void *private_data);
struct timeval timeval_brl_min(const struct timeval *tv1,
        const struct timeval *tv2);
void process_blocking_lock_queue(struct smbd_server_connection *sconn);
bool push_blocking_lock_request( struct byte_range_lock *br_lck,
        struct smb_request *req,
        files_struct *fsp,
        int lock_timeout,
        int lock_num,
        uint64_t smblctx,
        enum brl_type lock_type,
        enum brl_flavour lock_flav,
        uint64_t offset,
        uint64_t count,
        uint64_t blocking_smblctx);
void smbd_cancel_pending_lock_requests_by_fid(files_struct *fsp,
        struct byte_range_lock *br_lck,
        enum file_close_type close_type);
void cancel_pending_lock_requests_by_fid(files_struct *fsp,
        struct byte_range_lock *br_lck,
        enum file_close_type close_type);
void remove_pending_lock_requests_by_mid_smb1(
        struct smbd_server_connection *sconn, uint64_t mid);
bool blocking_lock_was_deferred_smb1(
        struct smbd_server_connection *sconn, uint64_t mid);
struct blocking_lock_record *blocking_lock_cancel_smb1(files_struct *fsp,
        uint64_t smblctx,
        uint64_t offset,
        uint64_t count,
        enum brl_flavour lock_flav,
        unsigned char locktype,
        NTSTATUS err);
NTSTATUS smbd_do_locks_try(
        struct messaging_context *msg_ctx,
        struct files_struct *fsp,
@ -8174,146 +8174,6 @@ uint64_t get_lock_offset(const uint8_t *data, int data_offset,
        return offset;
}

NTSTATUS smbd_do_locking(struct smb_request *req,
        files_struct *fsp,
        int32_t timeout,
        uint16_t num_locks,
        struct smbd_lock_element *locks,
        bool *async)
{
        connection_struct *conn = req->conn;
        int i;
        NTSTATUS status = NT_STATUS_OK;

        *async = false;

        /* Setup the timeout in seconds. */

        if (!lp_blocking_locks(SNUM(conn))) {
                timeout = 0;
        }

        for(i = 0; i < (int)num_locks; i++) {
                struct smbd_lock_element *e = &locks[i];

                DBG_DEBUG("lock start=%"PRIu64", len=%"PRIu64" for smblctx "
                        "%"PRIu64", file %s timeout = %"PRIi32"\n",
                        e->offset,
                        e->count,
                        e->smblctx,
                        fsp_str_dbg(fsp),
                        timeout);

                {
                        bool blocking_lock = (timeout != 0);
                        bool defer_lock = false;
                        struct byte_range_lock *br_lck;
                        struct server_id blocker_pid;
                        uint64_t block_smblctx;

                        br_lck = do_lock(req->sconn->msg_ctx,
                                fsp,
                                e->smblctx,
                                e->count,
                                e->offset,
                                e->brltype,
                                WINDOWS_LOCK,
                                blocking_lock,
                                &status,
                                &blocker_pid,
                                &block_smblctx);

                        if (br_lck && blocking_lock && ERROR_WAS_LOCK_DENIED(status)) {
                                /* Windows internal resolution for blocking locks seems
                                   to be about 200ms... Don't wait for less than that. JRA. */
                                if (timeout != -1) {
                                        timeout = MAX(timeout, lp_lock_spin_time());
                                }
                                defer_lock = true;
                        }

                        /* If a lock sent with timeout of zero would fail, and
                         * this lock has been requested multiple times,
                         * according to brl_lock_failed() we convert this
                         * request to a blocking lock with a timeout of between
                         * 150 - 300 milliseconds.
                         *
                         * If lp_lock_spin_time() has been set to 0, we skip
                         * this blocking retry and fail immediately.
                         *
                         * Replacement for do_lock_spin(). JRA. */

                        if (!req->sconn->using_smb2 &&
                            br_lck && lp_blocking_locks(SNUM(conn)) &&
                            lp_lock_spin_time() && !blocking_lock &&
                            NT_STATUS_EQUAL((status),
                                NT_STATUS_FILE_LOCK_CONFLICT))
                        {
                                defer_lock = true;
                                timeout = lp_lock_spin_time();
                        }

                        if (br_lck && defer_lock) {
                                /*
                                 * A blocking lock was requested. Package up
                                 * this smb into a queued request and push it
                                 * onto the blocking lock queue.
                                 */
                                if(push_blocking_lock_request(br_lck,
                                        req,
                                        fsp,
                                        timeout,
                                        i,
                                        e->smblctx,
                                        e->brltype,
                                        WINDOWS_LOCK,
                                        e->offset,
                                        e->count,
                                        block_smblctx)) {
                                        TALLOC_FREE(br_lck);
                                        *async = true;
                                        return NT_STATUS_OK;
                                }
                        }

                        TALLOC_FREE(br_lck);
                }

                if (!NT_STATUS_IS_OK(status)) {
                        break;
                }
        }

        /* If any of the above locks failed, then we must unlock
           all of the previous locks (X/Open spec). */

        if (num_locks != 0 && !NT_STATUS_IS_OK(status)) {

                /*
                 * Ensure we don't do a remove on the lock that just failed,
                 * as under POSIX rules, if we have a lock already there, we
                 * will delete it (and we shouldn't) .....
                 */
                for(i--; i >= 0; i--) {
                        struct smbd_lock_element *e = &locks[i];

                        do_unlock(req->sconn->msg_ctx,
                                fsp,
                                e->smblctx,
                                e->count,
                                e->offset,
                                WINDOWS_LOCK);
                }
                return status;
        }

        DBG_NOTICE("%s num_locks=%"PRIu16"\n",
                fsp_fnum_dbg(fsp),
                num_locks);

        return NT_STATUS_OK;
}

NTSTATUS smbd_do_unlocking(struct smb_request *req,
        files_struct *fsp,
        uint16_t num_ulocks,
@ -300,31 +300,6 @@ static void smbd_parent_id_cache_delete(struct messaging_context *ctx,
        messaging_send_to_children(ctx, msg_type, msg_data);
}

#ifdef CLUSTER_SUPPORT
static int smbd_parent_ctdb_reconfigured(
        struct tevent_context *ev,
        uint32_t src_vnn, uint32_t dst_vnn, uint64_t dst_srvid,
        const uint8_t *msg, size_t msglen, void *private_data)
{
        struct messaging_context *msg_ctx = talloc_get_type_abort(
                private_data, struct messaging_context);

        DEBUG(10, ("Got %s message\n", (dst_srvid == CTDB_SRVID_RECONFIGURE)
                ? "cluster reconfigure" : "SAMBA_NOTIFY"));

        /*
         * Someone from the family died, validate our locks
         */

        if (am_parent) {
                messaging_send_buf(msg_ctx, am_parent->cleanupd,
                        MSG_SMB_BRL_VALIDATE, NULL, 0);
        }

        return 0;
}
#endif

static void add_child_pid(struct smbd_parent_context *parent,
        pid_t pid)
{
@ -761,32 +736,6 @@ static bool cleanupd_init_recv(struct tevent_req *req)
        return state->ok;
}

/*
  at most every smbd:cleanuptime seconds (default 20), we scan the BRL
  and locking database for entries to cleanup. As a side effect this
  also cleans up dead entries in the connections database (due to the
  traversal in message_send_all()

  Using a timer for this prevents a flood of traversals when a large
  number of clients disconnect at the same time (perhaps due to a
  network outage).
*/

static void cleanup_timeout_fn(struct tevent_context *event_ctx,
        struct tevent_timer *te,
        struct timeval now,
        void *private_data)
{
        struct smbd_parent_context *parent =
                talloc_get_type_abort(private_data,
                struct smbd_parent_context);

        parent->cleanup_te = NULL;

        messaging_send_buf(parent->msg_ctx, parent->cleanupd,
                MSG_SMB_BRL_VALIDATE, NULL, 0);
}

static void cleanupd_started(struct tevent_req *req)
{
        bool ok;
@ -889,25 +838,6 @@ static void remove_child_pid(struct smbd_parent_context *parent,
                        nt_errstr(status));
                }
        }

        if (unclean_shutdown) {
                /* a child terminated uncleanly so tickle all
                   processes to see if they can grab any of the
                   pending locks
                */
                DEBUG(3,(__location__ " Unclean shutdown of pid %u\n",
                        (unsigned int)pid));
                if (parent->cleanup_te == NULL) {
                        /* call the cleanup timer, but not too often */
                        int cleanup_time = lp_parm_int(-1, "smbd", "cleanuptime", 20);
                        parent->cleanup_te = tevent_add_timer(parent->ev_ctx,
                                parent,
                                timeval_current_ofs(cleanup_time, 0),
                                cleanup_timeout_fn,
                                parent);
                        DEBUG(1,("Scheduled cleanup of brl and lock database after unclean shutdown\n"));
                }
        }
}

/****************************************************************************
@ -1312,17 +1242,6 @@ static bool open_sockets_smbd(struct smbd_parent_context *parent,
        messaging_register(msg_ctx, NULL, MSG_SMB_NOTIFY_STARTED,
                smb_parent_send_to_children);

#ifdef CLUSTER_SUPPORT
        if (lp_clustering()) {
                struct ctdbd_connection *conn = messaging_ctdb_connection();

                register_with_ctdbd(conn, CTDB_SRVID_RECONFIGURE,
                        smbd_parent_ctdb_reconfigured, msg_ctx);
                register_with_ctdbd(conn, CTDB_SRVID_SAMBA_NOTIFY,
                        smbd_parent_ctdb_reconfigured, msg_ctx);
        }
#endif

#ifdef DEVELOPER
        messaging_register(msg_ctx, NULL, MSG_SMB_INJECT_FAULT,
                msg_inject_fault);
@ -1688,7 +1607,6 @@ extern void build_options(bool screen);
        char *np_dir = NULL;
        static const struct smbd_shim smbd_shim_fns =
        {
                .cancel_pending_lock_requests_by_fid = smbd_cancel_pending_lock_requests_by_fid,
                .send_stat_cache_delete_message = smbd_send_stat_cache_delete_message,
                .change_to_root_user = smbd_change_to_root_user,
                .become_authenticated_pipe_user = smbd_become_authenticated_pipe_user,
@ -47,9 +47,6 @@ struct smbd_smb2_lock_state {
        struct smbd_lock_element *locks;
};

static void remove_pending_lock(struct smbd_smb2_lock_state *state,
        struct blocking_lock_record *blr);

static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
        struct tevent_context *ev,
        struct smbd_smb2_request *smb2req,
@ -502,498 +499,3 @@ static bool smbd_smb2_lock_cancel(struct tevent_req *req)
        tevent_req_nterror(req, NT_STATUS_CANCELLED);
        return true;
}

/****************************************************************
 Got a message saying someone unlocked a file. Re-schedule all
 blocking lock requests as we don't know if anything overlapped.
*****************************************************************/

static void received_unlock_msg(struct messaging_context *msg,
        void *private_data,
        uint32_t msg_type,
        struct server_id server_id,
        DATA_BLOB *data)
{
        struct smbd_server_connection *sconn =
                talloc_get_type_abort(private_data,
                struct smbd_server_connection);

        DEBUG(10,("received_unlock_msg (SMB2)\n"));

        process_blocking_lock_queue_smb2(sconn, timeval_current());
}

/****************************************************************
 Function to get the blr on a pending record.
*****************************************************************/

struct blocking_lock_record *get_pending_smb2req_blr(struct smbd_smb2_request *smb2req)
{
        struct smbd_smb2_lock_state *state = NULL;
        const uint8_t *inhdr;

        if (!smb2req) {
                return NULL;
        }
        if (smb2req->subreq == NULL) {
                return NULL;
        }
        if (!tevent_req_is_in_progress(smb2req->subreq)) {
                return NULL;
        }
        inhdr = SMBD_SMB2_IN_HDR_PTR(smb2req);
        if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
                return NULL;
        }
        state = tevent_req_data(smb2req->subreq,
                struct smbd_smb2_lock_state);
        if (!state) {
                return NULL;
        }
        return state->blr;
}
/****************************************************************
 Set up the next brl timeout.
*****************************************************************/

static bool recalc_smb2_brl_timeout(struct smbd_server_connection *sconn)
{
        struct smbXsrv_connection *xconn = NULL;
        struct timeval next_timeout = timeval_zero();
        int max_brl_timeout = lp_parm_int(-1, "brl", "recalctime", 5);

        TALLOC_FREE(sconn->smb2.locks.brl_timeout);

        if (sconn->client != NULL) {
                xconn = sconn->client->connections;
        }

        for (; xconn != NULL; xconn = xconn->next) {
                struct smbd_smb2_request *smb2req, *nextreq;

                for (smb2req = xconn->smb2.requests; smb2req; smb2req = nextreq) {
                        struct blocking_lock_record *blr =
                                get_pending_smb2req_blr(smb2req);

                        nextreq = smb2req->next;

                        if (blr == NULL) {
                                continue;
                        }

                        if (!timeval_is_zero(&blr->expire_time)) {
                                next_timeout = timeval_brl_min(&next_timeout,
                                        &blr->expire_time);
                                continue;
                        }

                        /*
                         * If we're blocked on pid 0xFFFFFFFFFFFFFFFFLL this is
                         * a POSIX lock, so calculate a timeout of
                         * 10 seconds into the future.
                         */
                        if (blr->blocking_smblctx == 0xFFFFFFFFFFFFFFFFLL) {
                                struct timeval psx_to;

                                psx_to = timeval_current_ofs(10, 0);
                                next_timeout = timeval_brl_min(&next_timeout,
                                        &psx_to);
                        }
                }
        }

        if (timeval_is_zero(&next_timeout)) {
                DEBUG(10, ("recalc_smb2_brl_timeout:Next "
                        "timeout = Infinite.\n"));
                return true;
        }

        /*
         * To account for unclean shutdowns by clients we need a
         * maximum timeout that we use for checking pending locks. If
         * we have any pending locks at all, then check if the pending
         * lock can continue at least every brl:recalctime seconds
         * (default 5 seconds).
         *
         * This saves us needing to do a message_send_all() in the
         * SIGCHLD handler in the parent daemon. That
         * message_send_all() caused O(n^2) work to be done when IP
         * failovers happened in clustered Samba, which could make the
         * entire system unusable for many minutes.
         */

        if (max_brl_timeout > 0) {
                struct timeval min_to = timeval_current_ofs(max_brl_timeout, 0);
                next_timeout = timeval_brl_min(&next_timeout, &min_to);
        }

        if (DEBUGLVL(10)) {
                struct timeval cur, from_now;

                cur = timeval_current();
                from_now = timeval_until(&cur, &next_timeout);
                DEBUG(10, ("recalc_smb2_brl_timeout: Next "
                        "timeout = %d.%d seconds from now.\n",
                        (int)from_now.tv_sec, (int)from_now.tv_usec));
        }

        sconn->smb2.locks.brl_timeout = tevent_add_timer(
                sconn->ev_ctx,
                NULL,
                next_timeout,
                brl_timeout_fn,
                sconn);
        if (!sconn->smb2.locks.brl_timeout) {
                return false;
        }
        return true;
}

/****************************************************************
 Get an SMB2 lock request to go async. lock_timeout should
 always be -1 here.
*****************************************************************/

bool push_blocking_lock_request_smb2( struct byte_range_lock *br_lck,
        struct smb_request *smb1req,
        files_struct *fsp,
        int lock_timeout,
        int lock_num,
        uint64_t smblctx,
        enum brl_type lock_type,
        enum brl_flavour lock_flav,
        uint64_t offset,
        uint64_t count,
        uint64_t blocking_smblctx)
{
        struct smbd_server_connection *sconn = smb1req->sconn;
        struct smbd_smb2_request *smb2req = smb1req->smb2req;
        struct tevent_req *req = NULL;
        struct smbd_smb2_lock_state *state = NULL;
        struct blocking_lock_record *blr = NULL;
        struct server_id blocker_pid;
        NTSTATUS status = NT_STATUS_OK;

        if (!smb2req) {
                return false;
        }
        req = smb2req->subreq;
        if (!req) {
                return false;
        }
        if (!tevent_req_is_in_progress(smb2req->subreq)) {
                return false;
        }
        state = tevent_req_data(req, struct smbd_smb2_lock_state);
        if (!state) {
                return false;
        }

        blr = talloc_zero(state, struct blocking_lock_record);
        if (!blr) {
                return false;
        }
        blr->fsp = fsp;

        if (lock_timeout == -1) {
                blr->expire_time.tv_sec = 0;
                blr->expire_time.tv_usec = 0; /* Never expire. */
        } else {
                blr->expire_time = timeval_current_ofs_msec(lock_timeout);
        }

        blr->lock_num = lock_num;
        blr->smblctx = smblctx;
        blr->blocking_smblctx = blocking_smblctx;
        blr->lock_flav = lock_flav;
        blr->lock_type = lock_type;
        blr->offset = offset;
        blr->count = count;

        /* Specific brl_lock() implementations can fill this in. */
        blr->blr_private = NULL;

        /* Add a pending lock record for this. */
        status = brl_lock(sconn->msg_ctx,
                br_lck,
                smblctx,
                messaging_server_id(sconn->msg_ctx),
                offset,
                count,
                lock_type == READ_LOCK ? PENDING_READ_LOCK : PENDING_WRITE_LOCK,
                blr->lock_flav,
                true,
                &blocker_pid,
                NULL);

        if (!NT_STATUS_IS_OK(status)) {
                DEBUG(0,("push_blocking_lock_request_smb2: "
                        "failed to add PENDING_LOCK record.\n"));
                TALLOC_FREE(blr);
                return false;
        }
        state->blr = blr;

        DEBUG(10,("push_blocking_lock_request_smb2: file %s timeout %d\n",
                fsp_str_dbg(fsp),
                lock_timeout ));

        recalc_smb2_brl_timeout(sconn);

        /* Ensure we'll receive messages when this is unlocked. */
        if (!sconn->smb2.locks.blocking_lock_unlock_state) {
                messaging_register(sconn->msg_ctx, sconn,
                        MSG_SMB_UNLOCK, received_unlock_msg);
                sconn->smb2.locks.blocking_lock_unlock_state = true;
        }

        /* allow this request to be canceled */
        tevent_req_set_cancel_fn(req, smbd_smb2_lock_cancel);

        return true;
}

/****************************************************************
 Remove a pending lock record under lock.
*****************************************************************/

static void remove_pending_lock(struct smbd_smb2_lock_state *state,
        struct blocking_lock_record *blr)
{
        struct byte_range_lock *br_lck = brl_get_locks(
                state, blr->fsp);

        DEBUG(10, ("remove_pending_lock: BLR = %p\n", blr));

        if (br_lck) {
                brl_lock_cancel(br_lck,
                        blr->smblctx,
                        messaging_server_id(blr->fsp->conn->sconn->msg_ctx),
                        blr->offset,
                        blr->count,
                        blr->lock_flav);
                TALLOC_FREE(br_lck);
        }
}

/****************************************************************
 Re-proccess a blocking lock request.
 This is equivalent to process_lockingX() inside smbd/blocking.c
*****************************************************************/

static void reprocess_blocked_smb2_lock(struct smbd_smb2_request *smb2req,
        struct timeval tv_curr)
{
        NTSTATUS status = NT_STATUS_UNSUCCESSFUL;
        struct blocking_lock_record *blr = NULL;
        struct smbd_smb2_lock_state *state = NULL;
        struct byte_range_lock *br_lck = NULL;
        struct smbd_lock_element *e = NULL;
        files_struct *fsp = NULL;

        if (!smb2req->subreq) {
                return;
        }
        SMBPROFILE_IOBYTES_ASYNC_SET_BUSY(smb2req->profile);

        state = tevent_req_data(smb2req->subreq, struct smbd_smb2_lock_state);
        if (!state) {
                return;
        }

        blr = state->blr;
        fsp = blr->fsp;

        /* We can only have one blocked lock in SMB2. */
        SMB_ASSERT(state->lock_count == 1);
        SMB_ASSERT(blr->lock_num == 0);

        /* Try and get the outstanding lock. */
        e = &state->locks[blr->lock_num];

        br_lck = do_lock(fsp->conn->sconn->msg_ctx,
                fsp,
                e->smblctx,
                e->count,
                e->offset,
                e->brltype,
                WINDOWS_LOCK,
                true,
                &status,
                NULL,
                &blr->blocking_smblctx);

        TALLOC_FREE(br_lck);

        if (NT_STATUS_IS_OK(status)) {
                /*
                 * Success - we got the lock.
                 */

                DEBUG(3,("reprocess_blocked_smb2_lock SUCCESS file = %s, "
                        "%s, num_locks=%d\n",
                        fsp_str_dbg(fsp),
                        fsp_fnum_dbg(fsp),
                        (int)state->lock_count));

                remove_pending_lock(state, blr);
                tevent_req_done(smb2req->subreq);
                return;
        }

        if (!NT_STATUS_EQUAL(status,NT_STATUS_LOCK_NOT_GRANTED) &&
                !NT_STATUS_EQUAL(status,NT_STATUS_FILE_LOCK_CONFLICT)) {
                /*
                 * We have other than a "can't get lock"
                 * error. Return an error.
                 */
                remove_pending_lock(state, blr);
                tevent_req_nterror(smb2req->subreq, status);
                return;
        }

        /*
         * We couldn't get the lock for this record.
         * If the time has expired, return a lock error.
         */

        if (!timeval_is_zero(&blr->expire_time) &&
                timeval_compare(&blr->expire_time, &tv_curr) <= 0) {
                remove_pending_lock(state, blr);
                tevent_req_nterror(smb2req->subreq, NT_STATUS_LOCK_NOT_GRANTED);
                return;
        }

        /*
         * Still can't get the lock - keep waiting.
         */

        DEBUG(10,("reprocess_blocked_smb2_lock: failed to get lock "
                "for file %s, %s. Still waiting....\n",
                fsp_str_dbg(fsp),
                fsp_fnum_dbg(fsp)));

        SMBPROFILE_IOBYTES_ASYNC_SET_IDLE(smb2req->profile);
        return;
}

/****************************************************************
 Attempt to proccess all outstanding blocking locks pending on
 the request queue.
*****************************************************************/

void process_blocking_lock_queue_smb2(
        struct smbd_server_connection *sconn, struct timeval tv_curr)
{
        struct smbXsrv_connection *xconn = NULL;

        if (sconn != NULL && sconn->client != NULL) {
                xconn = sconn->client->connections;
        }

        for (; xconn != NULL; xconn = xconn->next) {
                struct smbd_smb2_request *smb2req, *nextreq;

                for (smb2req = xconn->smb2.requests; smb2req; smb2req = nextreq) {
                        const uint8_t *inhdr;

                        nextreq = smb2req->next;

                        if (smb2req->subreq == NULL) {
                                /* This message has been processed. */
                                continue;
                        }
                        if (!tevent_req_is_in_progress(smb2req->subreq)) {
                                /* This message has been processed. */
                                continue;
                        }

                        inhdr = SMBD_SMB2_IN_HDR_PTR(smb2req);
                        if (SVAL(inhdr, SMB2_HDR_OPCODE) == SMB2_OP_LOCK) {
                                reprocess_blocked_smb2_lock(smb2req, tv_curr);
                        }
                }
        }

        recalc_smb2_brl_timeout(sconn);
}

/****************************************************************************
 Remove any locks on this fd. Called from file_close().
****************************************************************************/

void cancel_pending_lock_requests_by_fid_smb2(files_struct *fsp,
        struct byte_range_lock *br_lck,
        enum file_close_type close_type)
{
        struct smbd_server_connection *sconn = fsp->conn->sconn;
        struct smbXsrv_connection *xconn = NULL;

        if (sconn != NULL && sconn->client != NULL) {
                xconn = sconn->client->connections;
        }

        for (; xconn != NULL; xconn = xconn->next) {
                struct smbd_smb2_request *smb2req, *nextreq;

                for (smb2req = xconn->smb2.requests; smb2req; smb2req = nextreq) {
                        struct smbd_smb2_lock_state *state = NULL;
                        files_struct *fsp_curr = NULL;
                        struct blocking_lock_record *blr = NULL;
                        const uint8_t *inhdr;

                        nextreq = smb2req->next;

                        if (smb2req->subreq == NULL) {
                                /* This message has been processed. */
                                continue;
                        }
                        if (!tevent_req_is_in_progress(smb2req->subreq)) {
                                /* This message has been processed. */
                                continue;
                        }

                        inhdr = SMBD_SMB2_IN_HDR_PTR(smb2req);
                        if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
                                /* Not a lock call. */
                                continue;
                        }

                        state = tevent_req_data(smb2req->subreq,
                                struct smbd_smb2_lock_state);
                        if (!state) {
                                /* Strange - is this even possible ? */
                                continue;
                        }

                        fsp_curr = smb2req->compat_chain_fsp;
                        if (fsp_curr == NULL) {
                                /* Strange - is this even possible ? */
                                continue;
                        }

                        if (fsp_curr != fsp) {
                                /* It's not our fid */
                                continue;
                        }

                        blr = state->blr;

                        /* Remove the entries from the lock db. */
                        brl_lock_cancel(br_lck,
                                blr->smblctx,
                                messaging_server_id(sconn->msg_ctx),
                                blr->offset,
                                blr->count,
                                blr->lock_flav);

                        /* Finally end the request. */
                        if (close_type == SHUTDOWN_CLOSE) {
                                tevent_req_done(smb2req->subreq);
                        } else {
                                tevent_req_nterror(smb2req->subreq,
                                        NT_STATUS_RANGE_NOT_LOCKED);
                        }
                }
        }
}
@ -39,10 +39,6 @@ static void smbd_cleanupd_process_exited(struct messaging_context *msg,
        void *private_data, uint32_t msg_type,
        struct server_id server_id,
        DATA_BLOB *data);
static void smbd_cleanupd_unlock(struct messaging_context *msg,
        void *private_data, uint32_t msg_type,
        struct server_id server_id,
        DATA_BLOB *data);

struct tevent_req *smbd_cleanupd_send(TALLOC_CTX *mem_ctx,
        struct tevent_context *ev,
@ -71,12 +67,6 @@ struct tevent_req *smbd_cleanupd_send(TALLOC_CTX *mem_ctx,
                return tevent_req_post(req, ev);
        }

        status = messaging_register(msg, NULL, MSG_SMB_BRL_VALIDATE,
                smbd_cleanupd_unlock);
        if (tevent_req_nterror(req, status)) {
                return tevent_req_post(req, ev);
        }

        return req;
}

@ -90,17 +80,6 @@ static void smbd_cleanupd_shutdown(struct messaging_context *msg,
        tevent_req_done(req);
}

static void smbd_cleanupd_unlock(struct messaging_context *msg,
        void *private_data, uint32_t msg_type,
        struct server_id server_id,
        DATA_BLOB *data)
{
        DBG_WARNING("Cleaning up brl and lock database after unclean "
                "shutdown\n");

        brl_revalidate(msg, private_data, msg_type, server_id, data);
}

struct cleanup_child {
        struct cleanup_child *prev, *next;
        pid_t pid;
@ -857,36 +857,6 @@ static bool do_ip_dropped(struct tevent_context *ev_ctx,
                strlen(argv[1]) + 1);
}

/* force a blocking lock retry */

static bool do_lockretry(struct tevent_context *ev_ctx,
        struct messaging_context *msg_ctx,
        const struct server_id pid,
        const int argc, const char **argv)
{
        if (argc != 1) {
                fprintf(stderr, "Usage: smbcontrol <dest> lockretry\n");
                return False;
        }

        return send_message(msg_ctx, pid, MSG_SMB_UNLOCK, NULL, 0);
}

/* force a validation of all brl entries, including re-sends. */

static bool do_brl_revalidate(struct tevent_context *ev_ctx,
        struct messaging_context *msg_ctx,
        const struct server_id pid,
        const int argc, const char **argv)
{
        if (argc != 1) {
                fprintf(stderr, "Usage: smbcontrol <dest> brl-revalidate\n");
                return False;
        }

        return send_message(msg_ctx, pid, MSG_SMB_BRL_VALIDATE, NULL, 0);
}

/* Display talloc pool usage */

static bool do_poolusage(struct tevent_context *ev_ctx,
@ -1484,16 +1454,6 @@ static const struct {
                .fn = do_ip_dropped,
                .help = "Tell winbind that an IP got dropped",
        },
        {
                .name = "lockretry",
                .fn = do_lockretry,
                .help = "Force a blocking lock retry",
        },
        {
                .name = "brl-revalidate",
                .fn = do_brl_revalidate,
                .help = "Revalidate all brl entries",
        },
        {
                .name = "pool-usage",
                .fn = do_poolusage,
@ -241,8 +241,6 @@ static void print_brl(struct file_id id,
        } lock_types[] = {
                { READ_LOCK, "R" },
                { WRITE_LOCK, "W" },
                { PENDING_READ_LOCK, "PR" },
                { PENDING_WRITE_LOCK, "PW" },
                { UNLOCK_LOCK, "U" }
        };
        const char *desc="X";