1
0
mirror of https://github.com/samba-team/samba.git synced 2025-01-11 05:18:09 +03:00

s3:locking: add brl_req_guid() and brl_req_mem_ctx() helper functions

This allows the vfs backend to detect a retry and keep state between
the retries.

BUG: https://bugzilla.samba.org/show_bug.cgi?id=14113

Signed-off-by: Stefan Metzmacher <metze@samba.org>
Reviewed-by: Volker Lendecke <vl@samba.org>
This commit is contained in:
Stefan Metzmacher 2019-08-08 19:26:28 +02:00
parent 7471b0f632
commit 66d92f37c3
11 changed files with 106 additions and 5 deletions

View File

@ -30,6 +30,7 @@ enum brl_type {READ_LOCK, WRITE_LOCK, UNLOCK_LOCK};
enum brl_flavour {WINDOWS_LOCK = 0, POSIX_LOCK = 1};
#include "librpc/gen_ndr/server_id.h"
#include "librpc/gen_ndr/misc.h"
/* This contains elements that differentiate locks. The smbpid is a
client supplied pid, and is essentially the locking context for
@ -62,6 +63,7 @@ struct lock_struct {
};
struct smbd_lock_element {
struct GUID req_guid;
uint64_t smblctx;
enum brl_type brltype;
uint64_t offset;

View File

@ -46,6 +46,8 @@ static struct db_context *brlock_db;
struct byte_range_lock {
struct files_struct *fsp;
TALLOC_CTX *req_mem_ctx;
const struct GUID *req_guid;
unsigned int num_locks;
bool modified;
struct lock_struct *lock_data;
@ -84,6 +86,25 @@ struct files_struct *brl_fsp(struct byte_range_lock *brl)
return brl->fsp;
}
/****************************************************************************
 Return the talloc context a VFS backend should use for per-request state
 attached to this lock context.

 If no request context was supplied (i.e. the lock was not obtained via
 brl_get_locks_for_locking()), fall back to the byte_range_lock object
 itself, so request-scoped allocations are at worst tied to the lifetime
 of the lock structure. NOTE(review): talloc_get_type_abort() is used here
 purely as a checked identity cast that also drops the const qualifier —
 it aborts if brl is not of type struct byte_range_lock.
****************************************************************************/

TALLOC_CTX *brl_req_mem_ctx(const struct byte_range_lock *brl)
{
	if (brl->req_mem_ctx == NULL) {
		return talloc_get_type_abort(brl, struct byte_range_lock);
	}
	return brl->req_mem_ctx;
}
/****************************************************************************
 Return the GUID identifying the request this lock context belongs to.
 When no request GUID was attached (the lock context did not come from
 brl_get_locks_for_locking()), hand back a shared all-zero GUID rather
 than NULL, so callers never need a NULL check.
****************************************************************************/

const struct GUID *brl_req_guid(const struct byte_range_lock *brl)
{
	/* Static storage is zero-initialized, giving the all-zero GUID. */
	static const struct GUID brl_zero_req_guid;

	if (brl->req_guid != NULL) {
		return brl->req_guid;
	}
	return &brl_zero_req_guid;
}
/****************************************************************************
See if two locking contexts are equal.
****************************************************************************/
@ -1823,6 +1844,25 @@ struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx, files_struct *fsp)
return br_lck;
}
/****************************************************************************
 Fetch the byte range lock context for a file and attach the request
 memory context and request GUID, which let a VFS backend detect retries
 of the same request and keep state across them.

 Both req_mem_ctx and req_guid are mandatory (SMB_ASSERT aborts on NULL).
 Returns NULL only if the underlying brl_get_locks() fails.
****************************************************************************/

struct byte_range_lock *brl_get_locks_for_locking(TALLOC_CTX *mem_ctx,
						  files_struct *fsp,
						  TALLOC_CTX *req_mem_ctx,
						  const struct GUID *req_guid)
{
	struct byte_range_lock *lck = brl_get_locks(mem_ctx, fsp);

	if (lck == NULL) {
		return NULL;
	}

	SMB_ASSERT(req_mem_ctx != NULL);
	lck->req_mem_ctx = req_mem_ctx;

	SMB_ASSERT(req_guid != NULL);
	lck->req_guid = req_guid;

	return lck;
}
struct brl_get_locks_readonly_state {
TALLOC_CTX *mem_ctx;
struct byte_range_lock **br_lock;
@ -1884,14 +1924,11 @@ struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
/*
* No locks on this file. Return an empty br_lock.
*/
br_lock = talloc(fsp, struct byte_range_lock);
br_lock = talloc_zero(fsp, struct byte_range_lock);
if (br_lock == NULL) {
return NULL;
}
br_lock->num_locks = 0;
br_lock->lock_data = NULL;
} else if (!NT_STATUS_IS_OK(status)) {
DEBUG(3, ("Could not parse byte range lock record: "
"%s\n", nt_errstr(status)));

View File

@ -232,6 +232,8 @@ static void decrement_current_lock_count(files_struct *fsp,
struct do_lock_state {
struct files_struct *fsp;
TALLOC_CTX *req_mem_ctx;
const struct GUID *req_guid;
uint64_t smblctx;
uint64_t count;
uint64_t offset;
@ -251,7 +253,10 @@ static void do_lock_fn(
struct do_lock_state *state = private_data;
struct byte_range_lock *br_lck = NULL;
br_lck = brl_get_locks(talloc_tos(), state->fsp);
br_lck = brl_get_locks_for_locking(talloc_tos(),
state->fsp,
state->req_mem_ctx,
state->req_guid);
if (br_lck == NULL) {
state->status = NT_STATUS_NO_MEMORY;
return;
@ -272,6 +277,8 @@ static void do_lock_fn(
}
NTSTATUS do_lock(files_struct *fsp,
TALLOC_CTX *req_mem_ctx,
const struct GUID *req_guid,
uint64_t smblctx,
uint64_t count,
uint64_t offset,
@ -282,6 +289,8 @@ NTSTATUS do_lock(files_struct *fsp,
{
struct do_lock_state state = {
.fsp = fsp,
.req_mem_ctx = req_mem_ctx,
.req_guid = req_guid,
.smblctx = smblctx,
.count = count,
.offset = offset,

View File

@ -30,6 +30,8 @@ void brl_shutdown(void);
unsigned int brl_num_locks(const struct byte_range_lock *brl);
struct files_struct *brl_fsp(struct byte_range_lock *brl);
TALLOC_CTX *brl_req_mem_ctx(const struct byte_range_lock *brl);
const struct GUID *brl_req_guid(const struct byte_range_lock *brl);
bool byte_range_valid(uint64_t ofs, uint64_t len);
bool byte_range_overlap(uint64_t ofs1,
@ -76,6 +78,10 @@ int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
br_off start, br_off size,
void *private_data),
void *private_data);
struct byte_range_lock *brl_get_locks_for_locking(TALLOC_CTX *mem_ctx,
files_struct *fsp,
TALLOC_CTX *req_mem_ctx,
const struct GUID *req_guid);
struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
files_struct *fsp);
struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp);
@ -100,6 +106,8 @@ NTSTATUS query_lock(files_struct *fsp,
enum brl_type *plock_type,
enum brl_flavour lock_flav);
NTSTATUS do_lock(files_struct *fsp,
TALLOC_CTX *req_mem_ctx,
const struct GUID *req_guid,
uint64_t smblctx,
uint64_t count,
uint64_t offset,

View File

@ -585,6 +585,7 @@ static NTSTATUS fruit_check_access(vfs_handle_struct *handle,
bool netatalk_already_open_for_writing = false;
bool netatalk_already_open_with_deny_read = false;
bool netatalk_already_open_with_deny_write = false;
struct GUID req_guid = GUID_random();
/* FIXME: hardcoded data fork, add resource fork */
enum apple_fork fork_type = APPLE_FORK_DATA;
@ -648,8 +649,11 @@ static NTSTATUS fruit_check_access(vfs_handle_struct *handle,
/* Set NetAtalk locks matching our access */
if (access_mask & FILE_READ_DATA) {
off = access_to_netatalk_brl(fork_type, FILE_READ_DATA);
req_guid.time_hi_and_version = __LINE__;
status = do_lock(
fsp,
talloc_tos(),
&req_guid,
fsp->op->global->open_persistent_id,
1,
off,
@ -665,8 +669,11 @@ static NTSTATUS fruit_check_access(vfs_handle_struct *handle,
if (!share_for_read) {
off = denymode_to_netatalk_brl(fork_type, DENY_READ);
req_guid.time_hi_and_version = __LINE__;
status = do_lock(
fsp,
talloc_tos(),
&req_guid,
fsp->op->global->open_persistent_id,
1,
off,
@ -682,8 +689,11 @@ static NTSTATUS fruit_check_access(vfs_handle_struct *handle,
if (access_mask & FILE_WRITE_DATA) {
off = access_to_netatalk_brl(fork_type, FILE_WRITE_DATA);
req_guid.time_hi_and_version = __LINE__;
status = do_lock(
fsp,
talloc_tos(),
&req_guid,
fsp->op->global->open_persistent_id,
1,
off,
@ -699,8 +709,11 @@ static NTSTATUS fruit_check_access(vfs_handle_struct *handle,
if (!share_for_write) {
off = denymode_to_netatalk_brl(fork_type, DENY_WRITE);
req_guid.time_hi_and_version = __LINE__;
status = do_lock(
fsp,
talloc_tos(),
&req_guid,
fsp->op->global->open_persistent_id,
1,
off,

View File

@ -45,6 +45,8 @@ NTSTATUS smbd_do_locks_try(
status = do_lock(
fsp,
locks, /* req_mem_ctx */
&e->req_guid,
e->smblctx,
e->count,
e->offset,

View File

@ -109,3 +109,21 @@ void smbd_init_globals(void)
ZERO_STRUCT(sec_ctx_stack);
}
/*
 * Build a GUID that identifies a single (sub-)request on this connection,
 * so lower layers (e.g. the brlock code) can recognize a retry of the
 * same request. The GUID fields are repurposed as follows:
 *   time_low             - low 32 bits of the SMB message id (mid)
 *   time_mid             - SMB2: current compound index; SMB1: low 16 bits
 *                          of the vwv pointer (distinguishes re-parses of
 *                          the same mid — NOTE(review): assumption, confirm)
 *   time_hi_and_version  - caller-chosen index (e.g. lock array slot)
 *   bytes 8..15          - the connection pointer, packed via SBVAL
 * This is an identifier, not a random RFC 4122 UUID.
 */
struct GUID smbd_request_guid(struct smb_request *smb1req, uint16_t idx)
{
	struct GUID v = {
		.time_low = (uint32_t)smb1req->mid,
		.time_hi_and_version = idx,
	};

	if (smb1req->smb2req != NULL) {
		/* SMB2: disambiguate requests within a compound batch. */
		v.time_mid = (uint16_t)smb1req->smb2req->current_idx;
	} else {
		/* SMB1: no compound index; use the parameter-block address. */
		v.time_mid = (uint16_t)(uintptr_t)smb1req->vwv;
	}

	/* Store the connection pointer in the clock_seq/node bytes. */
	SBVAL((uint8_t *)&v, 8, (uintptr_t)smb1req->xconn);

	return v;
}

View File

@ -115,6 +115,8 @@ DATA_BLOB negprot_spnego(TALLOC_CTX *ctx, struct smbXsrv_connection *xconn);
void smbd_lock_socket(struct smbXsrv_connection *xconn);
void smbd_unlock_socket(struct smbXsrv_connection *xconn);
struct GUID smbd_request_guid(struct smb_request *smb1req, uint16_t idx);
NTSTATUS smbd_do_unlocking(struct smb_request *req,
files_struct *fsp,
uint16_t num_ulocks,

View File

@ -3944,6 +3944,7 @@ void reply_lockread(struct smb_request *req)
*/
*lck = (struct smbd_lock_element) {
.req_guid = smbd_request_guid(req, 0),
.smblctx = req->smbpid,
.brltype = WRITE_LOCK,
.count = SVAL(req->vwv+1, 0),
@ -4971,6 +4972,7 @@ void reply_writeunlock(struct smb_request *req)
if (numtowrite && !fsp->print_file) {
struct smbd_lock_element l = {
.req_guid = smbd_request_guid(req, 0),
.smblctx = req->smbpid,
.brltype = UNLOCK_LOCK,
.offset = startpos,
@ -5866,6 +5868,7 @@ void reply_lock(struct smb_request *req)
}
*lck = (struct smbd_lock_element) {
.req_guid = smbd_request_guid(req, 0),
.smblctx = req->smbpid,
.brltype = WRITE_LOCK,
.count = IVAL(req->vwv+1, 0),
@ -5957,6 +5960,7 @@ void reply_unlock(struct smb_request *req)
}
lck = (struct smbd_lock_element) {
.req_guid = smbd_request_guid(req, 0),
.smblctx = req->smbpid,
.brltype = UNLOCK_LOCK,
.offset = IVAL(req->vwv+3, 0),
@ -8550,6 +8554,8 @@ void reply_lockingX(struct smb_request *req)
* smb_unlkrng structs
*/
for (i = 0; i < num_ulocks; i++) {
ulocks[i].req_guid = smbd_request_guid(req,
UINT16_MAX - i),
ulocks[i].smblctx = get_lock_pid(
data, i, large_file_format);
ulocks[i].count = get_lock_count(
@ -8607,6 +8613,7 @@ void reply_lockingX(struct smb_request *req)
}
for (i = 0; i < num_locks; i++) {
locks[i].req_guid = smbd_request_guid(req, i),
locks[i].smblctx = get_lock_pid(data, i, large_file_format);
locks[i].count = get_lock_count(data, i, large_file_format);
locks[i].offset = get_lock_offset(data, i, large_file_format);

View File

@ -318,6 +318,7 @@ static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
return tevent_req_post(req, ev);
}
locks[i].req_guid = smbd_request_guid(smb2req->smb1req, i);
locks[i].smblctx = fsp->op->global->open_persistent_id;
locks[i].offset = in_locks[i].offset;
locks[i].count = in_locks[i].length;

View File

@ -7668,6 +7668,7 @@ static NTSTATUS smb_set_posix_lock(connection_struct *conn,
if (lock_type == UNLOCK_LOCK) {
struct smbd_lock_element l = {
.req_guid = smbd_request_guid(req, 0),
.smblctx = smblctx,
.brltype = UNLOCK_LOCK,
.offset = offset,
@ -7683,6 +7684,7 @@ static NTSTATUS smb_set_posix_lock(connection_struct *conn,
}
*lck = (struct smbd_lock_element) {
.req_guid = smbd_request_guid(req, 0),
.smblctx = smblctx,
.brltype = lock_type,
.count = count,