
s3: smbd: Add async internals of reply_close().

Waits until all aio requests on the closing fsp are
finished before returning to the client.

Slightly modified version of the existing async
reply_close code, updated to use the wait_queue
pattern standardized in reply_tdis, reply_ulogoffX
and reply_exit.

Done this way (commented out) so it is a clean
diff and it's clear what is being added.

The next commit will remove the old version.

Signed-off-by: Jeremy Allison <jra@samba.org>
Reviewed-by: Ralph Boehme <slow@samba.org>
Authored by Jeremy Allison on 2020-03-18 15:09:51 -07:00, committed by Ralph Boehme
parent 20290d02a0
commit fef2054dd0


@@ -6145,6 +6145,135 @@ static void do_smb1_close(struct tevent_req *req)
	smb_request_done(smb1req);
}

#if 0
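/* Per-request state: the fsp being closed and the wait queue we drain. */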
struct reply_close_state {
	files_struct *fsp;
	struct tevent_queue *wait_queue;
};

static void reply_close_wait_done(struct tevent_req *subreq);
/****************************************************************************
 Async SMB1 close.
 Note, on failure here we deallocate and return NULL to allow the caller
 to immediately return an SMB1 error of ERRnomem.
****************************************************************************/
static struct tevent_req *reply_close_send(struct smb_request *smb1req,
					files_struct *fsp)
{
	struct tevent_req *req;
	struct reply_close_state *state;
	struct tevent_req *subreq;
	struct smbd_server_connection *sconn = smb1req->sconn;

	req = tevent_req_create(smb1req, &state,
			struct reply_close_state);
	if (req == NULL) {
		return NULL;
	}
	/* Remember the fsp - reply_close_done() needs it after req is gone. */
	state->fsp = fsp;
	state->wait_queue = tevent_queue_create(state,
			"reply_close_wait_queue");
	if (tevent_req_nomem(state->wait_queue, req)) {
		TALLOC_FREE(req);
		return NULL;
	}

	/*
	 * Flag the file as close in progress.
	 * This will prevent any more IO being
	 * done on it.
	 */
	fsp->closing = true;

	/*
	 * Now wait until all aio requests on this fsp are
	 * finished.
	 *
	 * We don't set a callback, as we just want to block the
	 * wait queue and the talloc_free() of fsp->aio_request
	 * will remove the item from the wait queue.
	 */
	subreq = tevent_queue_wait_send(fsp->aio_requests,
				sconn->ev_ctx,
				state->wait_queue);
	if (tevent_req_nomem(subreq, req)) {
		TALLOC_FREE(req);
		return NULL;
	}

	/*
	 * Now we add our own waiter to the end of the queue,
	 * this way we get notified when all pending requests are finished
	 * and reply to the outstanding SMB1 request.
	 */
	subreq = tevent_queue_wait_send(state,
				sconn->ev_ctx,
				state->wait_queue);
	if (tevent_req_nomem(subreq, req)) {
		TALLOC_FREE(req);
		return NULL;
	}

	/*
	 * We're really going async - move the SMB1 request from
	 * a talloc stackframe above us to the conn talloc-context.
	 * We need this to stick around until the wait_done
	 * callback is invoked.
	 */
	smb1req = talloc_move(sconn, &smb1req);

	tevent_req_set_callback(subreq, reply_close_wait_done, req);

	return req;
}
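/*
 * Fires once our own waiter reaches the head of the wait queue,
 * i.e. after the last aio request on the fsp has gone away.
 */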
static void reply_close_wait_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);

	tevent_queue_wait_recv(subreq);
	TALLOC_FREE(subreq);
	tevent_req_done(req);
}

static NTSTATUS reply_close_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}
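/*
 * Final completion: all aio on the fsp is drained, so do the
 * actual close_file() and send the SMB1 reply to the client.
 */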
static void reply_close_done(struct tevent_req *req)
{
	struct smb_request *smb1req = tevent_req_callback_data(
			req, struct smb_request);
	struct reply_close_state *state = tevent_req_data(req,
			struct reply_close_state);
	/* state is a talloc child of req - save fsp before req is freed. */
	files_struct *fsp = state->fsp;
	NTSTATUS status;

	status = reply_close_recv(req);
	TALLOC_FREE(req);
	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(smb1req);
		exit_server(__location__ ": reply_close_recv failed");
		return;
	}

	status = close_file(smb1req, fsp, NORMAL_CLOSE);
	if (NT_STATUS_IS_OK(status)) {
		reply_outbuf(smb1req, 0, 0);
	} else {
		reply_nterror(smb1req, status);
	}

	/*
	 * The following call is needed to push the
	 * reply data back out the socket after async
	 * return. Plus it frees smb1req.
	 */
	smb_request_done(smb1req);
}
#endif
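
For context, the dispatch side is not part of this hunk. Based on the
wait_queue pattern in reply_exit and reply_tdis that the commit message
cites, the hookup inside reply_close() would look roughly like the sketch
below; the num_aio_requests guard and the NT_STATUS_NO_MEMORY mapping for
ERRnomem are assumptions, not lines from this commit.

	/*
	 * Hypothetical dispatch inside reply_close() - a sketch only,
	 * modeled on the reply_exit/reply_tdis pattern. The guard and
	 * the error mapping below are assumptions.
	 */
	if (fsp->num_aio_requests != 0) {
		struct tevent_req *req;

		req = reply_close_send(smb1req, fsp);
		if (req == NULL) {
			/* reply_close_send() returns NULL on ENOMEM. */
			reply_nterror(smb1req, NT_STATUS_NO_MEMORY);
			return;
		}
		/* reply_close_done() sends the reply and frees smb1req. */
		tevent_req_set_callback(req, reply_close_done, smb1req);
		return;
	}
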
/****************************************************************************
Reply to a writeclose (Core+ protocol).
****************************************************************************/