cifs: smbd: Properly process errors on ib_post_send
When processing errors from ib_post_send(), the transport state needs to
be rolled back to the condition before the error. Refactor the old code
to make it easy to roll back on IB errors, and fix this.

Signed-off-by: Long Li <longli@microsoft.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
commit f1b7b862bf
parent eda1c54f14
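The patch replaces a single "undo everything" helper (smbd_destroy_header) with a chain of error labels, each of which undoes exactly one earlier step, so a failed ib_post_send() -- or any earlier failure -- leaves the credit accounting where it started. The sketch below is illustration only, not cifs/smbd code: a minimal user-space C model of that acquire-then-unwind pattern. struct fake_connection, post_send() and send_one_packet() are hypothetical stand-ins for the smbd_connection counters and the err_* labels in smbd_post_send_sgl().

#include <stdio.h>
#include <stdlib.h>

struct fake_connection {
	int send_credits;        /* credits available for sending */
	int send_pending;        /* sends posted but not yet completed */
	int receive_credits;     /* credits granted to the peer */
	int new_credits_offered; /* credits parked for a later offer after rollback */
};

/* Stand-in for ib_post_send(); always fails so the rollback path runs. */
static int post_send(struct fake_connection *c)
{
	(void)c;
	return -1;
}

static int send_one_packet(struct fake_connection *c)
{
	int new_credits = 1;
	int rc;

	/* Step 1: consume one send credit. */
	if (c->send_credits <= 0)
		return -1;              /* the real code waits here instead */
	c->send_credits--;

	/* Step 2: account for one in-flight send. */
	c->send_pending++;

	/* Step 3: grant receive credits to the peer in the packet header. */
	c->receive_credits += new_credits;

	rc = post_send(c);
	if (rc)
		goto err_post;          /* unwind steps 3, 2, 1 in that order */
	return 0;

err_post:
	/* Undo step 3, but park the credits so they can be offered later. */
	c->receive_credits -= new_credits;
	c->new_credits_offered += new_credits;
	/* Undo step 2. */
	c->send_pending--;
	/* Undo step 1. */
	c->send_credits++;
	return rc;
}

int main(void)
{
	struct fake_connection c = { .send_credits = 1 };
	int rc = send_one_packet(&c);

	printf("rc=%d send_credits=%d send_pending=%d receive_credits=%d new_credits_offered=%d\n",
	       rc, c.send_credits, c.send_pending,
	       c.receive_credits, c.new_credits_offered);
	return rc ? EXIT_FAILURE : EXIT_SUCCESS;
}

Running the sketch prints send_credits and send_pending back at their starting values, with the un-granted receive credits parked in new_credits_offered -- the same bookkeeping the patch performs at its err_dma, err_alloc, err_wait_send_queue and err_wait_credit labels.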
@@ -800,130 +800,6 @@ static int manage_keep_alive_before_sending(struct smbd_connection *info)
 	return 0;
 }

-/*
- * Build and prepare the SMBD packet header
- * This function waits for avaialbe send credits and build a SMBD packet
- * header. The caller then optional append payload to the packet after
- * the header
- * intput values
- * size: the size of the payload
- * remaining_data_length: remaining data to send if this is part of a
- * fragmented packet
- * output values
- * request_out: the request allocated from this function
- * return values: 0 on success, otherwise actual error code returned
- */
-static int smbd_create_header(struct smbd_connection *info,
-		int size, int remaining_data_length,
-		struct smbd_request **request_out)
-{
-	struct smbd_request *request;
-	struct smbd_data_transfer *packet;
-	int header_length;
-	int new_credits;
-	int rc;
-
-	/* Wait for send credits. A SMBD packet needs one credit */
-	rc = wait_event_interruptible(info->wait_send_queue,
-		atomic_read(&info->send_credits) > 0 ||
-		info->transport_status != SMBD_CONNECTED);
-	if (rc)
-		return rc;
-
-	if (info->transport_status != SMBD_CONNECTED) {
-		log_outgoing(ERR, "disconnected not sending\n");
-		return -EAGAIN;
-	}
-	atomic_dec(&info->send_credits);
-
-	request = mempool_alloc(info->request_mempool, GFP_KERNEL);
-	if (!request) {
-		rc = -ENOMEM;
-		goto err_alloc;
-	}
-
-	request->info = info;
-
-	/* Fill in the packet header */
-	packet = smbd_request_payload(request);
-	packet->credits_requested = cpu_to_le16(info->send_credit_target);
-
-	new_credits = manage_credits_prior_sending(info);
-	atomic_add(new_credits, &info->receive_credits);
-	packet->credits_granted = cpu_to_le16(new_credits);
-
-	info->send_immediate = false;
-
-	packet->flags = 0;
-	if (manage_keep_alive_before_sending(info))
-		packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED);
-
-	packet->reserved = 0;
-	if (!size)
-		packet->data_offset = 0;
-	else
-		packet->data_offset = cpu_to_le32(24);
-	packet->data_length = cpu_to_le32(size);
-	packet->remaining_data_length = cpu_to_le32(remaining_data_length);
-	packet->padding = 0;
-
-	log_outgoing(INFO, "credits_requested=%d credits_granted=%d "
-		"data_offset=%d data_length=%d remaining_data_length=%d\n",
-		le16_to_cpu(packet->credits_requested),
-		le16_to_cpu(packet->credits_granted),
-		le32_to_cpu(packet->data_offset),
-		le32_to_cpu(packet->data_length),
-		le32_to_cpu(packet->remaining_data_length));
-
-	/* Map the packet to DMA */
-	header_length = sizeof(struct smbd_data_transfer);
-	/* If this is a packet without payload, don't send padding */
-	if (!size)
-		header_length = offsetof(struct smbd_data_transfer, padding);
-
-	request->num_sge = 1;
-	request->sge[0].addr = ib_dma_map_single(info->id->device,
-						 (void *)packet,
-						 header_length,
-						 DMA_TO_DEVICE);
-	if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
-		mempool_free(request, info->request_mempool);
-		rc = -EIO;
-		goto err_dma;
-	}
-
-	request->sge[0].length = header_length;
-	request->sge[0].lkey = info->pd->local_dma_lkey;
-
-	*request_out = request;
-	return 0;
-
-err_dma:
-	/* roll back receive credits */
-	spin_lock(&info->lock_new_credits_offered);
-	info->new_credits_offered += new_credits;
-	spin_unlock(&info->lock_new_credits_offered);
-	atomic_sub(new_credits, &info->receive_credits);
-
-err_alloc:
-	/* roll back send credits */
-	atomic_inc(&info->send_credits);
-
-	return rc;
-}
-
-static void smbd_destroy_header(struct smbd_connection *info,
-		struct smbd_request *request)
-{
-
-	ib_dma_unmap_single(info->id->device,
-			    request->sge[0].addr,
-			    request->sge[0].length,
-			    DMA_TO_DEVICE);
-	mempool_free(request, info->request_mempool);
-	atomic_inc(&info->send_credits);
-}
-
 /* Post the send request */
 static int smbd_post_send(struct smbd_connection *info,
 		struct smbd_request *request)
@@ -951,20 +827,9 @@ static int smbd_post_send(struct smbd_connection *info,
 	send_wr.opcode = IB_WR_SEND;
 	send_wr.send_flags = IB_SEND_SIGNALED;

-wait_sq:
-	wait_event(info->wait_post_send,
-		atomic_read(&info->send_pending) < info->send_credit_target);
-	if (unlikely(atomic_inc_return(&info->send_pending) >
-				info->send_credit_target)) {
-		atomic_dec(&info->send_pending);
-		goto wait_sq;
-	}
-
 	rc = ib_post_send(info->id->qp, &send_wr, NULL);
 	if (rc) {
 		log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
-		if (atomic_dec_and_test(&info->send_pending))
-			wake_up(&info->wait_send_pending);
 		smbd_disconnect_rdma_connection(info);
 		rc = -EAGAIN;
 	} else
@@ -980,14 +845,107 @@ static int smbd_post_send_sgl(struct smbd_connection *info,
 {
 	int num_sgs;
 	int i, rc;
+	int header_length;
 	struct smbd_request *request;
+	struct smbd_data_transfer *packet;
+	int new_credits;
 	struct scatterlist *sg;

-	rc = smbd_create_header(
-		info, data_length, remaining_data_length, &request);
+wait_credit:
+	/* Wait for send credits. A SMBD packet needs one credit */
+	rc = wait_event_interruptible(info->wait_send_queue,
+		atomic_read(&info->send_credits) > 0 ||
+		info->transport_status != SMBD_CONNECTED);
 	if (rc)
-		return rc;
+		goto err_wait_credit;
+
+	if (info->transport_status != SMBD_CONNECTED) {
+		log_outgoing(ERR, "disconnected not sending on wait_credit\n");
+		rc = -EAGAIN;
+		goto err_wait_credit;
+	}
+	if (unlikely(atomic_dec_return(&info->send_credits) < 0)) {
+		atomic_inc(&info->send_credits);
+		goto wait_credit;
+	}
+
+wait_send_queue:
+	wait_event(info->wait_post_send,
+		atomic_read(&info->send_pending) < info->send_credit_target ||
+		info->transport_status != SMBD_CONNECTED);
+
+	if (info->transport_status != SMBD_CONNECTED) {
+		log_outgoing(ERR, "disconnected not sending on wait_send_queue\n");
+		rc = -EAGAIN;
+		goto err_wait_send_queue;
+	}
+
+	if (unlikely(atomic_inc_return(&info->send_pending) >
+				info->send_credit_target)) {
+		atomic_dec(&info->send_pending);
+		goto wait_send_queue;
+	}
+
+	request = mempool_alloc(info->request_mempool, GFP_KERNEL);
+	if (!request) {
+		rc = -ENOMEM;
+		goto err_alloc;
+	}
+
+	request->info = info;
+
+	/* Fill in the packet header */
+	packet = smbd_request_payload(request);
+	packet->credits_requested = cpu_to_le16(info->send_credit_target);
+
+	new_credits = manage_credits_prior_sending(info);
+	atomic_add(new_credits, &info->receive_credits);
+	packet->credits_granted = cpu_to_le16(new_credits);
+
+	info->send_immediate = false;
+
+	packet->flags = 0;
+	if (manage_keep_alive_before_sending(info))
+		packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED);
+
+	packet->reserved = 0;
+	if (!data_length)
+		packet->data_offset = 0;
+	else
+		packet->data_offset = cpu_to_le32(24);
+	packet->data_length = cpu_to_le32(data_length);
+	packet->remaining_data_length = cpu_to_le32(remaining_data_length);
+	packet->padding = 0;
+
+	log_outgoing(INFO, "credits_requested=%d credits_granted=%d "
+		"data_offset=%d data_length=%d remaining_data_length=%d\n",
+		le16_to_cpu(packet->credits_requested),
+		le16_to_cpu(packet->credits_granted),
+		le32_to_cpu(packet->data_offset),
+		le32_to_cpu(packet->data_length),
+		le32_to_cpu(packet->remaining_data_length));
+
+	/* Map the packet to DMA */
+	header_length = sizeof(struct smbd_data_transfer);
+	/* If this is a packet without payload, don't send padding */
+	if (!data_length)
+		header_length = offsetof(struct smbd_data_transfer, padding);
+
+	request->num_sge = 1;
+	request->sge[0].addr = ib_dma_map_single(info->id->device,
+						 (void *)packet,
+						 header_length,
+						 DMA_TO_DEVICE);
+	if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
+		rc = -EIO;
+		request->sge[0].addr = 0;
+		goto err_dma;
+	}
+
+	request->sge[0].length = header_length;
+	request->sge[0].lkey = info->pd->local_dma_lkey;

 	/* Fill in the packet data payload */
 	num_sgs = sgl ? sg_nents(sgl) : 0;
 	for_each_sg(sgl, sg, num_sgs, i) {
 		request->sge[i+1].addr =
@@ -997,7 +955,7 @@ static int smbd_post_send_sgl(struct smbd_connection *info,
 			info->id->device, request->sge[i+1].addr)) {
 			rc = -EIO;
 			request->sge[i+1].addr = 0;
-			goto dma_mapping_failure;
+			goto err_dma;
 		}
 		request->sge[i+1].length = sg->length;
 		request->sge[i+1].lkey = info->pd->local_dma_lkey;
@@ -1008,14 +966,30 @@ static int smbd_post_send_sgl(struct smbd_connection *info,
 	if (!rc)
 		return 0;

-dma_mapping_failure:
-	for (i = 1; i < request->num_sge; i++)
+err_dma:
+	for (i = 0; i < request->num_sge; i++)
 		if (request->sge[i].addr)
 			ib_dma_unmap_single(info->id->device,
 					    request->sge[i].addr,
 					    request->sge[i].length,
 					    DMA_TO_DEVICE);
-	smbd_destroy_header(info, request);
+	mempool_free(request, info->request_mempool);
+
+	/* roll back receive credits and credits to be offered */
+	spin_lock(&info->lock_new_credits_offered);
+	info->new_credits_offered += new_credits;
+	spin_unlock(&info->lock_new_credits_offered);
+	atomic_sub(new_credits, &info->receive_credits);
+
+err_alloc:
+	if (atomic_dec_and_test(&info->send_pending))
+		wake_up(&info->wait_send_pending);
+
+err_wait_send_queue:
+	/* roll back send credits and pending */
+	atomic_inc(&info->send_credits);
+
+err_wait_credit:
 	return rc;
 }