nvmet-tcp: optimize tcp stack TX when data digest is used

If we have a 4-byte data digest to send to the wire, but more sends are
queued behind it, set MSG_MORE to tell the TCP stack that more data is
coming, so it can coalesce the digest with the next segment instead of
transmitting it on its own.
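
For illustration only, a minimal userspace sketch of the same pattern is
below; the socket descriptor, digest value, and more_queued hint are
assumptions for the example, and the target code itself uses
kernel_sendmsg() with msg.msg_flags as shown in the diff further down:

/*
 * Minimal userspace sketch of the MSG_MORE idea, not the nvmet-tcp code:
 * when a small 4-byte "digest" trailer is followed by more queued data,
 * flag it with MSG_MORE so the TCP stack can coalesce it with the next
 * send instead of emitting a tiny segment on its own.
 */
#include <stdint.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

static ssize_t send_digest(int sock, uint32_t digest, int more_queued)
{
	struct iovec iov = {
		.iov_base = &digest,
		.iov_len  = sizeof(digest),
	};
	struct msghdr msg = {
		.msg_iov    = &iov,
		.msg_iovlen = 1,
	};
	int flags = MSG_DONTWAIT;

	if (more_queued)
		flags |= MSG_MORE;	/* more data follows; let TCP batch it */

	return sendmsg(sock, &msg, flags);
}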

Reviewed-by: Mark Wunderlich <mark.wunderlich@intel.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Keith Busch <kbusch@kernel.org>
commit e90d172b11 (parent 8d8a50e20d)
Author: Sagi Grimberg, 2020-03-12 16:06:39 -07:00
Committed by: Keith Busch

@@ -626,7 +626,7 @@ static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 	return 1;
 }
 
-static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd)
+static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 {
 	struct nvmet_tcp_queue *queue = cmd->queue;
 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
@@ -636,6 +636,9 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd)
 	};
 	int ret;
 
+	if (!last_in_batch && cmd->queue->send_list_len)
+		msg.msg_flags |= MSG_MORE;
+
 	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
 	if (unlikely(ret <= 0))
 		return ret;
@@ -676,7 +679,7 @@ static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
 	}
 
 	if (cmd->state == NVMET_TCP_SEND_DDGST) {
-		ret = nvmet_try_send_ddgst(cmd);
+		ret = nvmet_try_send_ddgst(cmd, last_in_batch);
 		if (ret <= 0)
 			goto done_send;
 	}
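
Note that the hint is deliberately conservative: MSG_MORE is only set when
this digest is not the last send of the batch and send_list_len shows
further commands queued, so the last send of a batch is not held back
waiting for data that may never come.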