nvmet-tcp: Use sendmsg(MSG_SPLICE_PAGES) rather than sendpage
When transmitting data, call down into TCP using a single sendmsg with
MSG_SPLICE_PAGES to indicate that content should be spliced rather than
copied, instead of calling sendpage.

Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Sagi Grimberg <sagi@grimberg.me>
Acked-by: Willem de Bruijn <willemb@google.com>
cc: Keith Busch <kbusch@kernel.org>
cc: Jens Axboe <axboe@fb.com>
cc: Christoph Hellwig <hch@lst.de>
cc: Chaitanya Kulkarni <kch@nvidia.com>
cc: Jens Axboe <axboe@kernel.dk>
cc: Matthew Wilcox <willy@infradead.org>
cc: linux-nvme@lists.infradead.org
Link: https://lore.kernel.org/r/20230623225513.2732256-9-dhowells@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit c336a79983 (parent 7769887817)
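Every hunk below applies the same conversion: the kernel_sendpage() call is replaced by describing the buffer with a bio_vec, attaching it to the msghdr as an ITER_SOURCE iterator, and sending it with a single sock_sendmsg() carrying MSG_SPLICE_PAGES. For reference, a minimal sketch of that pattern for a virtually-mapped (slab-allocated) buffer; the helper name and parameters here are illustrative, not part of the patch:

/*
 * Illustrative sketch only (not from the patch): send a kernel buffer
 * with MSG_SPLICE_PAGES.  The buffer is described by a bio_vec, wired
 * into the msghdr as an ITER_SOURCE iterator and handed to TCP in one
 * sock_sendmsg() call, which splices the pages in rather than copying.
 */
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/bvec.h>
#include <linux/uio.h>

static int example_splice_send(struct socket *sock, void *buf, size_t len)
{
	struct msghdr msg = {
		.msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
	};
	struct bio_vec bvec;

	/* buf must not live on the stack: TCP keeps references to the pages. */
	bvec_set_virt(&bvec, buf, len);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
	return sock_sendmsg(sock, &msg);
}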
@@ -576,13 +576,17 @@ static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
 
 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
 {
+	struct msghdr msg = {
+		.msg_flags = MSG_DONTWAIT | MSG_MORE | MSG_SPLICE_PAGES,
+	};
+	struct bio_vec bvec;
 	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
 	int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
 	int ret;
 
-	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
-			offset_in_page(cmd->data_pdu) + cmd->offset,
-			left, MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
+	bvec_set_virt(&bvec, (void *)cmd->data_pdu + cmd->offset, left);
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
+	ret = sock_sendmsg(cmd->queue->sock, &msg);
 	if (ret <= 0)
 		return ret;
 
@@ -603,17 +607,21 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 	int ret;
 
 	while (cmd->cur_sg) {
+		struct msghdr msg = {
+			.msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
+		};
 		struct page *page = sg_page(cmd->cur_sg);
+		struct bio_vec bvec;
 		u32 left = cmd->cur_sg->length - cmd->offset;
-		int flags = MSG_DONTWAIT;
 
 		if ((!last_in_batch && cmd->queue->send_list_len) ||
 		    cmd->wbytes_done + left < cmd->req.transfer_len ||
 		    queue->data_digest || !queue->nvme_sq.sqhd_disabled)
-			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
+			msg.msg_flags |= MSG_MORE;
 
-		ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
-					left, flags);
+		bvec_set_page(&bvec, page, left, cmd->offset);
+		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
+		ret = sock_sendmsg(cmd->queue->sock, &msg);
 		if (ret <= 0)
 			return ret;
 
@@ -649,18 +657,20 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
 		bool last_in_batch)
 {
+	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
+	struct bio_vec bvec;
 	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
 	int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
-	int flags = MSG_DONTWAIT;
 	int ret;
 
 	if (!last_in_batch && cmd->queue->send_list_len)
-		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
+		msg.msg_flags |= MSG_MORE;
 	else
-		flags |= MSG_EOR;
+		msg.msg_flags |= MSG_EOR;
 
-	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu),
-		offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags);
+	bvec_set_virt(&bvec, (void *)cmd->rsp_pdu + cmd->offset, left);
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
+	ret = sock_sendmsg(cmd->queue->sock, &msg);
 	if (ret <= 0)
 		return ret;
 	cmd->offset += ret;
@@ -677,18 +687,20 @@ static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
 
 static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 {
+	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
+	struct bio_vec bvec;
 	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
 	int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
-	int flags = MSG_DONTWAIT;
 	int ret;
 
 	if (!last_in_batch && cmd->queue->send_list_len)
-		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
+		msg.msg_flags |= MSG_MORE;
 	else
-		flags |= MSG_EOR;
+		msg.msg_flags |= MSG_EOR;
 
-	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu),
-		offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags);
+	bvec_set_virt(&bvec, (void *)cmd->r2t_pdu + cmd->offset, left);
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
+	ret = sock_sendmsg(cmd->queue->sock, &msg);
 	if (ret <= 0)
 		return ret;
 	cmd->offset += ret;
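The data-transfer hunk above uses the page-based variant of the same pattern: the current scatterlist page is wrapped with bvec_set_page() rather than bvec_set_virt(), and MSG_MORE is set when more sends will follow in the batch. A minimal sketch of that variant, again with an assumed helper name and simplified to one scatterlist entry per call:

/*
 * Illustrative sketch only (not from the patch): splice one scatterlist
 * entry out through a socket.  bvec_set_page() records the page, length
 * and offset; the caller sets "more" when further data will follow.
 */
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/scatterlist.h>
#include <linux/bvec.h>
#include <linux/uio.h>

static int example_splice_send_sg(struct socket *sock, struct scatterlist *sg,
				  bool more)
{
	struct msghdr msg = {
		.msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
	};
	struct bio_vec bvec;

	if (more)
		msg.msg_flags |= MSG_MORE;

	bvec_set_page(&bvec, sg_page(sg), sg->length, sg->offset);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, sg->length);
	return sock_sendmsg(sock, &msg);
}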