sunrpc: Use sendmsg(MSG_SPLICE_PAGES) rather than sendpage
When transmitting data, call down into TCP using sendmsg with
MSG_SPLICE_PAGES to indicate that content should be spliced rather than
performing sendpage calls to transmit header, data pages and trailer.

Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Chuck Lever <chuck.lever@oracle.com>
cc: Trond Myklebust <trond.myklebust@hammerspace.com>
cc: Anna Schumaker <anna@kernel.org>
cc: Jeff Layton <jlayton@kernel.org>
cc: Jens Axboe <axboe@kernel.dk>
cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 5df5dd03a8
parent 345ee3e812
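For orientation, below is a minimal sketch of the send pattern the commit message describes: already-populated pages are described by a bio_vec array and handed to the socket in a single sendmsg call flagged with MSG_SPLICE_PAGES. It is illustrative only; the helper name and parameters are invented here, and the patch applies the same pattern inside svc_tcp_send_kvec() and svc_tcp_sendmsg() in the diff that follows.

/*
 * Illustrative sketch only, not part of the patch: the helper name and
 * parameters are invented for this example.  The pages described by @bvec
 * must stay unmodified until the data has been transmitted, since they may
 * be referenced by the network stack rather than copied.
 */
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/bvec.h>
#include <linux/uio.h>

static int example_splice_pages_to_sock(struct socket *sock,
					struct bio_vec *bvec,
					unsigned int nr_bvecs, size_t len)
{
	struct msghdr msg = {
		/* Ask the transport to splice the pages rather than copy them. */
		.msg_flags = MSG_SPLICE_PAGES,
	};

	/* Point the message iterator at the caller's pages... */
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, bvec, nr_bvecs, len);

	/* ...and hand them all to the socket with one sendmsg call. */
	return sock_sendmsg(sock, &msg);
}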
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -161,16 +161,15 @@ static inline bool svc_put_not_last(struct svc_serv *serv)
 extern u32 svc_max_payload(const struct svc_rqst *rqstp);
 
 /*
- * RPC Requsts and replies are stored in one or more pages.
+ * RPC Requests and replies are stored in one or more pages.
  * We maintain an array of pages for each server thread.
  * Requests are copied into these pages as they arrive. Remaining
  * pages are available to write the reply into.
  *
- * Pages are sent using ->sendpage so each server thread needs to
- * allocate more to replace those used in sending. To help keep track
- * of these pages we have a receive list where all pages initialy live,
- * and a send list where pages are moved to when there are to be part
- * of a reply.
+ * Pages are sent using ->sendmsg with MSG_SPLICE_PAGES so each server thread
+ * needs to allocate more to replace those used in sending. To help keep track
+ * of these pages we have a receive list where all pages initialy live, and a
+ * send list where pages are moved to when there are to be part of a reply.
  *
  * We use xdr_buf for holding responses as it fits well with NFS
  * read responses (that have a header, and some data pages, and possibly
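As an aside, the buffer referred to in that comment is an xdr_buf: a head kvec, a run of whole pages, and a tail kvec. The small sketch below is illustrative only (not from the patch) and just spells out that layout; it is the page run that the new code splices with MSG_SPLICE_PAGES.

/*
 * Illustrative sketch only, not part of the patch: an RPC reply lives in an
 * xdr_buf made up of a head kvec, page_len bytes carried in whole pages, and
 * a tail kvec.
 */
#include <linux/sunrpc/xdr.h>

static size_t example_xdr_reply_length(const struct xdr_buf *xdr)
{
	/* Head + page payload + tail together make up the reply. */
	return xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len;
}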
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1203,13 +1203,14 @@ err_noclose:
 static int svc_tcp_send_kvec(struct socket *sock, const struct kvec *vec,
 			     int flags)
 {
-	return kernel_sendpage(sock, virt_to_page(vec->iov_base),
-			       offset_in_page(vec->iov_base),
-			       vec->iov_len, flags);
+	struct msghdr msg = { .msg_flags = MSG_SPLICE_PAGES | flags, };
+
+	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, vec, 1, vec->iov_len);
+	return sock_sendmsg(sock, &msg);
 }
 
 /*
- * kernel_sendpage() is used exclusively to reduce the number of
+ * MSG_SPLICE_PAGES is used exclusively to reduce the number of
  * copy operations in this path. Therefore the caller must ensure
  * that the pages backing @xdr are unchanging.
  *
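Restating the comment above in code form, here is a hedged sketch with a hypothetical helper name that mirrors what the new svc_tcp_send_kvec() does for a linear buffer. Because MSG_SPLICE_PAGES allows the lower layers to reference the backing memory rather than copy it, the buffer must not be modified until transmission has completed.

/*
 * Illustrative sketch only, not part of the patch; the helper name is
 * hypothetical and mirrors the new svc_tcp_send_kvec() above.
 */
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

static int example_send_linear_buf(struct socket *sock, void *buf, size_t len,
				   int flags)
{
	struct kvec vec = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { .msg_flags = MSG_SPLICE_PAGES | flags, };

	/*
	 * The memory behind @buf may be referenced rather than copied, so it
	 * must not be reused or modified until transmission has completed.
	 */
	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &vec, 1, len);
	return sock_sendmsg(sock, &msg);
}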
@@ -1249,28 +1250,13 @@ static int svc_tcp_sendmsg(struct socket *sock, struct xdr_buf *xdr,
 	if (ret != head->iov_len)
 		goto out;
 
-	if (xdr->page_len) {
-		unsigned int offset, len, remaining;
-		struct bio_vec *bvec;
-
-		bvec = xdr->bvec + (xdr->page_base >> PAGE_SHIFT);
-		offset = offset_in_page(xdr->page_base);
-		remaining = xdr->page_len;
-		while (remaining > 0) {
-			len = min(remaining, bvec->bv_len - offset);
-			ret = kernel_sendpage(sock, bvec->bv_page,
-					      bvec->bv_offset + offset,
-					      len, 0);
-			if (ret < 0)
-				return ret;
-			*sentp += ret;
-			if (ret != len)
-				goto out;
-			remaining -= len;
-			offset = 0;
-			bvec++;
-		}
-	}
+	msg.msg_flags = MSG_SPLICE_PAGES;
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, xdr->bvec,
+		      xdr_buf_pagecount(xdr), xdr->page_len);
+	ret = sock_sendmsg(sock, &msg);
+	if (ret < 0)
+		return ret;
+	*sentp += ret;
 
 	if (tail->iov_len) {
 		ret = svc_tcp_send_kvec(sock, tail, 0);