ip: convert tcp_sendmsg() to iov_iter primitives
The patch is actually smaller than it seems to be - most of it is unindenting the inner loop body in tcp_sendmsg() itself.

The bit in tcp_input.c is going to get reverted very soon - that's what memcpy_from_msg() will become - but not in this commit; let's keep it reasonably contained.

There's one potentially subtle change here: in case of a short copy from userland, mainline tcp_send_syn_data() discards the skb it has allocated and falls back to the normal path, where we'll send as much as possible after rereading the same data. This patch trims the SYN+data skb instead - that way we don't need to copy from the same place twice.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
commit 57be5bdad7
parent cacdc7d2f9
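The conversion below leans on one property of iov_iter: the iterator carries its own position and remaining count, and copy_from_iter()/csum_and_copy_from_iter() advance it as they consume data, so the caller no longer walks struct iovec segments or keeps its own from/seglen/offset bookkeeping. The following is a minimal userspace sketch of that pattern, not kernel code; struct toy_iter, toy_count() and toy_copy_from_iter() are illustrative stand-ins for struct iov_iter, iov_iter_count() and copy_from_iter().

/* Toy model of the iov_iter consumption pattern the patch switches to.
 * All names here are hypothetical stand-ins, not the kernel API. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct toy_iter {		/* stand-in for struct iov_iter */
	const char *data;
	size_t count;		/* bytes left to consume */
};

static size_t toy_count(const struct toy_iter *i)	/* ~ iov_iter_count() */
{
	return i->count;
}

/* ~ copy_from_iter(): copies up to len bytes and advances the iterator.
 * In the real API a short return signals a fault, not end of data. */
static size_t toy_copy_from_iter(void *to, size_t len, struct toy_iter *i)
{
	if (len > i->count)
		len = i->count;
	memcpy(to, i->data, len);
	i->data += len;
	i->count -= len;
	return len;
}

int main(void)
{
	char payload[] = "hello iov_iter";
	struct toy_iter it = { payload, sizeof(payload) - 1 };
	char chunk[4];

	/* Shape of the new tcp_sendmsg() inner loop: one loop over the
	 * iterator, no outer per-iovec loop and no manual offset handling. */
	while (toy_count(&it)) {
		size_t copy = toy_copy_from_iter(chunk, sizeof(chunk), &it);
		printf("copied %zu bytes, %zu left\n", copy, toy_count(&it));
	}
	return 0;
}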
include/net/sock.h

@@ -1803,27 +1803,25 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
 }
 
 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
-					   char __user *from, char *to,
+					   struct iov_iter *from, char *to,
 					   int copy, int offset)
 {
 	if (skb->ip_summed == CHECKSUM_NONE) {
-		int err = 0;
-		__wsum csum = csum_and_copy_from_user(from, to, copy, 0, &err);
-		if (err)
-			return err;
+		__wsum csum = 0;
+		if (csum_and_copy_from_iter(to, copy, &csum, from) != copy)
+			return -EFAULT;
 		skb->csum = csum_block_add(skb->csum, csum, offset);
 	} else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
-		if (!access_ok(VERIFY_READ, from, copy) ||
-		    __copy_from_user_nocache(to, from, copy))
+		if (copy_from_iter_nocache(to, copy, from) != copy)
 			return -EFAULT;
-	} else if (copy_from_user(to, from, copy))
+	} else if (copy_from_iter(to, copy, from) != copy)
 		return -EFAULT;
 
 	return 0;
 }
 
 static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
-				       char __user *from, int copy)
+				       struct iov_iter *from, int copy)
 {
 	int err, offset = skb->len;
 
@@ -1835,7 +1833,7 @@ static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
 	return err;
 }
 
-static inline int skb_copy_to_page_nocache(struct sock *sk, char __user *from,
+static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
 					   struct sk_buff *skb,
 					   struct page *page,
 					   int off, int copy)
net/ipv4/tcp.c

@@ -1067,11 +1067,10 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
 int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		size_t size)
 {
-	const struct iovec *iov;
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
-	int iovlen, flags, err, copied = 0;
-	int mss_now = 0, size_goal, copied_syn = 0, offset = 0;
+	int flags, err, copied = 0;
+	int mss_now = 0, size_goal, copied_syn = 0;
 	bool sg;
 	long timeo;
 
@@ -1084,7 +1083,6 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			goto out;
 		else if (err)
 			goto out_err;
-		offset = copied_syn;
 	}
 
 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
@@ -1118,8 +1116,6 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	mss_now = tcp_send_mss(sk, &size_goal, flags);
 
 	/* Ok commence sending. */
-	iovlen = msg->msg_iter.nr_segs;
-	iov = msg->msg_iter.iov;
 	copied = 0;
 
 	err = -EPIPE;
@@ -1128,22 +1124,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
 	sg = !!(sk->sk_route_caps & NETIF_F_SG);
 
-	while (--iovlen >= 0) {
-		size_t seglen = iov->iov_len;
-		unsigned char __user *from = iov->iov_base;
-
-		iov++;
-		if (unlikely(offset > 0)) { /* Skip bytes copied in SYN */
-			if (offset >= seglen) {
-				offset -= seglen;
-				continue;
-			}
-			seglen -= offset;
-			from += offset;
-			offset = 0;
-		}
-
-		while (seglen > 0) {
+	while (iov_iter_count(&msg->msg_iter)) {
 		int copy = 0;
 		int max = size_goal;
 
@@ -1187,14 +1168,14 @@ new_segment:
 		}
 
 		/* Try to append data to the end of skb. */
-		if (copy > seglen)
-			copy = seglen;
+		if (copy > iov_iter_count(&msg->msg_iter))
+			copy = iov_iter_count(&msg->msg_iter);
 
 		/* Where to copy to? */
 		if (skb_availroom(skb) > 0) {
 			/* We have some space in skb head. Superb! */
 			copy = min_t(int, copy, skb_availroom(skb));
-			err = skb_add_data_nocache(sk, skb, from, copy);
+			err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
 			if (err)
 				goto do_fault;
 		} else {
@@ -1219,7 +1200,7 @@ new_segment:
 			if (!sk_wmem_schedule(sk, copy))
 				goto wait_for_memory;
 
-			err = skb_copy_to_page_nocache(sk, from, skb,
+			err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
 						       pfrag->page,
 						       pfrag->offset,
 						       copy);
@@ -1244,9 +1225,8 @@ new_segment:
 		TCP_SKB_CB(skb)->end_seq += copy;
 		tcp_skb_pcount_set(skb, 0);
 
-		from += copy;
 		copied += copy;
-		if ((seglen -= copy) == 0 && iovlen == 0) {
+		if (!iov_iter_count(&msg->msg_iter)) {
 			tcp_tx_timestamp(sk, skb);
 			goto out;
 		}
@@ -1273,7 +1253,6 @@ wait_for_memory:
 
 		mss_now = tcp_send_mss(sk, &size_goal, flags);
 	}
-	}
 
 out:
 	if (copied)
net/ipv4/tcp_input.c

@@ -4368,7 +4368,7 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
 	if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
 		goto err_free;
 
-	if (memcpy_from_msg(skb_put(skb, size), msg, size))
+	if (copy_from_iter(skb_put(skb, size), size, &msg->msg_iter) != size)
 		goto err_free;
 
 	TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
net/ipv4/tcp_output.c

@@ -3055,7 +3055,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_fastopen_request *fo = tp->fastopen_req;
-	int syn_loss = 0, space, err = 0;
+	int syn_loss = 0, space, err = 0, copied;
 	unsigned long last_syn_loss = 0;
 	struct sk_buff *syn_data;
 
@@ -3093,11 +3093,16 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
 		goto fallback;
 	syn_data->ip_summed = CHECKSUM_PARTIAL;
 	memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
-	if (unlikely(memcpy_fromiovecend(skb_put(syn_data, space),
-					 fo->data->msg_iter.iov, 0, space))) {
+	copied = copy_from_iter(skb_put(syn_data, space), space,
+				&fo->data->msg_iter);
+	if (unlikely(!copied)) {
 		kfree_skb(syn_data);
 		goto fallback;
 	}
+	if (copied != space) {
+		skb_trim(syn_data, copied);
+		space = copied;
+	}
 
 	/* No more data pending in inet_wait_for_connect() */
 	if (space == fo->size)