Merge branch 'net-cleanup-datagram-receive-helpers'
Paolo Abeni says: ==================== net: cleanup datagram receive helpers Several receive helpers have an optional destructor argument, which uglifies the code a bit and is taxed by retpoline overhead. This series refactors the code so that we can drop that optional argument, cleaning up the helpers a bit and avoiding an indirect call in the fast path. The first patch refactors the callers a bit, so that the second patch, which actually drops the argument, is more straightforward. v1 -> v2: - call scm_stat_del() only when not peeking - Kirill - fix build issue with CONFIG_INET_ESPINTCP ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit
9a834f9b04
@ -3514,23 +3514,15 @@ int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
|
|||||||
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
|
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
|
||||||
struct sk_buff_head *queue,
|
struct sk_buff_head *queue,
|
||||||
unsigned int flags,
|
unsigned int flags,
|
||||||
void (*destructor)(struct sock *sk,
|
|
||||||
struct sk_buff *skb),
|
|
||||||
int *off, int *err,
|
int *off, int *err,
|
||||||
struct sk_buff **last);
|
struct sk_buff **last);
|
||||||
struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
|
struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
|
||||||
struct sk_buff_head *queue,
|
struct sk_buff_head *queue,
|
||||||
unsigned int flags,
|
unsigned int flags, int *off, int *err,
|
||||||
void (*destructor)(struct sock *sk,
|
|
||||||
struct sk_buff *skb),
|
|
||||||
int *off, int *err,
|
|
||||||
struct sk_buff **last);
|
struct sk_buff **last);
|
||||||
struct sk_buff *__skb_recv_datagram(struct sock *sk,
|
struct sk_buff *__skb_recv_datagram(struct sock *sk,
|
||||||
struct sk_buff_head *sk_queue,
|
struct sk_buff_head *sk_queue,
|
||||||
unsigned int flags,
|
unsigned int flags, int *off, int *err);
|
||||||
void (*destructor)(struct sock *sk,
|
|
||||||
struct sk_buff *skb),
|
|
||||||
int *off, int *err);
|
|
||||||
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
|
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
|
||||||
int *err);
|
int *err);
|
||||||
__poll_t datagram_poll(struct file *file, struct socket *sock,
|
__poll_t datagram_poll(struct file *file, struct socket *sock,
|
||||||
|
@ -42,7 +42,7 @@ struct unix_skb_parms {
|
|||||||
} __randomize_layout;
|
} __randomize_layout;
|
||||||
|
|
||||||
struct scm_stat {
|
struct scm_stat {
|
||||||
u32 nr_fds;
|
atomic_t nr_fds;
|
||||||
};
|
};
|
||||||
|
|
||||||
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
|
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
|
||||||
|
@ -166,8 +166,6 @@ done:
|
|||||||
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
|
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
|
||||||
struct sk_buff_head *queue,
|
struct sk_buff_head *queue,
|
||||||
unsigned int flags,
|
unsigned int flags,
|
||||||
void (*destructor)(struct sock *sk,
|
|
||||||
struct sk_buff *skb),
|
|
||||||
int *off, int *err,
|
int *off, int *err,
|
||||||
struct sk_buff **last)
|
struct sk_buff **last)
|
||||||
{
|
{
|
||||||
@ -198,8 +196,6 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
|
|||||||
refcount_inc(&skb->users);
|
refcount_inc(&skb->users);
|
||||||
} else {
|
} else {
|
||||||
__skb_unlink(skb, queue);
|
__skb_unlink(skb, queue);
|
||||||
if (destructor)
|
|
||||||
destructor(sk, skb);
|
|
||||||
}
|
}
|
||||||
*off = _off;
|
*off = _off;
|
||||||
return skb;
|
return skb;
|
||||||
@ -212,7 +208,6 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
|
|||||||
* @sk: socket
|
* @sk: socket
|
||||||
* @queue: socket queue from which to receive
|
* @queue: socket queue from which to receive
|
||||||
* @flags: MSG\_ flags
|
* @flags: MSG\_ flags
|
||||||
* @destructor: invoked under the receive lock on successful dequeue
|
|
||||||
* @off: an offset in bytes to peek skb from. Returns an offset
|
* @off: an offset in bytes to peek skb from. Returns an offset
|
||||||
* within an skb where data actually starts
|
* within an skb where data actually starts
|
||||||
* @err: error code returned
|
* @err: error code returned
|
||||||
@ -245,10 +240,7 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
|
|||||||
*/
|
*/
|
||||||
struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
|
struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
|
||||||
struct sk_buff_head *queue,
|
struct sk_buff_head *queue,
|
||||||
unsigned int flags,
|
unsigned int flags, int *off, int *err,
|
||||||
void (*destructor)(struct sock *sk,
|
|
||||||
struct sk_buff *skb),
|
|
||||||
int *off, int *err,
|
|
||||||
struct sk_buff **last)
|
struct sk_buff **last)
|
||||||
{
|
{
|
||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
@ -269,8 +261,8 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
|
|||||||
* However, this function was correct in any case. 8)
|
* However, this function was correct in any case. 8)
|
||||||
*/
|
*/
|
||||||
spin_lock_irqsave(&queue->lock, cpu_flags);
|
spin_lock_irqsave(&queue->lock, cpu_flags);
|
||||||
skb = __skb_try_recv_from_queue(sk, queue, flags, destructor,
|
skb = __skb_try_recv_from_queue(sk, queue, flags, off, &error,
|
||||||
off, &error, last);
|
last);
|
||||||
spin_unlock_irqrestore(&queue->lock, cpu_flags);
|
spin_unlock_irqrestore(&queue->lock, cpu_flags);
|
||||||
if (error)
|
if (error)
|
||||||
goto no_packet;
|
goto no_packet;
|
||||||
@ -293,10 +285,7 @@ EXPORT_SYMBOL(__skb_try_recv_datagram);
|
|||||||
|
|
||||||
struct sk_buff *__skb_recv_datagram(struct sock *sk,
|
struct sk_buff *__skb_recv_datagram(struct sock *sk,
|
||||||
struct sk_buff_head *sk_queue,
|
struct sk_buff_head *sk_queue,
|
||||||
unsigned int flags,
|
unsigned int flags, int *off, int *err)
|
||||||
void (*destructor)(struct sock *sk,
|
|
||||||
struct sk_buff *skb),
|
|
||||||
int *off, int *err)
|
|
||||||
{
|
{
|
||||||
struct sk_buff *skb, *last;
|
struct sk_buff *skb, *last;
|
||||||
long timeo;
|
long timeo;
|
||||||
@ -304,8 +293,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk,
|
|||||||
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
|
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
|
||||||
|
|
||||||
do {
|
do {
|
||||||
skb = __skb_try_recv_datagram(sk, sk_queue, flags, destructor,
|
skb = __skb_try_recv_datagram(sk, sk_queue, flags, off, err,
|
||||||
off, err, &last);
|
&last);
|
||||||
if (skb)
|
if (skb)
|
||||||
return skb;
|
return skb;
|
||||||
|
|
||||||
@ -326,7 +315,7 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
|
|||||||
|
|
||||||
return __skb_recv_datagram(sk, &sk->sk_receive_queue,
|
return __skb_recv_datagram(sk, &sk->sk_receive_queue,
|
||||||
flags | (noblock ? MSG_DONTWAIT : 0),
|
flags | (noblock ? MSG_DONTWAIT : 0),
|
||||||
NULL, &off, err);
|
&off, err);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(skb_recv_datagram);
|
EXPORT_SYMBOL(skb_recv_datagram);
|
||||||
|
|
||||||
|
@ -1671,10 +1671,11 @@ struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
|
|||||||
error = -EAGAIN;
|
error = -EAGAIN;
|
||||||
do {
|
do {
|
||||||
spin_lock_bh(&queue->lock);
|
spin_lock_bh(&queue->lock);
|
||||||
skb = __skb_try_recv_from_queue(sk, queue, flags,
|
skb = __skb_try_recv_from_queue(sk, queue, flags, off,
|
||||||
udp_skb_destructor,
|
err, &last);
|
||||||
off, err, &last);
|
|
||||||
if (skb) {
|
if (skb) {
|
||||||
|
if (!(flags & MSG_PEEK))
|
||||||
|
udp_skb_destructor(sk, skb);
|
||||||
spin_unlock_bh(&queue->lock);
|
spin_unlock_bh(&queue->lock);
|
||||||
return skb;
|
return skb;
|
||||||
}
|
}
|
||||||
@ -1692,9 +1693,10 @@ struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
|
|||||||
spin_lock(&sk_queue->lock);
|
spin_lock(&sk_queue->lock);
|
||||||
skb_queue_splice_tail_init(sk_queue, queue);
|
skb_queue_splice_tail_init(sk_queue, queue);
|
||||||
|
|
||||||
skb = __skb_try_recv_from_queue(sk, queue, flags,
|
skb = __skb_try_recv_from_queue(sk, queue, flags, off,
|
||||||
udp_skb_dtor_locked,
|
err, &last);
|
||||||
off, err, &last);
|
if (skb && !(flags & MSG_PEEK))
|
||||||
|
udp_skb_dtor_locked(sk, skb);
|
||||||
spin_unlock(&sk_queue->lock);
|
spin_unlock(&sk_queue->lock);
|
||||||
spin_unlock_bh(&queue->lock);
|
spin_unlock_bh(&queue->lock);
|
||||||
if (skb)
|
if (skb)
|
||||||
|
@ -690,7 +690,8 @@ static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
|
|||||||
|
|
||||||
if (sk) {
|
if (sk) {
|
||||||
u = unix_sk(sock->sk);
|
u = unix_sk(sock->sk);
|
||||||
seq_printf(m, "scm_fds: %u\n", READ_ONCE(u->scm_stat.nr_fds));
|
seq_printf(m, "scm_fds: %u\n",
|
||||||
|
atomic_read(&u->scm_stat.nr_fds));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
@ -1602,10 +1603,8 @@ static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
|
|||||||
struct scm_fp_list *fp = UNIXCB(skb).fp;
|
struct scm_fp_list *fp = UNIXCB(skb).fp;
|
||||||
struct unix_sock *u = unix_sk(sk);
|
struct unix_sock *u = unix_sk(sk);
|
||||||
|
|
||||||
lockdep_assert_held(&sk->sk_receive_queue.lock);
|
|
||||||
|
|
||||||
if (unlikely(fp && fp->count))
|
if (unlikely(fp && fp->count))
|
||||||
u->scm_stat.nr_fds += fp->count;
|
atomic_add(fp->count, &u->scm_stat.nr_fds);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
|
static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
|
||||||
@ -1613,10 +1612,8 @@ static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
|
|||||||
struct scm_fp_list *fp = UNIXCB(skb).fp;
|
struct scm_fp_list *fp = UNIXCB(skb).fp;
|
||||||
struct unix_sock *u = unix_sk(sk);
|
struct unix_sock *u = unix_sk(sk);
|
||||||
|
|
||||||
lockdep_assert_held(&sk->sk_receive_queue.lock);
|
|
||||||
|
|
||||||
if (unlikely(fp && fp->count))
|
if (unlikely(fp && fp->count))
|
||||||
u->scm_stat.nr_fds -= fp->count;
|
atomic_sub(fp->count, &u->scm_stat.nr_fds);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -1805,10 +1802,8 @@ restart_locked:
|
|||||||
if (sock_flag(other, SOCK_RCVTSTAMP))
|
if (sock_flag(other, SOCK_RCVTSTAMP))
|
||||||
__net_timestamp(skb);
|
__net_timestamp(skb);
|
||||||
maybe_add_creds(skb, sock, other);
|
maybe_add_creds(skb, sock, other);
|
||||||
spin_lock(&other->sk_receive_queue.lock);
|
|
||||||
scm_stat_add(other, skb);
|
scm_stat_add(other, skb);
|
||||||
__skb_queue_tail(&other->sk_receive_queue, skb);
|
skb_queue_tail(&other->sk_receive_queue, skb);
|
||||||
spin_unlock(&other->sk_receive_queue.lock);
|
|
||||||
unix_state_unlock(other);
|
unix_state_unlock(other);
|
||||||
other->sk_data_ready(other);
|
other->sk_data_ready(other);
|
||||||
sock_put(other);
|
sock_put(other);
|
||||||
@ -1910,10 +1905,8 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
|
|||||||
goto pipe_err_free;
|
goto pipe_err_free;
|
||||||
|
|
||||||
maybe_add_creds(skb, sock, other);
|
maybe_add_creds(skb, sock, other);
|
||||||
spin_lock(&other->sk_receive_queue.lock);
|
|
||||||
scm_stat_add(other, skb);
|
scm_stat_add(other, skb);
|
||||||
__skb_queue_tail(&other->sk_receive_queue, skb);
|
skb_queue_tail(&other->sk_receive_queue, skb);
|
||||||
spin_unlock(&other->sk_receive_queue.lock);
|
|
||||||
unix_state_unlock(other);
|
unix_state_unlock(other);
|
||||||
other->sk_data_ready(other);
|
other->sk_data_ready(other);
|
||||||
sent += size;
|
sent += size;
|
||||||
@ -2113,9 +2106,12 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
|
|||||||
|
|
||||||
skip = sk_peek_offset(sk, flags);
|
skip = sk_peek_offset(sk, flags);
|
||||||
skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
|
skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
|
||||||
scm_stat_del, &skip, &err, &last);
|
&skip, &err, &last);
|
||||||
if (skb)
|
if (skb) {
|
||||||
|
if (!(flags & MSG_PEEK))
|
||||||
|
scm_stat_del(sk, skb);
|
||||||
break;
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
mutex_unlock(&u->iolock);
|
mutex_unlock(&u->iolock);
|
||||||
|
|
||||||
@ -2409,9 +2405,7 @@ unlock:
|
|||||||
sk_peek_offset_bwd(sk, chunk);
|
sk_peek_offset_bwd(sk, chunk);
|
||||||
|
|
||||||
if (UNIXCB(skb).fp) {
|
if (UNIXCB(skb).fp) {
|
||||||
spin_lock(&sk->sk_receive_queue.lock);
|
|
||||||
scm_stat_del(sk, skb);
|
scm_stat_del(sk, skb);
|
||||||
spin_unlock(&sk->sk_receive_queue.lock);
|
|
||||||
unix_detach_fds(&scm, skb);
|
unix_detach_fds(&scm, skb);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -100,7 +100,7 @@ static int espintcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
|
|||||||
|
|
||||||
flags |= nonblock ? MSG_DONTWAIT : 0;
|
flags |= nonblock ? MSG_DONTWAIT : 0;
|
||||||
|
|
||||||
skb = __skb_recv_datagram(sk, &ctx->ike_queue, flags, NULL, &off, &err);
|
skb = __skb_recv_datagram(sk, &ctx->ike_queue, flags, &off, &err);
|
||||||
if (!skb)
|
if (!skb)
|
||||||
return err;
|
return err;
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user