unix: uses an atomic type for scm files accounting

So that the scm_stat_{add,del} helpers can be invoked with no
additional lock held.

This cleans up the code a bit and will make the next
patch easier.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author:    Paolo Abeni <pabeni@redhat.com>
Date:      2020-02-28 14:45:21 +01:00
Committer: David S. Miller <davem@davemloft.net>
Commit:    7782040b95 (parent: e955376277)

2 changed files with 7 additions and 16 deletions
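
The commit message is terse about why the lock can go away: nr_fds used to be a plain u32, so the read-modify-write in scm_stat_{add,del}() was only safe while every caller held sk_receive_queue.lock. atomic_add() and atomic_sub() are atomic read-modify-write operations, so concurrent senders and receivers can update the counter without external serialization, and unix_show_fdinfo() can still read it locklessly with atomic_read(). A condensed sketch of the resulting pattern follows; the two wrapper functions are made up for illustration and do not exist in the tree:

/* Sketch only: the accounting pattern this patch adopts. atomic_add(),
 * atomic_sub() and atomic_read() are the stock kernel atomics; the wrapper
 * names are illustrative.
 */
#include <linux/atomic.h>

struct scm_stat {
        atomic_t nr_fds;        /* was: u32 nr_fds, guarded by sk_receive_queue.lock */
};

/* writer side: an atomic read-modify-write, no queue lock required */
static inline void scm_stat_fds_add(struct scm_stat *stat, int nr)
{
        atomic_add(nr, &stat->nr_fds);
}

static inline void scm_stat_fds_sub(struct scm_stat *stat, int nr)
{
        atomic_sub(nr, &stat->nr_fds);
}

/* reader side (fdinfo): a plain lock-free load */
static inline int scm_stat_fds(const struct scm_stat *stat)
{
        return atomic_read(&stat->nr_fds);
}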


@@ -42,7 +42,7 @@ struct unix_skb_parms {
 } __randomize_layout;
 
 struct scm_stat {
-        u32 nr_fds;
+        atomic_t nr_fds;
 };
 
 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))


@@ -690,7 +690,8 @@ static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
 
         if (sk) {
                 u = unix_sk(sock->sk);
-                seq_printf(m, "scm_fds: %u\n", READ_ONCE(u->scm_stat.nr_fds));
+                seq_printf(m, "scm_fds: %u\n",
+                           atomic_read(&u->scm_stat.nr_fds));
         }
 }
 #else
@@ -1602,10 +1603,8 @@ static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
         struct scm_fp_list *fp = UNIXCB(skb).fp;
         struct unix_sock *u = unix_sk(sk);
 
-        lockdep_assert_held(&sk->sk_receive_queue.lock);
-
         if (unlikely(fp && fp->count))
-                u->scm_stat.nr_fds += fp->count;
+                atomic_add(fp->count, &u->scm_stat.nr_fds);
 }
 
 static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
@@ -1613,10 +1612,8 @@ static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
         struct scm_fp_list *fp = UNIXCB(skb).fp;
         struct unix_sock *u = unix_sk(sk);
 
-        lockdep_assert_held(&sk->sk_receive_queue.lock);
-
         if (unlikely(fp && fp->count))
-                u->scm_stat.nr_fds -= fp->count;
+                atomic_sub(fp->count, &u->scm_stat.nr_fds);
 }
 
 /*
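
For readability, this is roughly how scm_stat_add() reads once the hunk above is applied (scm_stat_del() is the same but with atomic_sub()); the comment is added here for explanation and is not in the file. The lockdep_assert_held() check is dropped because correctness no longer depends on the caller holding sk_receive_queue.lock:

static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
{
        struct scm_fp_list *fp = UNIXCB(skb).fp;
        struct unix_sock *u = unix_sk(sk);

        /* atomic_add() is a full read-modify-write, so concurrent senders
         * can account their SCM_RIGHTS files here without any extra lock.
         */
        if (unlikely(fp && fp->count))
                atomic_add(fp->count, &u->scm_stat.nr_fds);
}
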
@@ -1805,10 +1802,8 @@ restart_locked:
         if (sock_flag(other, SOCK_RCVTSTAMP))
                 __net_timestamp(skb);
         maybe_add_creds(skb, sock, other);
-        spin_lock(&other->sk_receive_queue.lock);
         scm_stat_add(other, skb);
-        __skb_queue_tail(&other->sk_receive_queue, skb);
-        spin_unlock(&other->sk_receive_queue.lock);
+        skb_queue_tail(&other->sk_receive_queue, skb);
         unix_state_unlock(other);
         other->sk_data_ready(other);
         sock_put(other);
@@ -1910,10 +1905,8 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
                         goto pipe_err_free;
 
                 maybe_add_creds(skb, sock, other);
-                spin_lock(&other->sk_receive_queue.lock);
                 scm_stat_add(other, skb);
-                __skb_queue_tail(&other->sk_receive_queue, skb);
-                spin_unlock(&other->sk_receive_queue.lock);
+                skb_queue_tail(&other->sk_receive_queue, skb);
                 unix_state_unlock(other);
                 other->sk_data_ready(other);
                 sent += size;
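
In both send paths the open-coded spin_lock()/__skb_queue_tail()/spin_unlock() sequence is replaced by skb_queue_tail(), which takes the queue spinlock internally. Queueing the skb therefore remains serialized; only the nr_fds accounting moves out from under the lock. For reference, skb_queue_tail() is essentially the following (as implemented in net/core/skbuff.c):

/* The locked counterpart of __skb_queue_tail(): the receive-queue lock is
 * still taken, just inside the helper rather than open-coded in af_unix.c.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
        unsigned long flags;

        spin_lock_irqsave(&list->lock, flags);
        __skb_queue_tail(list, newsk);
        spin_unlock_irqrestore(&list->lock, flags);
}
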
@@ -2409,9 +2402,7 @@ unlock:
                         sk_peek_offset_bwd(sk, chunk);
 
                         if (UNIXCB(skb).fp) {
-                                spin_lock(&sk->sk_receive_queue.lock);
                                 scm_stat_del(sk, skb);
-                                spin_unlock(&sk->sk_receive_queue.lock);
                                 unix_detach_fds(&scm, skb);
                         }