Merge branch 'net-various-KCSAN-inspired-fixes'
Eric Dumazet says: ==================== net: various KCSAN inspired fixes This is a series of minor fixes, mostly dealing with lockless accesses to socket 'sk_ack_backlog', 'sk_max_ack_backlog' and neighbour 'confirmed' fields. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit
3edcc56801
@ -57,8 +57,8 @@ static inline void __ipv4_confirm_neigh(struct net_device *dev, u32 key)
|
||||
unsigned long now = jiffies;
|
||||
|
||||
/* avoid dirtying neighbour */
|
||||
if (n->confirmed != now)
|
||||
n->confirmed = now;
|
||||
if (READ_ONCE(n->confirmed) != now)
|
||||
WRITE_ONCE(n->confirmed, now);
|
||||
}
|
||||
rcu_read_unlock_bh();
|
||||
}
|
||||
|
@ -414,8 +414,8 @@ static inline void __ipv6_confirm_neigh(struct net_device *dev,
|
||||
unsigned long now = jiffies;
|
||||
|
||||
/* avoid dirtying neighbour */
|
||||
if (n->confirmed != now)
|
||||
n->confirmed = now;
|
||||
if (READ_ONCE(n->confirmed) != now)
|
||||
WRITE_ONCE(n->confirmed, now);
|
||||
}
|
||||
rcu_read_unlock_bh();
|
||||
}
|
||||
@ -431,8 +431,8 @@ static inline void __ipv6_confirm_neigh_stub(struct net_device *dev,
|
||||
unsigned long now = jiffies;
|
||||
|
||||
/* avoid dirtying neighbour */
|
||||
if (n->confirmed != now)
|
||||
n->confirmed = now;
|
||||
if (READ_ONCE(n->confirmed) != now)
|
||||
WRITE_ONCE(n->confirmed, now);
|
||||
}
|
||||
rcu_read_unlock_bh();
|
||||
}
|
||||
|
@ -859,17 +859,17 @@ static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
|
||||
|
||||
static inline void sk_acceptq_removed(struct sock *sk)
|
||||
{
|
||||
sk->sk_ack_backlog--;
|
||||
WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog - 1);
|
||||
}
|
||||
|
||||
static inline void sk_acceptq_added(struct sock *sk)
|
||||
{
|
||||
sk->sk_ack_backlog++;
|
||||
WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
|
||||
}
|
||||
|
||||
static inline bool sk_acceptq_is_full(const struct sock *sk)
|
||||
{
|
||||
return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
|
||||
return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1939,8 +1939,8 @@ struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
|
||||
|
||||
static inline void sk_dst_confirm(struct sock *sk)
|
||||
{
|
||||
if (!sk->sk_dst_pending_confirm)
|
||||
sk->sk_dst_pending_confirm = 1;
|
||||
if (!READ_ONCE(sk->sk_dst_pending_confirm))
|
||||
WRITE_ONCE(sk->sk_dst_pending_confirm, 1);
|
||||
}
|
||||
|
||||
static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
|
||||
@ -1950,10 +1950,10 @@ static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
|
||||
unsigned long now = jiffies;
|
||||
|
||||
/* avoid dirtying neighbour */
|
||||
if (n->confirmed != now)
|
||||
n->confirmed = now;
|
||||
if (sk && sk->sk_dst_pending_confirm)
|
||||
sk->sk_dst_pending_confirm = 0;
|
||||
if (READ_ONCE(n->confirmed) != now)
|
||||
WRITE_ONCE(n->confirmed, now);
|
||||
if (sk && READ_ONCE(sk->sk_dst_pending_confirm))
|
||||
WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -109,7 +109,7 @@ static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
|
||||
dev_kfree_skb(skb);
|
||||
goto as_indicate_complete;
|
||||
}
|
||||
sk->sk_ack_backlog++;
|
||||
sk_acceptq_added(sk);
|
||||
skb_queue_tail(&sk->sk_receive_queue, skb);
|
||||
pr_debug("waking sk_sleep(sk) 0x%p\n", sk_sleep(sk));
|
||||
sk->sk_state_change(sk);
|
||||
|
@ -381,7 +381,7 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags,
|
||||
msg->pvc.sap_addr.vpi,
|
||||
msg->pvc.sap_addr.vci);
|
||||
dev_kfree_skb(skb);
|
||||
sk->sk_ack_backlog--;
|
||||
sk_acceptq_removed(sk);
|
||||
if (error) {
|
||||
sigd_enq2(NULL, as_reject, old_vcc, NULL, NULL,
|
||||
&old_vcc->qos, error);
|
||||
|
@ -1384,7 +1384,7 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,
|
||||
|
||||
/* Now attach up the new socket */
|
||||
kfree_skb(skb);
|
||||
sk->sk_ack_backlog--;
|
||||
sk_acceptq_removed(sk);
|
||||
newsock->state = SS_CONNECTED;
|
||||
|
||||
out:
|
||||
|
@ -356,7 +356,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
|
||||
|
||||
make->sk_state = TCP_ESTABLISHED;
|
||||
|
||||
sk->sk_ack_backlog++;
|
||||
sk_acceptq_added(sk);
|
||||
bh_unlock_sock(sk);
|
||||
} else {
|
||||
if (!mine)
|
||||
|
@ -173,7 +173,7 @@ void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh)
|
||||
else
|
||||
release_sock(sk);
|
||||
|
||||
parent->sk_ack_backlog++;
|
||||
sk_acceptq_added(parent);
|
||||
}
|
||||
EXPORT_SYMBOL(bt_accept_enqueue);
|
||||
|
||||
@ -185,7 +185,7 @@ void bt_accept_unlink(struct sock *sk)
|
||||
BT_DBG("sk %p state %d", sk, sk->sk_state);
|
||||
|
||||
list_del_init(&bt_sk(sk)->accept_q);
|
||||
bt_sk(sk)->parent->sk_ack_backlog--;
|
||||
sk_acceptq_removed(bt_sk(sk)->parent);
|
||||
bt_sk(sk)->parent = NULL;
|
||||
sock_put(sk);
|
||||
}
|
||||
|
@ -2052,8 +2052,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
|
||||
goto nla_put_failure;
|
||||
{
|
||||
unsigned long now = jiffies;
|
||||
unsigned int flush_delta = now - tbl->last_flush;
|
||||
unsigned int rand_delta = now - tbl->last_rand;
|
||||
long flush_delta = now - tbl->last_flush;
|
||||
long rand_delta = now - tbl->last_rand;
|
||||
struct neigh_hash_table *nht;
|
||||
struct ndt_config ndc = {
|
||||
.ndtc_key_len = tbl->key_len,
|
||||
|
@ -944,7 +944,7 @@ int inet_dccp_listen(struct socket *sock, int backlog)
|
||||
if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
|
||||
goto out;
|
||||
|
||||
sk->sk_max_ack_backlog = backlog;
|
||||
WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
|
||||
/* Really, if the socket is already in listen state
|
||||
* we can only allow the backlog to be adjusted.
|
||||
*/
|
||||
|
@ -1091,7 +1091,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags,
|
||||
}
|
||||
|
||||
cb = DN_SKB_CB(skb);
|
||||
sk->sk_ack_backlog--;
|
||||
sk_acceptq_removed(sk);
|
||||
newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, kern);
|
||||
if (newsk == NULL) {
|
||||
release_sock(sk);
|
||||
|
@ -328,7 +328,7 @@ static void dn_nsp_conn_init(struct sock *sk, struct sk_buff *skb)
|
||||
return;
|
||||
}
|
||||
|
||||
sk->sk_ack_backlog++;
|
||||
sk_acceptq_added(sk);
|
||||
skb_queue_tail(&sk->sk_receive_queue, skb);
|
||||
sk->sk_state_change(sk);
|
||||
}
|
||||
|
@ -208,7 +208,7 @@ int inet_listen(struct socket *sock, int backlog)
|
||||
if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
|
||||
goto out;
|
||||
|
||||
sk->sk_max_ack_backlog = backlog;
|
||||
WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
|
||||
/* Really, if the socket is already in listen state
|
||||
* we can only allow the backlog to be adjusted.
|
||||
*/
|
||||
|
@ -716,7 +716,7 @@ static void reqsk_timer_handler(struct timer_list *t)
|
||||
* ones are about to clog our table.
|
||||
*/
|
||||
qlen = reqsk_queue_len(queue);
|
||||
if ((qlen << 1) > max(8U, sk_listener->sk_max_ack_backlog)) {
|
||||
if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
|
||||
int young = reqsk_queue_len_young(queue) << 1;
|
||||
|
||||
while (thresh > 2) {
|
||||
|
@ -226,17 +226,17 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
|
||||
r->idiag_timer = 1;
|
||||
r->idiag_retrans = icsk->icsk_retransmits;
|
||||
r->idiag_expires =
|
||||
jiffies_to_msecs(icsk->icsk_timeout - jiffies);
|
||||
jiffies_delta_to_msecs(icsk->icsk_timeout - jiffies);
|
||||
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
|
||||
r->idiag_timer = 4;
|
||||
r->idiag_retrans = icsk->icsk_probes_out;
|
||||
r->idiag_expires =
|
||||
jiffies_to_msecs(icsk->icsk_timeout - jiffies);
|
||||
jiffies_delta_to_msecs(icsk->icsk_timeout - jiffies);
|
||||
} else if (timer_pending(&sk->sk_timer)) {
|
||||
r->idiag_timer = 2;
|
||||
r->idiag_retrans = icsk->icsk_probes_out;
|
||||
r->idiag_expires =
|
||||
jiffies_to_msecs(sk->sk_timer.expires - jiffies);
|
||||
jiffies_delta_to_msecs(sk->sk_timer.expires - jiffies);
|
||||
} else {
|
||||
r->idiag_timer = 0;
|
||||
r->idiag_expires = 0;
|
||||
@ -342,16 +342,13 @@ static int inet_twsk_diag_fill(struct sock *sk,
|
||||
r = nlmsg_data(nlh);
|
||||
BUG_ON(tw->tw_state != TCP_TIME_WAIT);
|
||||
|
||||
tmo = tw->tw_timer.expires - jiffies;
|
||||
if (tmo < 0)
|
||||
tmo = 0;
|
||||
|
||||
inet_diag_msg_common_fill(r, sk);
|
||||
r->idiag_retrans = 0;
|
||||
|
||||
r->idiag_state = tw->tw_substate;
|
||||
r->idiag_timer = 3;
|
||||
r->idiag_expires = jiffies_to_msecs(tmo);
|
||||
tmo = tw->tw_timer.expires - jiffies;
|
||||
r->idiag_expires = jiffies_delta_to_msecs(tmo);
|
||||
r->idiag_rqueue = 0;
|
||||
r->idiag_wqueue = 0;
|
||||
r->idiag_uid = 0;
|
||||
@ -385,7 +382,7 @@ static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
|
||||
offsetof(struct sock, sk_cookie));
|
||||
|
||||
tmo = inet_reqsk(sk)->rsk_timer.expires - jiffies;
|
||||
r->idiag_expires = (tmo >= 0) ? jiffies_to_msecs(tmo) : 0;
|
||||
r->idiag_expires = jiffies_delta_to_msecs(tmo);
|
||||
r->idiag_rqueue = 0;
|
||||
r->idiag_wqueue = 0;
|
||||
r->idiag_uid = 0;
|
||||
|
@ -3225,8 +3225,8 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
|
||||
* tcpi_unacked -> Number of children ready for accept()
|
||||
* tcpi_sacked -> max backlog
|
||||
*/
|
||||
info->tcpi_unacked = sk->sk_ack_backlog;
|
||||
info->tcpi_sacked = sk->sk_max_ack_backlog;
|
||||
info->tcpi_unacked = READ_ONCE(sk->sk_ack_backlog);
|
||||
info->tcpi_sacked = READ_ONCE(sk->sk_max_ack_backlog);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -21,8 +21,8 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
|
||||
struct tcp_info *info = _info;
|
||||
|
||||
if (inet_sk_state_load(sk) == TCP_LISTEN) {
|
||||
r->idiag_rqueue = sk->sk_ack_backlog;
|
||||
r->idiag_wqueue = sk->sk_max_ack_backlog;
|
||||
r->idiag_rqueue = READ_ONCE(sk->sk_ack_backlog);
|
||||
r->idiag_wqueue = READ_ONCE(sk->sk_max_ack_backlog);
|
||||
} else if (sk->sk_type == SOCK_STREAM) {
|
||||
const struct tcp_sock *tp = tcp_sk(sk);
|
||||
|
||||
|
@ -2451,7 +2451,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
|
||||
|
||||
state = inet_sk_state_load(sk);
|
||||
if (state == TCP_LISTEN)
|
||||
rx_queue = sk->sk_ack_backlog;
|
||||
rx_queue = READ_ONCE(sk->sk_ack_backlog);
|
||||
else
|
||||
/* Because we don't lock the socket,
|
||||
* we might find a transient negative value.
|
||||
|
@ -1891,7 +1891,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
|
||||
|
||||
state = inet_sk_state_load(sp);
|
||||
if (state == TCP_LISTEN)
|
||||
rx_queue = sp->sk_ack_backlog;
|
||||
rx_queue = READ_ONCE(sp->sk_ack_backlog);
|
||||
else
|
||||
/* Because we don't lock the socket,
|
||||
* we might find a transient negative value.
|
||||
|
@ -705,7 +705,7 @@ static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags,
|
||||
|
||||
/* put original socket back into a clean listen state. */
|
||||
sk->sk_state = TCP_LISTEN;
|
||||
sk->sk_ack_backlog--;
|
||||
sk_acceptq_removed(sk);
|
||||
dprintk("%s: ok success on %02X, client on %02X\n", __func__,
|
||||
llc_sk(sk)->addr.sllc_sap, newllc->daddr.lsap);
|
||||
frees:
|
||||
|
@ -906,7 +906,7 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags,
|
||||
/* Now attach up the new socket */
|
||||
skb->sk = NULL;
|
||||
kfree_skb(skb);
|
||||
sk->sk_ack_backlog--;
|
||||
sk_acceptq_removed(sk);
|
||||
|
||||
out_release:
|
||||
release_sock(sk);
|
||||
@ -1011,7 +1011,7 @@ int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct ros
|
||||
make_rose->va = 0;
|
||||
make_rose->vr = 0;
|
||||
make_rose->vl = 0;
|
||||
sk->sk_ack_backlog++;
|
||||
sk_acceptq_added(sk);
|
||||
|
||||
rose_insert_socket(make);
|
||||
|
||||
|
@ -521,7 +521,7 @@ META_COLLECTOR(int_sk_ack_bl)
|
||||
*err = -1;
|
||||
return;
|
||||
}
|
||||
dst->value = sk->sk_ack_backlog;
|
||||
dst->value = READ_ONCE(sk->sk_ack_backlog);
|
||||
}
|
||||
|
||||
META_COLLECTOR(int_sk_max_ack_bl)
|
||||
@ -532,7 +532,7 @@ META_COLLECTOR(int_sk_max_ack_bl)
|
||||
*err = -1;
|
||||
return;
|
||||
}
|
||||
dst->value = sk->sk_max_ack_backlog;
|
||||
dst->value = READ_ONCE(sk->sk_max_ack_backlog);
|
||||
}
|
||||
|
||||
META_COLLECTOR(int_sk_prio)
|
||||
|
@ -324,7 +324,7 @@ void sctp_association_free(struct sctp_association *asoc)
|
||||
* socket.
|
||||
*/
|
||||
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
|
||||
sk->sk_ack_backlog--;
|
||||
sk_acceptq_removed(sk);
|
||||
}
|
||||
|
||||
/* Mark as dead, so other users can know this structure is
|
||||
@ -1073,7 +1073,7 @@ void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
|
||||
|
||||
/* Decrement the backlog value for a TCP-style socket. */
|
||||
if (sctp_style(oldsk, TCP))
|
||||
oldsk->sk_ack_backlog--;
|
||||
sk_acceptq_removed(oldsk);
|
||||
|
||||
/* Release references to the old endpoint and the sock. */
|
||||
sctp_endpoint_put(assoc->ep);
|
||||
|
@ -425,8 +425,8 @@ static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
|
||||
r->idiag_rqueue = atomic_read(&infox->asoc->rmem_alloc);
|
||||
r->idiag_wqueue = infox->asoc->sndbuf_used;
|
||||
} else {
|
||||
r->idiag_rqueue = sk->sk_ack_backlog;
|
||||
r->idiag_wqueue = sk->sk_max_ack_backlog;
|
||||
r->idiag_rqueue = READ_ONCE(sk->sk_ack_backlog);
|
||||
r->idiag_wqueue = READ_ONCE(sk->sk_max_ack_backlog);
|
||||
}
|
||||
if (infox->sctpinfo)
|
||||
sctp_get_sctp_info(sk, infox->asoc, infox->sctpinfo);
|
||||
|
@ -164,7 +164,7 @@ void sctp_endpoint_add_asoc(struct sctp_endpoint *ep,
|
||||
|
||||
/* Increment the backlog value for a TCP-style listening socket. */
|
||||
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
|
||||
sk->sk_ack_backlog++;
|
||||
sk_acceptq_added(sk);
|
||||
}
|
||||
|
||||
/* Free the endpoint structure. Delay cleanup until
|
||||
|
@ -8376,7 +8376,7 @@ static int sctp_listen_start(struct sock *sk, int backlog)
|
||||
}
|
||||
}
|
||||
|
||||
sk->sk_max_ack_backlog = backlog;
|
||||
WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
|
||||
return sctp_hash_endpoint(ep);
|
||||
}
|
||||
|
||||
@ -8430,7 +8430,7 @@ int sctp_inet_listen(struct socket *sock, int backlog)
|
||||
|
||||
/* If we are already listening, just update the backlog */
|
||||
if (sctp_sstate(sk, LISTENING))
|
||||
sk->sk_max_ack_backlog = backlog;
|
||||
WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
|
||||
else {
|
||||
err = sctp_listen_start(sk, backlog);
|
||||
if (err)
|
||||
|
@ -439,7 +439,7 @@ static void vsock_pending_work(struct work_struct *work)
|
||||
if (vsock_is_pending(sk)) {
|
||||
vsock_remove_pending(listener, sk);
|
||||
|
||||
listener->sk_ack_backlog--;
|
||||
sk_acceptq_removed(listener);
|
||||
} else if (!vsk->rejected) {
|
||||
/* We are not on the pending list and accept() did not reject
|
||||
* us, so we must have been accepted by our user process. We
|
||||
@ -1299,7 +1299,7 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
|
||||
err = -listener->sk_err;
|
||||
|
||||
if (connected) {
|
||||
listener->sk_ack_backlog--;
|
||||
sk_acceptq_removed(listener);
|
||||
|
||||
lock_sock_nested(connected, SINGLE_DEPTH_NESTING);
|
||||
vconnected = vsock_sk(connected);
|
||||
|
@ -428,7 +428,7 @@ static void hvs_open_connection(struct vmbus_channel *chan)
|
||||
|
||||
if (conn_from_host) {
|
||||
new->sk_state = TCP_ESTABLISHED;
|
||||
sk->sk_ack_backlog++;
|
||||
sk_acceptq_added(sk);
|
||||
|
||||
hvs_addr_init(&vnew->local_addr, if_type);
|
||||
hvs_remote_addr_init(&vnew->remote_addr, &vnew->local_addr);
|
||||
|
@ -1066,7 +1066,7 @@ virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
sk->sk_ack_backlog++;
|
||||
sk_acceptq_added(sk);
|
||||
|
||||
lock_sock_nested(child, SINGLE_DEPTH_NESTING);
|
||||
|
||||
|
@ -1098,7 +1098,7 @@ static int vmci_transport_recv_listen(struct sock *sk,
|
||||
}
|
||||
|
||||
vsock_add_pending(sk, pending);
|
||||
sk->sk_ack_backlog++;
|
||||
sk_acceptq_added(sk);
|
||||
|
||||
pending->sk_state = TCP_SYN_SENT;
|
||||
vmci_trans(vpending)->produce_size =
|
||||
|
@ -891,7 +891,7 @@ static int x25_accept(struct socket *sock, struct socket *newsock, int flags,
|
||||
/* Now attach up the new socket */
|
||||
skb->sk = NULL;
|
||||
kfree_skb(skb);
|
||||
sk->sk_ack_backlog--;
|
||||
sk_acceptq_removed(sk);
|
||||
newsock->state = SS_CONNECTED;
|
||||
rc = 0;
|
||||
out2:
|
||||
@ -1062,7 +1062,7 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
|
||||
skb_copy_from_linear_data(skb, makex25->calluserdata.cuddata, skb->len);
|
||||
makex25->calluserdata.cudlength = skb->len;
|
||||
|
||||
sk->sk_ack_backlog++;
|
||||
sk_acceptq_added(sk);
|
||||
|
||||
x25_insert_socket(make);
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user