Merge branch 'socket-option-lockless'
Eric Dumazet says:

====================
net: more data-races fixes and lockless socket options

This is yet another round of data-races fixes,
and lockless socket options.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 2be825ebb9
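The series-wide pattern: fields such as sk->sk_priority, sk->sk_pacing_rate and sk->sk_dst_pending_confirm are written by setsockopt() or the TCP stack while transmit paths read them without owning the socket lock. Plain concurrent accesses are data races; the patches annotate both sides. A minimal sketch of the idiom (illustrative, not a line from the diff):

	/* The writer may run without the socket lock, so the store is
	 * annotated to forbid store tearing and to pair with the
	 * annotated loads on the lockless readers.
	 */
	WRITE_ONCE(sk->sk_priority, val);		/* setsockopt() side */

	skb->priority = READ_ONCE(sk->sk_priority);	/* transmit side */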
drivers/net/ppp/pppoe.c

@@ -877,7 +877,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
 
 	skb->dev = dev;
 
-	skb->priority = sk->sk_priority;
+	skb->priority = READ_ONCE(sk->sk_priority);
 	skb->protocol = cpu_to_be16(ETH_P_PPP_SES);
 
 	ph = skb_put(skb, total_len + sizeof(struct pppoe_hdr));
include/net/bluetooth/bluetooth.h

@@ -541,7 +541,7 @@ static inline struct sk_buff *bt_skb_sendmsg(struct sock *sk,
 		return ERR_PTR(-EFAULT);
 	}
 
-	skb->priority = sk->sk_priority;
+	skb->priority = READ_ONCE(sk->sk_priority);
 
 	return skb;
 }
include/net/sock.h

@@ -2007,21 +2007,33 @@ static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
 	/* sk_tx_queue_mapping accept only upto a 16-bit value */
 	if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
 		return;
-	sk->sk_tx_queue_mapping = tx_queue;
+	/* Paired with READ_ONCE() in sk_tx_queue_get() and
+	 * other WRITE_ONCE() because socket lock might be not held.
+	 */
+	WRITE_ONCE(sk->sk_tx_queue_mapping, tx_queue);
 }
 
 #define NO_QUEUE_MAPPING	USHRT_MAX
 
 static inline void sk_tx_queue_clear(struct sock *sk)
 {
-	sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
+	/* Paired with READ_ONCE() in sk_tx_queue_get() and
+	 * other WRITE_ONCE() because socket lock might be not held.
+	 */
+	WRITE_ONCE(sk->sk_tx_queue_mapping, NO_QUEUE_MAPPING);
 }
 
 static inline int sk_tx_queue_get(const struct sock *sk)
 {
-	if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
-		return sk->sk_tx_queue_mapping;
+	if (sk) {
+		/* Paired with WRITE_ONCE() in sk_tx_queue_clear()
+		 * and sk_tx_queue_set().
+		 */
+		int val = READ_ONCE(sk->sk_tx_queue_mapping);
+
+		if (val != NO_QUEUE_MAPPING)
+			return val;
+	}
 	return -1;
 }
 
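The sk_tx_queue_get() rewrite above also closes a time-of-check/time-of-use window: the old code read sk_tx_queue_mapping twice, so a concurrent sk_tx_queue_clear() could let the test pass on one value while a different one was returned. A hedged sketch of the general idiom (names from the hunk above):

	/* Read the racy field exactly once into a local, then test and
	 * use that stable snapshot; both accesses now see one value.
	 */
	int val = READ_ONCE(sk->sk_tx_queue_mapping);

	if (val != NO_QUEUE_MAPPING)
		return val;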
@@ -2170,7 +2182,7 @@ static inline void __dst_negative_advice(struct sock *sk)
 		if (ndst != dst) {
 			rcu_assign_pointer(sk->sk_dst_cache, ndst);
 			sk_tx_queue_clear(sk);
-			sk->sk_dst_pending_confirm = 0;
+			WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
 		}
 	}
 }
@@ -2187,7 +2199,7 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
 	struct dst_entry *old_dst;
 
 	sk_tx_queue_clear(sk);
-	sk->sk_dst_pending_confirm = 0;
+	WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
 	old_dst = rcu_dereference_protected(sk->sk_dst_cache,
 					    lockdep_sock_is_held(sk));
 	rcu_assign_pointer(sk->sk_dst_cache, dst);
@@ -2200,7 +2212,7 @@ sk_dst_set(struct sock *sk, struct dst_entry *dst)
 	struct dst_entry *old_dst;
 
 	sk_tx_queue_clear(sk);
-	sk->sk_dst_pending_confirm = 0;
+	WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
 	old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
 	dst_release(old_dst);
 }
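All three writers of sk_dst_pending_confirm above become WRITE_ONCE() because the field is read without the socket lock on the TCP transmit path; the paired reader is annotated in the __tcp_transmit_skb() hunk further down. A hedged side-by-side of the pairing:

	/* writers (dst install / negative advice), possibly lockless vs: */
	WRITE_ONCE(sk->sk_dst_pending_confirm, 0);

	/* the reader in __tcp_transmit_skb(): */
	skb_set_dst_pending_confirm(skb, READ_ONCE(sk->sk_dst_pending_confirm));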
include/trace/events/mptcp.h

@@ -44,7 +44,7 @@ TRACE_EVENT(mptcp_subflow_get_send,
 		ssk = mptcp_subflow_tcp_sock(subflow);
 		if (ssk && sk_fullsock(ssk)) {
 			__entry->snd_wnd = tcp_sk(ssk)->snd_wnd;
-			__entry->pace = ssk->sk_pacing_rate;
+			__entry->pace = READ_ONCE(ssk->sk_pacing_rate);
 		} else {
 			__entry->snd_wnd = 0;
 			__entry->pace = 0;
net/appletalk/aarp.c

@@ -664,7 +664,7 @@ out_unlock:
 
 sendit:
 	if (skb->sk)
-		skb->priority = skb->sk->sk_priority;
+		skb->priority = READ_ONCE(skb->sk->sk_priority);
 	if (dev_queue_xmit(skb))
 		goto drop;
 sent:
net/ax25/af_ax25.c

@@ -939,7 +939,7 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
 	sock_init_data(NULL, sk);
 
 	sk->sk_type     = osk->sk_type;
-	sk->sk_priority = osk->sk_priority;
+	sk->sk_priority = READ_ONCE(osk->sk_priority);
 	sk->sk_protocol = osk->sk_protocol;
 	sk->sk_rcvbuf   = osk->sk_rcvbuf;
 	sk->sk_sndbuf   = osk->sk_sndbuf;
net/bluetooth/l2cap_sock.c

@@ -1615,7 +1615,7 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
 		return ERR_PTR(-ENOTCONN);
 	}
 
-	skb->priority = sk->sk_priority;
+	skb->priority = READ_ONCE(sk->sk_priority);
 
 	bt_cb(skb)->l2cap.chan = chan;
 
net/can/j1939/socket.c

@@ -884,7 +884,7 @@ static struct sk_buff *j1939_sk_alloc_skb(struct net_device *ndev,
 	skcb = j1939_skb_to_cb(skb);
 	memset(skcb, 0, sizeof(*skcb));
 	skcb->addr = jsk->addr;
-	skcb->priority = j1939_prio(sk->sk_priority);
+	skcb->priority = j1939_prio(READ_ONCE(sk->sk_priority));
 
 	if (msg->msg_name) {
 		struct sockaddr_can *addr = msg->msg_name;
net/can/raw.c

@@ -881,7 +881,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 	}
 
 	skb->dev = dev;
-	skb->priority = sk->sk_priority;
+	skb->priority = READ_ONCE(sk->sk_priority);
 	skb->mark = READ_ONCE(sk->sk_mark);
 	skb->tstamp = sockc.transmit_time;
 
net/core/sock.c (163 changed lines)
@@ -600,7 +600,7 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
 	    INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
 			       dst, cookie) == NULL) {
 		sk_tx_queue_clear(sk);
-		sk->sk_dst_pending_confirm = 0;
+		WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
 		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
 		dst_release(dst);
 		return NULL;
@@ -806,9 +806,7 @@ EXPORT_SYMBOL(sock_no_linger);
 
 void sock_set_priority(struct sock *sk, u32 priority)
 {
-	lock_sock(sk);
-	sk->sk_priority = priority;
-	release_sock(sk);
+	WRITE_ONCE(sk->sk_priority, priority);
 }
 EXPORT_SYMBOL(sock_set_priority);
 
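With every reader of sk_priority now annotated, sock_set_priority() no longer needs the socket lock: the single annotated store of one aligned 32-bit word cannot be observed half-written. Kernel-internal callers keep the same API; a hedged usage sketch (the caller scenario and priority value are illustrative, not from the diff):

	/* e.g. a storage or cluster driver raising the priority of its
	 * own transport socket during setup; no lock juggling required.
	 */
	sock_set_priority(sock->sk, TC_PRIO_INTERACTIVE);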
@@ -1118,6 +1116,83 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
 
 	valbool = val ? 1 : 0;
 
+	/* handle options which do not require locking the socket. */
+	switch (optname) {
+	case SO_PRIORITY:
+		if ((val >= 0 && val <= 6) ||
+		    sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) ||
+		    sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
+			sock_set_priority(sk, val);
+			return 0;
+		}
+		return -EPERM;
+	case SO_PASSSEC:
+		assign_bit(SOCK_PASSSEC, &sock->flags, valbool);
+		return 0;
+	case SO_PASSCRED:
+		assign_bit(SOCK_PASSCRED, &sock->flags, valbool);
+		return 0;
+	case SO_PASSPIDFD:
+		assign_bit(SOCK_PASSPIDFD, &sock->flags, valbool);
+		return 0;
+	case SO_TYPE:
+	case SO_PROTOCOL:
+	case SO_DOMAIN:
+	case SO_ERROR:
+		return -ENOPROTOOPT;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	case SO_BUSY_POLL:
+		if (val < 0)
+			return -EINVAL;
+		WRITE_ONCE(sk->sk_ll_usec, val);
+		return 0;
+	case SO_PREFER_BUSY_POLL:
+		if (valbool && !sockopt_capable(CAP_NET_ADMIN))
+			return -EPERM;
+		WRITE_ONCE(sk->sk_prefer_busy_poll, valbool);
+		return 0;
+	case SO_BUSY_POLL_BUDGET:
+		if (val > READ_ONCE(sk->sk_busy_poll_budget) &&
+		    !sockopt_capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (val < 0 || val > U16_MAX)
+			return -EINVAL;
+		WRITE_ONCE(sk->sk_busy_poll_budget, val);
+		return 0;
+#endif
+	case SO_MAX_PACING_RATE:
+		{
+		unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val;
+		unsigned long pacing_rate;
+
+		if (sizeof(ulval) != sizeof(val) &&
+		    optlen >= sizeof(ulval) &&
+		    copy_from_sockptr(&ulval, optval, sizeof(ulval))) {
+			return -EFAULT;
+		}
+		if (ulval != ~0UL)
+			cmpxchg(&sk->sk_pacing_status,
+				SK_PACING_NONE,
+				SK_PACING_NEEDED);
+		/* Pairs with READ_ONCE() from sk_getsockopt() */
+		WRITE_ONCE(sk->sk_max_pacing_rate, ulval);
+		pacing_rate = READ_ONCE(sk->sk_pacing_rate);
+		if (ulval < pacing_rate)
+			WRITE_ONCE(sk->sk_pacing_rate, ulval);
+		return 0;
+		}
+	case SO_TXREHASH:
+		if (val < -1 || val > 1)
+			return -EINVAL;
+		if ((u8)val == SOCK_TXREHASH_DEFAULT)
+			val = READ_ONCE(sock_net(sk)->core.sysctl_txrehash);
+		/* Paired with READ_ONCE() in tcp_rtx_synack()
+		 * and sk_getsockopt().
+		 */
+		WRITE_ONCE(sk->sk_txrehash, (u8)val);
+		return 0;
+	}
+
 	sockopt_lock_sock(sk);
 
 	switch (optname) {
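For userspace the contract is unchanged; the options in this new switch simply return before sockopt_lock_sock() is ever taken. A hedged usage sketch against the standard sockets API (values illustrative):

	#include <sys/socket.h>

	static int set_lockless_opts(int fd)
	{
		int prio = 4;	/* 0..6 requires no extra capability */
		int usecs = 50;	/* SO_BUSY_POLL interval, microseconds */

		if (setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)))
			return -1;
		return setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL,
				  &usecs, sizeof(usecs));
	}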
@@ -1133,12 +1208,6 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
 	case SO_REUSEPORT:
 		sk->sk_reuseport = valbool;
 		break;
-	case SO_TYPE:
-	case SO_PROTOCOL:
-	case SO_DOMAIN:
-	case SO_ERROR:
-		ret = -ENOPROTOOPT;
-		break;
 	case SO_DONTROUTE:
 		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
 		sk_dst_reset(sk);
@@ -1213,15 +1282,6 @@ set_sndbuf:
 		sk->sk_no_check_tx = valbool;
 		break;
 
-	case SO_PRIORITY:
-		if ((val >= 0 && val <= 6) ||
-		    sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) ||
-		    sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
-			WRITE_ONCE(sk->sk_priority, val);
-		else
-			ret = -EPERM;
-		break;
-
 	case SO_LINGER:
 		if (optlen < sizeof(ling)) {
 			ret = -EINVAL;	/* 1003.1g */
@@ -1247,14 +1307,6 @@ set_sndbuf:
 	case SO_BSDCOMPAT:
 		break;
 
-	case SO_PASSCRED:
-		assign_bit(SOCK_PASSCRED, &sock->flags, valbool);
-		break;
-
-	case SO_PASSPIDFD:
-		assign_bit(SOCK_PASSPIDFD, &sock->flags, valbool);
-		break;
-
 	case SO_TIMESTAMP_OLD:
 	case SO_TIMESTAMP_NEW:
 	case SO_TIMESTAMPNS_OLD:
@@ -1360,9 +1412,6 @@ set_sndbuf:
 		sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
 		break;
 
-	case SO_PASSSEC:
-		assign_bit(SOCK_PASSSEC, &sock->flags, valbool);
-		break;
 	case SO_MARK:
 		if (!sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
 		    !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
@@ -1404,50 +1453,7 @@ set_sndbuf:
 		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
 		break;
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-	case SO_BUSY_POLL:
-		if (val < 0)
-			ret = -EINVAL;
-		else
-			WRITE_ONCE(sk->sk_ll_usec, val);
-		break;
-	case SO_PREFER_BUSY_POLL:
-		if (valbool && !sockopt_capable(CAP_NET_ADMIN))
-			ret = -EPERM;
-		else
-			WRITE_ONCE(sk->sk_prefer_busy_poll, valbool);
-		break;
-	case SO_BUSY_POLL_BUDGET:
-		if (val > READ_ONCE(sk->sk_busy_poll_budget) && !sockopt_capable(CAP_NET_ADMIN)) {
-			ret = -EPERM;
-		} else {
-			if (val < 0 || val > U16_MAX)
-				ret = -EINVAL;
-			else
-				WRITE_ONCE(sk->sk_busy_poll_budget, val);
-		}
-		break;
-#endif
-
-	case SO_MAX_PACING_RATE:
-		{
-		unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val;
-
-		if (sizeof(ulval) != sizeof(val) &&
-		    optlen >= sizeof(ulval) &&
-		    copy_from_sockptr(&ulval, optval, sizeof(ulval))) {
-			ret = -EFAULT;
-			break;
-		}
-		if (ulval != ~0UL)
-			cmpxchg(&sk->sk_pacing_status,
-				SK_PACING_NONE,
-				SK_PACING_NEEDED);
-		/* Pairs with READ_ONCE() from sk_getsockopt() */
-		WRITE_ONCE(sk->sk_max_pacing_rate, ulval);
-		sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval);
-		break;
-		}
 	case SO_INCOMING_CPU:
 		reuseport_update_incoming_cpu(sk, val);
 		break;
@@ -1532,19 +1538,6 @@ set_sndbuf:
 		break;
 		}
 
-	case SO_TXREHASH:
-		if (val < -1 || val > 1) {
-			ret = -EINVAL;
-			break;
-		}
-		if ((u8)val == SOCK_TXREHASH_DEFAULT)
-			val = READ_ONCE(sock_net(sk)->core.sysctl_txrehash);
-		/* Paired with READ_ONCE() in tcp_rtx_synack()
-		 * and sk_getsockopt().
-		 */
-		WRITE_ONCE(sk->sk_txrehash, (u8)val);
-		break;
-
 	default:
 		ret = -ENOPROTOOPT;
 		break;
net/dccp/ipv6.c

@@ -239,7 +239,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
 		if (!opt)
 			opt = rcu_dereference(np->opt);
 		err = ip6_xmit(sk, skb, &fl6, READ_ONCE(sk->sk_mark), opt,
-			       np->tclass, sk->sk_priority);
+			       np->tclass, READ_ONCE(sk->sk_priority));
 		rcu_read_unlock();
 		err = net_xmit_eval(err);
 	}
net/ipv4/inet_diag.c

@@ -165,7 +165,7 @@ int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
 		 * For cgroup2 classid is always zero.
 		 */
 		if (!classid)
-			classid = sk->sk_priority;
+			classid = READ_ONCE(sk->sk_priority);
 
 		if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
 			goto errout;
net/ipv4/ip_output.c

@@ -1449,7 +1449,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
 		ip_options_build(skb, opt, cork->addr, rt);
 	}
 
-	skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority;
+	skb->priority = (cork->tos != -1) ? cork->priority: READ_ONCE(sk->sk_priority);
 	skb->mark = cork->mark;
 	skb->tstamp = cork->transmit_time;
 	/*
net/ipv4/tcp_bbr.c

@@ -258,7 +258,7 @@ static unsigned long bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
 	u64 rate = bw;
 
 	rate = bbr_rate_bytes_per_sec(sk, rate, gain);
-	rate = min_t(u64, rate, sk->sk_max_pacing_rate);
+	rate = min_t(u64, rate, READ_ONCE(sk->sk_max_pacing_rate));
 	return rate;
 }
 
@@ -278,7 +278,8 @@ static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
 	}
 	bw = (u64)tcp_snd_cwnd(tp) * BW_UNIT;
 	do_div(bw, rtt_us);
-	sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
+	WRITE_ONCE(sk->sk_pacing_rate,
+		   bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain));
 }
 
 /* Pace using current bw estimate and a gain factor. */
@@ -290,14 +291,14 @@ static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
 
 	if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
 		bbr_init_pacing_rate_from_rtt(sk);
-	if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
-		sk->sk_pacing_rate = rate;
+	if (bbr_full_bw_reached(sk) || rate > READ_ONCE(sk->sk_pacing_rate))
+		WRITE_ONCE(sk->sk_pacing_rate, rate);
 }
 
 /* override sysctl_tcp_min_tso_segs */
 __bpf_kfunc static u32 bbr_min_tso_segs(struct sock *sk)
 {
-	return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
+	return READ_ONCE(sk->sk_pacing_rate) < (bbr_min_tso_rate >> 3) ? 1 : 2;
 }
 
 static u32 bbr_tso_segs_goal(struct sock *sk)
@@ -309,7 +310,7 @@ static u32 bbr_tso_segs_goal(struct sock *sk)
 	 * driver provided sk_gso_max_size.
 	 */
 	bytes = min_t(unsigned long,
-		      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
+		      READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift),
 		      GSO_LEGACY_MAX_SIZE - 1 - MAX_TCP_HEADER);
 	segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));
 
net/ipv4/tcp_input.c

@@ -927,8 +927,8 @@ static void tcp_update_pacing_rate(struct sock *sk)
 	 * without any lock. We want to make sure compiler wont store
 	 * intermediate values in this location.
 	 */
-	WRITE_ONCE(sk->sk_pacing_rate, min_t(u64, rate,
-					     sk->sk_max_pacing_rate));
+	WRITE_ONCE(sk->sk_pacing_rate,
+		   min_t(u64, rate, READ_ONCE(sk->sk_max_pacing_rate)));
 }
 
 /* Calculate rto without backoff. This is the second half of Van Jacobson's
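The comment in this hunk states the core rule of the whole series: sk_pacing_rate is read by other CPUs (sch_fq, TSQ) without any lock, so both the store and the load must be single, untorn accesses. A hedged illustration of what the annotations rule out (not a line from the patch):

	/* Without the annotations the compiler may split the store, use
	 * the field as scratch space, or re-load it mid-computation; a
	 * lockless reader could then observe an intermediate value.
	 */
	WRITE_ONCE(sk->sk_pacing_rate, rate);		/* TCP writer */
	rate = READ_ONCE(skb->sk->sk_pacing_rate);	/* sch_fq reader */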
net/ipv4/tcp_ipv4.c

@@ -828,7 +828,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
 		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
 				   inet_twsk(sk)->tw_mark : sk->sk_mark;
 		ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
-				   inet_twsk(sk)->tw_priority : sk->sk_priority;
+				   inet_twsk(sk)->tw_priority : READ_ONCE(sk->sk_priority);
 		transmit_time = tcp_transmit_time(sk);
 		xfrm_sk_clone_policy(ctl_sk, sk);
 		txhash = (sk->sk_state == TCP_TIME_WAIT) ?
net/ipv4/tcp_minisocks.c

@@ -292,7 +292,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 
 		tw->tw_transparent	= inet_test_bit(TRANSPARENT, sk);
 		tw->tw_mark		= sk->sk_mark;
-		tw->tw_priority		= sk->sk_priority;
+		tw->tw_priority		= READ_ONCE(sk->sk_priority);
 		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
 		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
 		tcptw->tw_snd_nxt	= tp->snd_nxt;
net/ipv4/tcp_output.c

@@ -1201,7 +1201,7 @@ static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	if (sk->sk_pacing_status != SK_PACING_NONE) {
-		unsigned long rate = sk->sk_pacing_rate;
+		unsigned long rate = READ_ONCE(sk->sk_pacing_rate);
 
 		/* Original sch_fq does not pace first 10 MSS
 		 * Note that tp->data_segs_out overflows after 2^32 packets,
@@ -1325,7 +1325,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
 	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
 	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
 
-	skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
+	skb_set_dst_pending_confirm(skb, READ_ONCE(sk->sk_dst_pending_confirm));
 
 	/* Build TCP header and checksum it. */
 	th = (struct tcphdr *)skb->data;
@@ -1973,7 +1973,7 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
 	unsigned long bytes;
 	u32 r;
 
-	bytes = sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift);
+	bytes = READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift);
 
 	r = tcp_min_rtt(tcp_sk(sk)) >> READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log);
 	if (r < BITS_PER_TYPE(sk->sk_gso_max_size))
@@ -2553,7 +2553,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
 
 	limit = max_t(unsigned long,
 		      2 * skb->truesize,
-		      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));
+		      READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift));
 	if (sk->sk_pacing_status == SK_PACING_NONE)
 		limit = min_t(unsigned long, limit,
 			      READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes));
@@ -2561,7 +2561,8 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
 
 	if (static_branch_unlikely(&tcp_tx_delay_enabled) &&
 	    tcp_sk(sk)->tcp_tx_delay) {
-		u64 extra_bytes = (u64)sk->sk_pacing_rate * tcp_sk(sk)->tcp_tx_delay;
+		u64 extra_bytes = (u64)READ_ONCE(sk->sk_pacing_rate) *
+				  tcp_sk(sk)->tcp_tx_delay;
 
 		/* TSQ is based on skb truesize sum (sk_wmem_alloc), so we
 		 * approximate our needs assuming an ~100% skb->truesize overhead.
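These tcp_output.c hunks all read the pacing rate once per computation. The shift idiom deserves a note: sk_pacing_rate >> sk_pacing_shift converts a rate in bytes per second into a per-quantum byte budget. A hedged arithmetic sketch, assuming the default sk_pacing_shift of 10:

	/* 1 Gbit/s is 125000000 bytes/s; >> 10 divides by 1024, i.e.
	 * roughly one millisecond worth of data may sit in the qdisc:
	 *
	 *	125000000 >> 10 == 122070 bytes (~122 KB TSQ limit,
	 *	floored by 2 * skb->truesize)
	 */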
net/ipv6/inet6_connection_sock.c

@@ -133,7 +133,7 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
 	fl6.daddr = sk->sk_v6_daddr;
 
 	res = ip6_xmit(sk, skb, &fl6, sk->sk_mark, rcu_dereference(np->opt),
-		       np->tclass, sk->sk_priority);
+		       np->tclass, READ_ONCE(sk->sk_priority));
 	rcu_read_unlock();
 	return res;
 }
net/ipv6/ip6_output.c

@@ -1984,7 +1984,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
 	hdr->saddr = fl6->saddr;
 	hdr->daddr = *final_dst;
 
-	skb->priority = sk->sk_priority;
+	skb->priority = READ_ONCE(sk->sk_priority);
 	skb->mark = cork->base.mark;
 	skb->tstamp = cork->base.transmit_time;
 
net/ipv6/tcp_ipv6.c

@@ -565,7 +565,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 		if (!opt)
 			opt = rcu_dereference(np->opt);
 		err = ip6_xmit(sk, skb, fl6, skb->mark ? : READ_ONCE(sk->sk_mark),
-			       opt, tclass, sk->sk_priority);
+			       opt, tclass, READ_ONCE(sk->sk_priority));
 		rcu_read_unlock();
 		err = net_xmit_eval(err);
 	}
@@ -1058,7 +1058,7 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
 		trace_tcp_send_reset(sk, skb);
 		if (inet6_test_bit(REPFLOW, sk))
 			label = ip6_flowlabel(ipv6h);
-		priority = sk->sk_priority;
+		priority = READ_ONCE(sk->sk_priority);
 		txhash = sk->sk_txhash;
 	}
 	if (sk->sk_state == TCP_TIME_WAIT) {
net/mptcp/sockopt.c

@@ -89,7 +89,7 @@ static void mptcp_sol_socket_sync_intval(struct mptcp_sock *msk, int optname, in
 		sock_valbool_flag(ssk, SOCK_KEEPOPEN, !!val);
 		break;
 	case SO_PRIORITY:
-		ssk->sk_priority = val;
+		WRITE_ONCE(ssk->sk_priority, val);
 		break;
 	case SO_SNDBUF:
 	case SO_SNDBUFFORCE:
net/netrom/af_netrom.c

@@ -487,7 +487,7 @@ static struct sock *nr_make_new(struct sock *osk)
 	sock_init_data(NULL, sk);
 
 	sk->sk_type     = osk->sk_type;
-	sk->sk_priority = osk->sk_priority;
+	sk->sk_priority = READ_ONCE(osk->sk_priority);
 	sk->sk_protocol = osk->sk_protocol;
 	sk->sk_rcvbuf   = osk->sk_rcvbuf;
 	sk->sk_sndbuf   = osk->sk_sndbuf;
net/rose/af_rose.c

@@ -583,7 +583,7 @@ static struct sock *rose_make_new(struct sock *osk)
 #endif
 
 	sk->sk_type     = osk->sk_type;
-	sk->sk_priority = osk->sk_priority;
+	sk->sk_priority = READ_ONCE(osk->sk_priority);
 	sk->sk_protocol = osk->sk_protocol;
 	sk->sk_rcvbuf   = osk->sk_rcvbuf;
 	sk->sk_sndbuf   = osk->sk_sndbuf;
net/sched/em_meta.c

@@ -546,7 +546,7 @@ META_COLLECTOR(int_sk_prio)
 		*err = -1;
 		return;
 	}
-	dst->value = sk->sk_priority;
+	dst->value = READ_ONCE(sk->sk_priority);
 }
 
 META_COLLECTOR(int_sk_rcvlowat)
net/sched/sch_fq.c

@@ -668,7 +668,7 @@ begin:
 	 */
 	if (!skb->tstamp) {
 		if (skb->sk)
-			rate = min(skb->sk->sk_pacing_rate, rate);
+			rate = min(READ_ONCE(skb->sk->sk_pacing_rate), rate);
 
 		if (rate <= q->low_rate_threshold) {
 			f->credit = 0;
net/sctp/ipv6.c

@@ -247,7 +247,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *t)
 	rcu_read_lock();
 	res = ip6_xmit(sk, skb, fl6, sk->sk_mark,
 		       rcu_dereference(np->opt),
-		       tclass, sk->sk_priority);
+		       tclass, READ_ONCE(sk->sk_priority));
 	rcu_read_unlock();
 	return res;
 }
net/smc/af_smc.c

@@ -493,7 +493,7 @@ static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
 	nsk->sk_sndtimeo = osk->sk_sndtimeo;
 	nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
 	nsk->sk_mark = READ_ONCE(osk->sk_mark);
-	nsk->sk_priority = osk->sk_priority;
+	nsk->sk_priority = READ_ONCE(osk->sk_priority);
 	nsk->sk_rcvlowat = osk->sk_rcvlowat;
 	nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
 	nsk->sk_err = osk->sk_err;
net/x25/af_x25.c

@@ -598,7 +598,7 @@ static struct sock *x25_make_new(struct sock *osk)
 	x25 = x25_sk(sk);
 
 	sk->sk_type     = osk->sk_type;
-	sk->sk_priority = osk->sk_priority;
+	sk->sk_priority = READ_ONCE(osk->sk_priority);
 	sk->sk_protocol = osk->sk_protocol;
 	sk->sk_rcvbuf   = osk->sk_rcvbuf;
 	sk->sk_sndbuf   = osk->sk_sndbuf;
net/xdp/xsk.c

@@ -684,7 +684,7 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
 	}
 
 	skb->dev = dev;
-	skb->priority = xs->sk.sk_priority;
+	skb->priority = READ_ONCE(xs->sk.sk_priority);
 	skb->mark = READ_ONCE(xs->sk.sk_mark);
 	skb->destructor = xsk_destruct_skb;
 	xsk_set_destructor_arg(skb);