net: inet: Open code inet_hash2 and inet_unhash2
This patch folds the lhash2-related functions into __inet_hash and
inet_unhash. This will make the removal of the listening_hash in a
later patch easier to review.

First, this patch folds inet_hash2 into __inet_hash.

For unhash, the current call sequence is
inet_unhash() => __inet_unhash() => inet_unhash2(). The checks in
__inet_unhash() mostly apply to a TCP_LISTEN sk, and its caller
inet_unhash() already performs the TCP_LISTEN test, so this patch folds
both __inet_unhash() and inet_unhash2() into inet_unhash().

Note that all listening_hash users also have lhash2 initialized, so the
!h->lhash2 check is no longer needed.

Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit e8d0059000
parent 8ea1eebb49
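For orientation, here is a rough sketch of how the call structure changes; this is an illustration reassembled from the commit message and the diff below, not literal kernel source:

/* Before this patch: the lhash2 bucket is maintained by separate
 * helpers, each taking ilb2->lock on its own, nested inside ilb->lock:
 *
 *   __inet_hash()
 *       spin_lock(&ilb->lock)            // listening_hash bucket
 *           ... add sk to ilb ...
 *           inet_hash2(h, sk)            // takes ilb2->lock internally
 *
 *   inet_unhash()                        // TCP_LISTEN case
 *       spin_lock(&ilb->lock)
 *           __inet_unhash(sk, ilb)
 *               inet_unhash2(h, sk)      // takes ilb2->lock internally
 *
 * After this patch: __inet_hash() and inet_unhash() update both buckets
 * directly, holding ilb->lock and then ilb2->lock across the whole
 * operation; the !h->lhash2 check is dropped because every
 * listening_hash user also initializes lhash2.
 */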
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -193,40 +193,6 @@ inet_lhash2_bucket_sk(struct inet_hashinfo *h, struct sock *sk)
 	return inet_lhash2_bucket(h, hash);
 }
 
-static void inet_hash2(struct inet_hashinfo *h, struct sock *sk)
-{
-	struct inet_listen_hashbucket *ilb2;
-
-	if (!h->lhash2)
-		return;
-
-	ilb2 = inet_lhash2_bucket_sk(h, sk);
-
-	spin_lock(&ilb2->lock);
-	if (sk->sk_reuseport && sk->sk_family == AF_INET6)
-		hlist_add_tail_rcu(&inet_csk(sk)->icsk_listen_portaddr_node,
-				   &ilb2->head);
-	else
-		hlist_add_head_rcu(&inet_csk(sk)->icsk_listen_portaddr_node,
-				   &ilb2->head);
-	spin_unlock(&ilb2->lock);
-}
-
-static void inet_unhash2(struct inet_hashinfo *h, struct sock *sk)
-{
-	struct inet_listen_hashbucket *ilb2;
-
-	if (!h->lhash2 ||
-	    WARN_ON_ONCE(hlist_unhashed(&inet_csk(sk)->icsk_listen_portaddr_node)))
-		return;
-
-	ilb2 = inet_lhash2_bucket_sk(h, sk);
-
-	spin_lock(&ilb2->lock);
-	hlist_del_init_rcu(&inet_csk(sk)->icsk_listen_portaddr_node);
-	spin_unlock(&ilb2->lock);
-}
-
 static inline int compute_score(struct sock *sk, struct net *net,
 				const unsigned short hnum, const __be32 daddr,
 				const int dif, const int sdif)
@@ -631,6 +597,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
 int __inet_hash(struct sock *sk, struct sock *osk)
 {
 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
+	struct inet_listen_hashbucket *ilb2;
 	struct inet_listen_hashbucket *ilb;
 	int err = 0;
 
@@ -642,22 +609,29 @@ int __inet_hash(struct sock *sk, struct sock *osk)
 	}
 	WARN_ON(!sk_unhashed(sk));
 	ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
+	ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);
 
 	spin_lock(&ilb->lock);
+	spin_lock(&ilb2->lock);
 	if (sk->sk_reuseport) {
 		err = inet_reuseport_add_sock(sk, ilb);
 		if (err)
 			goto unlock;
 	}
 	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
-	    sk->sk_family == AF_INET6)
+	    sk->sk_family == AF_INET6) {
+		hlist_add_tail_rcu(&inet_csk(sk)->icsk_listen_portaddr_node,
+				   &ilb2->head);
 		__sk_nulls_add_node_tail_rcu(sk, &ilb->nulls_head);
-	else
+	} else {
+		hlist_add_head_rcu(&inet_csk(sk)->icsk_listen_portaddr_node,
+				   &ilb2->head);
 		__sk_nulls_add_node_rcu(sk, &ilb->nulls_head);
-	inet_hash2(hashinfo, sk);
+	}
 	sock_set_flag(sk, SOCK_RCU_FREE);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 unlock:
+	spin_unlock(&ilb2->lock);
 	spin_unlock(&ilb->lock);
 
 	return err;
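Read back out of the two hunks above, the TCP_LISTEN path of __inet_hash() ends up as follows; the early non-TCP_LISTEN branch, which this patch does not touch, is elided:

int __inet_hash(struct sock *sk, struct sock *osk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_listen_hashbucket *ilb2;
	struct inet_listen_hashbucket *ilb;
	int err = 0;

	/* ... unchanged non-TCP_LISTEN fast path elided ... */

	WARN_ON(!sk_unhashed(sk));
	ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
	ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);

	spin_lock(&ilb->lock);
	spin_lock(&ilb2->lock);
	if (sk->sk_reuseport) {
		err = inet_reuseport_add_sock(sk, ilb);
		if (err)
			goto unlock;
	}
	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
	    sk->sk_family == AF_INET6) {
		/* ilb2 is now updated under the same locks as ilb */
		hlist_add_tail_rcu(&inet_csk(sk)->icsk_listen_portaddr_node,
				   &ilb2->head);
		__sk_nulls_add_node_tail_rcu(sk, &ilb->nulls_head);
	} else {
		hlist_add_head_rcu(&inet_csk(sk)->icsk_listen_portaddr_node,
				   &ilb2->head);
		__sk_nulls_add_node_rcu(sk, &ilb->nulls_head);
	}
	sock_set_flag(sk, SOCK_RCU_FREE);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
unlock:
	spin_unlock(&ilb2->lock);
	spin_unlock(&ilb->lock);

	return err;
}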
@@ -675,22 +649,6 @@ int inet_hash(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(inet_hash);
 
-static void __inet_unhash(struct sock *sk, struct inet_listen_hashbucket *ilb)
-{
-	if (sk_unhashed(sk))
-		return;
-
-	if (rcu_access_pointer(sk->sk_reuseport_cb))
-		reuseport_stop_listen_sock(sk);
-	if (ilb) {
-		struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
-
-		inet_unhash2(hashinfo, sk);
-	}
-	__sk_nulls_del_node_init_rcu(sk);
-	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
-}
-
 void inet_unhash(struct sock *sk)
 {
 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
@@ -699,20 +657,40 @@ void inet_unhash(struct sock *sk)
 		return;
 
 	if (sk->sk_state == TCP_LISTEN) {
+		struct inet_listen_hashbucket *ilb2;
 		struct inet_listen_hashbucket *ilb;
 
 		ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
+		ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);
 		/* Don't disable bottom halves while acquiring the lock to
 		 * avoid circular locking dependency on PREEMPT_RT.
 		 */
 		spin_lock(&ilb->lock);
-		__inet_unhash(sk, ilb);
+		spin_lock(&ilb2->lock);
+		if (sk_unhashed(sk)) {
+			spin_unlock(&ilb2->lock);
+			spin_unlock(&ilb->lock);
+			return;
+		}
+
+		if (rcu_access_pointer(sk->sk_reuseport_cb))
+			reuseport_stop_listen_sock(sk);
+
+		hlist_del_init_rcu(&inet_csk(sk)->icsk_listen_portaddr_node);
+		__sk_nulls_del_node_init_rcu(sk);
+		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+		spin_unlock(&ilb2->lock);
 		spin_unlock(&ilb->lock);
 	} else {
 		spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
 
 		spin_lock_bh(lock);
-		__inet_unhash(sk, NULL);
+		if (sk_unhashed(sk)) {
+			spin_unlock_bh(lock);
+			return;
+		}
+		__sk_nulls_del_node_init_rcu(sk);
+		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 		spin_unlock_bh(lock);
 	}
 }
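Likewise, applying the last hunk, inet_unhash() after this patch reads as below. The early sk_unhashed() test sits between the two hunks as unchanged context and is assumed from the surrounding code:

void inet_unhash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;

	if (sk_unhashed(sk))
		return;

	if (sk->sk_state == TCP_LISTEN) {
		struct inet_listen_hashbucket *ilb2;
		struct inet_listen_hashbucket *ilb;

		ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
		ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);
		/* Don't disable bottom halves while acquiring the lock to
		 * avoid circular locking dependency on PREEMPT_RT.
		 */
		spin_lock(&ilb->lock);
		spin_lock(&ilb2->lock);
		if (sk_unhashed(sk)) {
			/* raced with another unhash; nothing left to do */
			spin_unlock(&ilb2->lock);
			spin_unlock(&ilb->lock);
			return;
		}

		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_stop_listen_sock(sk);

		hlist_del_init_rcu(&inet_csk(sk)->icsk_listen_portaddr_node);
		__sk_nulls_del_node_init_rcu(sk);
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
		spin_unlock(&ilb2->lock);
		spin_unlock(&ilb->lock);
	} else {
		spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

		spin_lock_bh(lock);
		if (sk_unhashed(sk)) {
			spin_unlock_bh(lock);
			return;
		}
		__sk_nulls_del_node_init_rcu(sk);
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
		spin_unlock_bh(lock);
	}
}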