tcp: Move skb_steal_sock() to request_sock.h
We will support arbitrary SYN Cookie with BPF. If a BPF prog validates the ACK and a kfunc allocates a reqsk, it will be carried to the TCP stack as skb->sk with req->syncookie set to 1. In skb_steal_sock(), we need to check inet_reqsk(sk)->syncookie to see if the reqsk was created by the kfunc. However, inet_reqsk() is not available in sock.h. Let's move skb_steal_sock() to request_sock.h. While at it, we refactor skb_steal_sock() so it returns early if skb->sk is NULL, to minimise the following patch. Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com> Reviewed-by: Eric Dumazet <edumazet@google.com> Link: https://lore.kernel.org/r/20240115205514.68364-3-kuniyu@amazon.com Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
parent
b18afb6f42
commit
95e752b529
@ -83,6 +83,34 @@ static inline struct sock *req_to_sk(struct request_sock *req)
|
||||
return (struct sock *)req;
|
||||
}
|
||||
|
||||
/**
|
||||
* skb_steal_sock - steal a socket from an sk_buff
|
||||
* @skb: sk_buff to steal the socket from
|
||||
* @refcounted: is set to true if the socket is reference-counted
|
||||
* @prefetched: is set to true if the socket was assigned from bpf
|
||||
*/
|
||||
static inline struct sock *skb_steal_sock(struct sk_buff *skb,
|
||||
bool *refcounted, bool *prefetched)
|
||||
{
|
||||
struct sock *sk = skb->sk;
|
||||
|
||||
if (!sk) {
|
||||
*prefetched = false;
|
||||
*refcounted = false;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
*prefetched = skb_sk_is_prefetched(skb);
|
||||
if (*prefetched)
|
||||
*refcounted = sk_is_refcounted(sk);
|
||||
else
|
||||
*refcounted = true;
|
||||
|
||||
skb->destructor = NULL;
|
||||
skb->sk = NULL;
|
||||
return sk;
|
||||
}
|
||||
|
||||
static inline struct request_sock *
|
||||
reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
|
||||
bool attach_listener)
|
||||
|
@ -2814,31 +2814,6 @@ sk_is_refcounted(struct sock *sk)
|
||||
return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE);
|
||||
}
|
||||
|
||||
/**
|
||||
* skb_steal_sock - steal a socket from an sk_buff
|
||||
* @skb: sk_buff to steal the socket from
|
||||
* @refcounted: is set to true if the socket is reference-counted
|
||||
* @prefetched: is set to true if the socket was assigned from bpf
|
||||
*/
|
||||
static inline struct sock *
|
||||
skb_steal_sock(struct sk_buff *skb, bool *refcounted, bool *prefetched)
|
||||
{
|
||||
if (skb->sk) {
|
||||
struct sock *sk = skb->sk;
|
||||
|
||||
*refcounted = true;
|
||||
*prefetched = skb_sk_is_prefetched(skb);
|
||||
if (*prefetched)
|
||||
*refcounted = sk_is_refcounted(sk);
|
||||
skb->destructor = NULL;
|
||||
skb->sk = NULL;
|
||||
return sk;
|
||||
}
|
||||
*prefetched = false;
|
||||
*refcounted = false;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Checks if this SKB belongs to an HW offloaded socket
|
||||
* and whether any SW fallbacks are required based on dev.
|
||||
* Check decrypted mark in case skb_orphan() cleared socket.
|
||||
|
Loading…
x
Reference in New Issue
Block a user