tcp: make the dropreason really work when calling tcp_rcv_state_process()
Update three callers, covering both IPv4 and IPv6, so that the drop reason mechanism actually takes effect.

Signed-off-by: Jason Xing <kernelxing@tencent.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: David Ahern <dsahern@kernel.org>
Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7d6ed9afde
commit b982569593
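The pattern this commit enables: tcp_rcv_state_process() already reports why a segment should be dropped via enum skb_drop_reason (where SKB_NOT_DROPPED_YET is 0), but the callers were discarding that value. After this change a caller can capture it and hand it to kfree_skb_reason(), so tracing sees the precise reason. A minimal sketch of the caller pattern; the error-path code here is illustrative, not the literal kernel flow:

	enum skb_drop_reason reason;

	reason = tcp_rcv_state_process(sk, skb);
	if (reason) {
		/* non-zero means "drop this skb"; illustrative error path:
		 * free it with the precise reason instead of a generic one.
		 */
		kfree_skb_reason(skb, reason);
		return 0;
	}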
@@ -396,8 +396,8 @@ enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
 struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                            struct request_sock *req, bool fastopen,
                            bool *lost_race);
-int tcp_child_process(struct sock *parent, struct sock *child,
-                      struct sk_buff *skb);
+enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
+                                       struct sk_buff *skb);
 void tcp_enter_loss(struct sock *sk);
 void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
 void tcp_clear_retrans(struct tcp_sock *tp);
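The prototype change above relies on the drop-reason enum starting at zero, which is what keeps plain "if (reason)" and boolean-style "if (tcp_child_process(...))" checks valid. Abridged sketch of the enum, not the full kernel definition, which lists many more reasons:

	enum skb_drop_reason {
		SKB_NOT_DROPPED_YET = 0,	/* zero: keep processing, so if (reason) means drop */
		SKB_DROP_REASON_NOT_SPECIFIED,
		SKB_DROP_REASON_NO_SOCKET,
		/* ... many more reasons ... */
		SKB_DROP_REASON_MAX,
	};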
@@ -1926,7 +1926,8 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 	} else
 		sock_rps_save_rxhash(sk, skb);
 
-	if (tcp_rcv_state_process(sk, skb)) {
+	reason = tcp_rcv_state_process(sk, skb);
+	if (reason) {
 		rsk = sk;
 		goto reset;
 	}
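In tcp_v4_do_rcv() the captured reason eventually reaches the function's drop path, so the skb is freed with that specific reason rather than a generic one. Schematically; the label names follow the existing tcp_v4_do_rcv() structure, but the snippet is a simplification, not the literal code:

	reset:
		tcp_v4_send_reset(rsk, skb);
	discard:
		kfree_skb_reason(skb, reason);
		return 0;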
@@ -911,11 +911,11 @@ EXPORT_SYMBOL(tcp_check_req);
  * be created.
  */
 
-int tcp_child_process(struct sock *parent, struct sock *child,
-                      struct sk_buff *skb)
+enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
+                                       struct sk_buff *skb)
 	__releases(&((child)->sk_lock.slock))
 {
-	int ret = 0;
+	enum skb_drop_reason reason = SKB_NOT_DROPPED_YET;
 	int state = child->sk_state;
 
 	/* record sk_napi_id and sk_rx_queue_mapping of child. */
@@ -923,7 +923,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
 
 	tcp_segs_in(tcp_sk(child), skb);
 	if (!sock_owned_by_user(child)) {
-		ret = tcp_rcv_state_process(child, skb);
+		reason = tcp_rcv_state_process(child, skb);
 		/* Wakeup parent, send SIGIO */
 		if (state == TCP_SYN_RECV && child->sk_state != state)
 			parent->sk_data_ready(parent);
@@ -937,6 +937,6 @@ int tcp_child_process(struct sock *parent, struct sock *child,
 
 	bh_unlock_sock(child);
 	sock_put(child);
-	return ret;
+	return reason;
 }
 EXPORT_SYMBOL(tcp_child_process);
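Because SKB_NOT_DROPPED_YET is zero, callers that only boolean-test tcp_child_process() keep working unchanged, while a caller that wants the detail can now capture it. A hedged sketch of such a caller; the reset/drop handling shown is illustrative and not necessarily the exact tcp_v4_rcv() code:

	enum skb_drop_reason reason;

	reason = tcp_child_process(sk, nsk, skb);
	if (reason) {
		/* the child socket rejected the segment: reset the peer and
		 * drop the skb, keeping the reason for tracing.
		 */
		tcp_v4_send_reset(nsk, skb);
		goto discard_and_relse;		/* illustrative label */
	}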
@@ -1663,7 +1663,8 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	} else
 		sock_rps_save_rxhash(sk, skb);
 
-	if (tcp_rcv_state_process(sk, skb))
+	reason = tcp_rcv_state_process(sk, skb);
+	if (reason)
 		goto reset;
 	if (opt_skb)
 		goto ipv6_pktoptions;