tcp: add route_req method to tcp_request_sock_ops

Create wrappers with same signature for the IPv4/IPv6 request routing
calls and use these wrappers (via route_req method from
tcp_request_sock_ops) in tcp_v4_conn_request and tcp_v6_conn_request
with the purpose of unifying the two functions in a later patch.

We can later drop the wrapper functions and modify inet_csk_route_req
and inet6_csk_route_req to use the same signature.

Signed-off-by: Octavian Purdila <octavian.purdila@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Octavian Purdila 2014-06-25 17:09:55 +03:00 committed by David S. Miller
parent fb7b37a7f3
commit d94e0417ad
3 changed files with 52 additions and 13 deletions

View File

@ -1605,6 +1605,9 @@ struct tcp_request_sock_ops {
__u32 (*cookie_init_seq)(struct sock *sk, const struct sk_buff *skb, __u32 (*cookie_init_seq)(struct sock *sk, const struct sk_buff *skb,
__u16 *mss); __u16 *mss);
#endif #endif
struct dst_entry *(*route_req)(struct sock *sk, struct flowi *fl,
const struct request_sock *req,
bool *strict);
}; };
#ifdef CONFIG_SYN_COOKIES #ifdef CONFIG_SYN_COOKIES

View File

@ -1248,6 +1248,22 @@ static void tcp_v4_init_req(struct request_sock *req, struct sock *sk,
ireq->opt = tcp_v4_save_options(skb); ireq->opt = tcp_v4_save_options(skb);
} }
/* route_req method for IPv4 (tcp_request_sock_ops).
 *
 * Wrapper around inet_csk_route_req() with the common route_req
 * signature. When @strict is non-NULL, it reports whether the routed
 * destination (fl->u.ip4.daddr, filled in by the lookup) matches the
 * peer address recorded in @req; callers use this to decide whether
 * the route is trustworthy enough for peer-proven checks.
 *
 * Returns the dst entry on success or NULL on routing failure.
 */
static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

	/* Direct boolean assignment instead of an if/else ladder. */
	if (strict)
		*strict = (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr);

	return dst;
}
struct request_sock_ops tcp_request_sock_ops __read_mostly = { struct request_sock_ops tcp_request_sock_ops __read_mostly = {
.family = PF_INET, .family = PF_INET,
.obj_size = sizeof(struct tcp_request_sock), .obj_size = sizeof(struct tcp_request_sock),
@ -1267,6 +1283,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
#ifdef CONFIG_SYN_COOKIES #ifdef CONFIG_SYN_COOKIES
.cookie_init_seq = cookie_v4_init_sequence, .cookie_init_seq = cookie_v4_init_sequence,
#endif #endif
.route_req = tcp_v4_route_req,
}; };
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
@ -1346,11 +1363,13 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
* timewait bucket, so that all the necessary checks * timewait bucket, so that all the necessary checks
* are made in the function processing timewait state. * are made in the function processing timewait state.
*/ */
if (tmp_opt.saw_tstamp && if (tmp_opt.saw_tstamp && tcp_death_row.sysctl_tw_recycle) {
tcp_death_row.sysctl_tw_recycle && bool strict;
(dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
fl4.daddr == saddr) { dst = af_ops->route_req(sk, (struct flowi *)&fl4, req,
if (!tcp_peer_is_proven(req, dst, true)) { &strict);
if (dst && strict &&
!tcp_peer_is_proven(req, dst, true)) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
goto drop_and_release; goto drop_and_release;
} }
@ -1374,8 +1393,11 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
isn = tcp_v4_init_sequence(skb); isn = tcp_v4_init_sequence(skb);
} }
if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL) if (!dst) {
goto drop_and_free; dst = af_ops->route_req(sk, (struct flowi *)&fl4, req, NULL);
if (!dst)
goto drop_and_free;
}
tcp_rsk(req)->snt_isn = isn; tcp_rsk(req)->snt_isn = isn;
tcp_openreq_init_rwin(req, sk, dst); tcp_openreq_init_rwin(req, sk, dst);

View File

@ -745,6 +745,16 @@ static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
} }
} }
/* route_req method for IPv6 (tcp_request_sock_ops).
 *
 * Thin wrapper giving inet6_csk_route_req() the common route_req
 * signature. An IPv6 request route is always considered strict, so
 * *strict (when supplied) is unconditionally set to true.
 */
static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	struct dst_entry *dst = inet6_csk_route_req(sk, &fl->u.ip6, req);

	if (strict)
		*strict = true;

	return dst;
}
struct request_sock_ops tcp6_request_sock_ops __read_mostly = { struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
.family = AF_INET6, .family = AF_INET6,
.obj_size = sizeof(struct tcp6_request_sock), .obj_size = sizeof(struct tcp6_request_sock),
@ -764,6 +774,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
#ifdef CONFIG_SYN_COOKIES #ifdef CONFIG_SYN_COOKIES
.cookie_init_seq = cookie_v6_init_sequence, .cookie_init_seq = cookie_v6_init_sequence,
#endif #endif
.route_req = tcp_v6_route_req,
}; };
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
@ -1078,10 +1089,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
* timewait bucket, so that all the necessary checks * timewait bucket, so that all the necessary checks
* are made in the function processing timewait state. * are made in the function processing timewait state.
*/ */
if (tmp_opt.saw_tstamp && if (tmp_opt.saw_tstamp && tcp_death_row.sysctl_tw_recycle) {
tcp_death_row.sysctl_tw_recycle && dst = af_ops->route_req(sk, (struct flowi *)&fl6, req,
(dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) { NULL);
if (!tcp_peer_is_proven(req, dst, true)) { if (dst && !tcp_peer_is_proven(req, dst, true)) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
goto drop_and_release; goto drop_and_release;
} }
@ -1110,8 +1121,11 @@ have_isn:
if (security_inet_conn_request(sk, skb, req)) if (security_inet_conn_request(sk, skb, req))
goto drop_and_release; goto drop_and_release;
if (!dst && (dst = inet6_csk_route_req(sk, &fl6, req)) == NULL) if (!dst) {
goto drop_and_free; dst = af_ops->route_req(sk, (struct flowi *)&fl6, req, NULL);
if (!dst)
goto drop_and_free;
}
tcp_rsk(req)->snt_isn = isn; tcp_rsk(req)->snt_isn = isn;
tcp_openreq_init_rwin(req, sk, dst); tcp_openreq_init_rwin(req, sk, dst);