udp: restrict offloads to one namespace
UDP tunnel offloads tend to aggregate datagrams based on inner headers.
The GRO engine gets notified by tunnel implementations about possible
offloads, and the match is based solely on the port number.

Imagine a tunnel bound to port 53: the offloading will look into all DNS
packets and try to aggregate them based on the inner data found within.
This could lead to data corruption and malformed DNS packets.

While this patch minimizes the problem and helps an administrator find
the issue by querying ip tunnel/fou, a better way would be to match on
the specific destination IP address, so that a user space socket bound
to the same address would conflict.

Cc: Tom Herbert <tom@herbertland.com>
Cc: Eric Dumazet <edumazet@google.com>
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 787d7ac308
parent 07b9b37c22
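To make the fix concrete, here is a hedged sketch of the GRO match predicate before and after the change, written as if it sat next to the code in net/ipv4/udp_offload.c (udp_offload_priv is private to that file, so this is illustrative rather than a drop-in):

/* Sketch: the GRO match predicate before and after this patch.
 * uo_priv, uh and skb follow net/ipv4/udp_offload.c naming. */

/* Before: the destination port alone decides whether GRO inspects
 * inner headers, so a tunnel bound to port 53 in one namespace would
 * aggregate DNS packets belonging to every namespace. */
static bool offload_match_before(const struct udp_offload_priv *uo_priv,
				 const struct udphdr *uh)
{
	return uo_priv->offload->port == uh->dest;
}

/* After: the offload only matches packets received in the namespace
 * it was registered in. */
static bool offload_match_after(const struct udp_offload_priv *uo_priv,
				const struct udphdr *uh,
				const struct sk_buff *skb)
{
	return net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) &&
	       uo_priv->offload->port == uh->dest;
}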
drivers/net/geneve.c
@@ -376,7 +376,7 @@ static void geneve_notify_add_rx_port(struct geneve_sock *gs)
 	int err;

 	if (sa_family == AF_INET) {
-		err = udp_add_offload(&gs->udp_offloads);
+		err = udp_add_offload(sock_net(sk), &gs->udp_offloads);
 		if (err)
 			pr_warn("geneve: udp_add_offload failed with status %d\n",
 				err);
drivers/net/vxlan.c
@@ -621,7 +621,7 @@ static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
 	int err;

 	if (sa_family == AF_INET) {
-		err = udp_add_offload(&vs->udp_offloads);
+		err = udp_add_offload(net, &vs->udp_offloads);
 		if (err)
 			pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
 	}
include/net/protocol.h
@@ -107,7 +107,7 @@ int inet_del_offload(const struct net_offload *prot, unsigned char num);
 void inet_register_protosw(struct inet_protosw *p);
 void inet_unregister_protosw(struct inet_protosw *p);

-int udp_add_offload(struct udp_offload *prot);
+int udp_add_offload(struct net *net, struct udp_offload *prot);
 void udp_del_offload(struct udp_offload *prot);

 #if IS_ENABLED(CONFIG_IPV6)
net/ipv4/fou.c
@@ -498,7 +498,7 @@ static int fou_create(struct net *net, struct fou_cfg *cfg,
 	sk->sk_allocation = GFP_ATOMIC;

 	if (cfg->udp_config.family == AF_INET) {
-		err = udp_add_offload(&fou->udp_offloads);
+		err = udp_add_offload(net, &fou->udp_offloads);
 		if (err)
 			goto error;
 	}
net/ipv4/udp_offload.c
@@ -21,6 +21,7 @@ static struct udp_offload_priv __rcu *udp_offload_base __read_mostly;

 struct udp_offload_priv {
 	struct udp_offload	*offload;
+	possible_net_t	net;
 	struct rcu_head		rcu;
 	struct udp_offload_priv __rcu *next;
 };
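possible_net_t together with write_pnet()/read_pnet() is the kernel's zero-overhead way to embed a namespace reference: with CONFIG_NET_NS=y it carries a struct net pointer, with CONFIG_NET_NS=n it is empty and reads resolve to init_net. A simplified sketch of these helpers, condensed from include/net/net_namespace.h:

/* Sketch: the pnet accessor pattern. With CONFIG_NET_NS=n the struct is
 * empty, so storing the namespace in udp_offload_priv costs nothing on
 * single-namespace kernels. */
typedef struct {
#ifdef CONFIG_NET_NS
	struct net *net;
#endif
} possible_net_t;

static inline void write_pnet(possible_net_t *pnet, struct net *net)
{
#ifdef CONFIG_NET_NS
	pnet->net = net;
#endif
}

static inline struct net *read_pnet(const possible_net_t *pnet)
{
#ifdef CONFIG_NET_NS
	return pnet->net;
#else
	return &init_net;
#endif
}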
@@ -241,13 +242,14 @@ out:
 	return segs;
 }

-int udp_add_offload(struct udp_offload *uo)
+int udp_add_offload(struct net *net, struct udp_offload *uo)
 {
 	struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC);

 	if (!new_offload)
 		return -ENOMEM;

+	write_pnet(&new_offload->net, net);
 	new_offload->offload = uo;

 	spin_lock(&udp_offload_lock);
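As a usage note, a hedged sketch of how a tunnel driver would register a GRO offload under the new signature; my_offload, my_gro_receive and my_gro_complete are hypothetical names, and the callback signatures are assumed to match the udp_offload_callbacks of this kernel generation:

/* Sketch: registering a namespace-scoped UDP GRO offload.
 * The my_* names are hypothetical; the pattern mirrors what geneve,
 * vxlan and fou do in the hunks above. */
static struct sk_buff **my_gro_receive(struct sk_buff **head,
				       struct sk_buff *skb,
				       struct udp_offload *uoff);
static int my_gro_complete(struct sk_buff *skb, int nhoff,
			   struct udp_offload *uoff);

static struct udp_offload my_offload;

static int my_tunnel_offload_start(struct net *net, __be16 port)
{
	my_offload.port = port;
	my_offload.callbacks.gro_receive  = my_gro_receive;
	my_offload.callbacks.gro_complete = my_gro_complete;

	/* After this call the offload only matches packets that both
	 * arrive on @port and are received in @net. */
	return udp_add_offload(net, &my_offload);
}

static void my_tunnel_offload_stop(void)
{
	udp_del_offload(&my_offload);
}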
@@ -311,7 +313,8 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
 	rcu_read_lock();
 	uo_priv = rcu_dereference(udp_offload_base);
 	for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
-		if (uo_priv->offload->port == uh->dest &&
+		if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) &&
+		    uo_priv->offload->port == uh->dest &&
 		    uo_priv->offload->callbacks.gro_receive)
 			goto unflush;
 	}
@@ -389,7 +392,8 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff)

 	uo_priv = rcu_dereference(udp_offload_base);
 	for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
-		if (uo_priv->offload->port == uh->dest &&
+		if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) &&
+		    uo_priv->offload->port == uh->dest &&
 		    uo_priv->offload->callbacks.gro_complete)
 			break;
 	}
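Both lookup paths key the check on dev_net(skb->dev) rather than on a socket, because at GRO time no socket lookup has happened yet; the receiving device's namespace is the only handle available. For reference, dev_net() is essentially just a pnet read, condensed here from include/linux/netdevice.h:

/* dev_net(): the namespace the receiving device currently lives in. */
static inline struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}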