inet: restore gso for vxlan

Alexei reported a performance regression on vxlan, caused
by commit 3347c96029 "ipv4: gso: make inet_gso_segment() stackable"

GSO vxlan packets were not properly segmented, adding IP fragments
while they were not expected.

Rename 'bool tunnel' to 'bool encap', and add a new boolean
to express the fact that UDP should be fragmented.
This fragmentation is triggered by skb->encapsulation being set.

Remove a "skb->encapsulation = 1" added in above commit,
as it is not needed: frags inherit skb->encapsulation from the original
GSO skb.

Reported-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Tested-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Eric Dumazet 2013-10-27 18:18:16 -07:00 committed by David S. Miller
parent 1f2cd845d3
commit 8c3a897bfa

View File

@ -1251,8 +1251,8 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
struct sk_buff *segs = ERR_PTR(-EINVAL); struct sk_buff *segs = ERR_PTR(-EINVAL);
const struct net_offload *ops; const struct net_offload *ops;
unsigned int offset = 0; unsigned int offset = 0;
bool udpfrag, encap;
struct iphdr *iph; struct iphdr *iph;
bool tunnel;
int proto; int proto;
int nhoff; int nhoff;
int ihl; int ihl;
@ -1290,8 +1290,8 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
goto out; goto out;
__skb_pull(skb, ihl); __skb_pull(skb, ihl);
tunnel = SKB_GSO_CB(skb)->encap_level > 0; encap = SKB_GSO_CB(skb)->encap_level > 0;
if (tunnel) if (encap)
features = skb->dev->hw_enc_features & netif_skb_features(skb); features = skb->dev->hw_enc_features & netif_skb_features(skb);
SKB_GSO_CB(skb)->encap_level += ihl; SKB_GSO_CB(skb)->encap_level += ihl;
@ -1306,24 +1306,23 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
if (IS_ERR_OR_NULL(segs)) if (IS_ERR_OR_NULL(segs))
goto out; goto out;
udpfrag = !!skb->encapsulation && proto == IPPROTO_UDP;
skb = segs; skb = segs;
do { do {
iph = (struct iphdr *)(skb_mac_header(skb) + nhoff); iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
if (!tunnel && proto == IPPROTO_UDP) { if (udpfrag) {
iph->id = htons(id); iph->id = htons(id);
iph->frag_off = htons(offset >> 3); iph->frag_off = htons(offset >> 3);
if (skb->next != NULL) if (skb->next != NULL)
iph->frag_off |= htons(IP_MF); iph->frag_off |= htons(IP_MF);
offset += skb->len - nhoff - ihl; offset += skb->len - nhoff - ihl;
} else { } else {
iph->id = htons(id++); iph->id = htons(id++);
} }
iph->tot_len = htons(skb->len - nhoff); iph->tot_len = htons(skb->len - nhoff);
ip_send_check(iph); ip_send_check(iph);
if (tunnel) { if (encap)
skb_reset_inner_headers(skb); skb_reset_inner_headers(skb);
skb->encapsulation = 1;
}
skb->network_header = (u8 *)iph - skb->head; skb->network_header = (u8 *)iph - skb->head;
} while ((skb = skb->next)); } while ((skb = skb->next));