linux/net/ipv6/ip6_offload.c

/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/printk.h>

#include <net/protocol.h>
#include <net/ipv6.h>

#include "ip6_offload.h"
static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
{
	const struct net_offload *ops = NULL;

	for (;;) {
		struct ipv6_opt_hdr *opth;
		int len;

		if (proto != NEXTHDR_HOP) {
			ops = rcu_dereference(inet6_offloads[proto]);

			if (unlikely(!ops))
				break;

			if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}

		if (unlikely(!pskb_may_pull(skb, 8)))
			break;

		opth = (void *)skb->data;
		len = ipv6_optlen(opth);

		if (unlikely(!pskb_may_pull(skb, len)))
			break;

		proto = opth->nexthdr;
		__skb_pull(skb, len);
	}

	return proto;
}
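
/*
 * Pre-flight check for GSO output: strip the IPv6 header and any
 * extension headers, then hand the packet to the transport protocol's
 * gso_send_check() callback so it can set up its checksum fields.
 */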
static int ipv6_gso_send_check(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	const struct net_offload *ops;
	int err = -EINVAL;

	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
		goto out;

	ipv6h = ipv6_hdr(skb);
	__skb_pull(skb, sizeof(*ipv6h));
	err = -EPROTONOSUPPORT;

	ops = rcu_dereference(inet6_offloads[
		ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);

	if (likely(ops && ops->callbacks.gso_send_check)) {
		skb_reset_transport_header(skb);
		err = ops->callbacks.gso_send_check(skb);
	}

out:
	return err;
}
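
/*
 * Segment a GSO skb into MTU-sized packets.  The transport protocol
 * does the actual splitting; afterwards each segment's IPv6
 * payload_len is rewritten, and for UFO the fragment header's offset
 * and MF bit are filled in per segment.
 */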
static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
	netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct ipv6hdr *ipv6h;
	const struct net_offload *ops;
	int proto;
	struct frag_hdr *fptr;
	unsigned int unfrag_ip6hlen;
	u8 *prevhdr;
	int offset = 0;
	bool tunnel;
	int nhoff;

	if (unlikely(skb_shinfo(skb)->gso_type &
		     ~(SKB_GSO_UDP |
		       SKB_GSO_DODGY |
		       SKB_GSO_TCP_ECN |
		       SKB_GSO_GRE |
		       SKB_GSO_IPIP |
		       SKB_GSO_SIT |
		       SKB_GSO_UDP_TUNNEL |
		       SKB_GSO_MPLS |
		       SKB_GSO_TCPV6 |
		       0)))
		goto out;

	skb_reset_network_header(skb);
	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
		goto out;

	tunnel = SKB_GSO_CB(skb)->encap_level > 0;
	if (tunnel)
		features = skb->dev->hw_enc_features & netif_skb_features(skb);
	SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);

	ipv6h = ipv6_hdr(skb);
	__skb_pull(skb, sizeof(*ipv6h));
	segs = ERR_PTR(-EPROTONOSUPPORT);

	proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);

	ops = rcu_dereference(inet6_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment)) {
		skb_reset_transport_header(skb);
		segs = ops->callbacks.gso_segment(skb, features);
	}

	if (IS_ERR(segs))
		goto out;

	for (skb = segs; skb; skb = skb->next) {
		ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
		ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h));
		if (tunnel) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}
		skb->network_header = (u8 *)ipv6h - skb->head;

		if (!tunnel && proto == IPPROTO_UDP) {
			unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
			fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
			fptr->frag_off = htons(offset);
			if (skb->next != NULL)
				fptr->frag_off |= htons(IP6_MF);
			offset += (ntohs(ipv6h->payload_len) -
				   sizeof(struct frag_hdr));
		}
	}

out:
	return segs;
}
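
/*
 * GRO receive: locate the transport header (pulling extension headers
 * when the next header is not directly GRO-capable), compare the IPv6
 * header against the packets already held on @head, and hand the skb
 * to the transport protocol's gro_receive() callback for merging.
 */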
static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct ipv6hdr *iph;
	unsigned int nlen;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int proto;
	__wsum csum;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*iph);
	iph = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		iph = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!iph))
			goto out;
	}

	skb_gro_pull(skb, sizeof(*iph));
	skb_set_transport_header(skb, skb_gro_offset(skb));

	flush += ntohs(iph->payload_len) != skb_gro_len(skb);

	rcu_read_lock();
	proto = iph->nexthdr;
	ops = rcu_dereference(inet6_offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive) {
		__pskb_pull(skb, skb_gro_offset(skb));
		proto = ipv6_gso_pull_exthdrs(skb, proto);
		skb_gro_pull(skb, -skb_transport_offset(skb));
		skb_reset_transport_header(skb);
		__skb_push(skb, skb_gro_offset(skb));

		ops = rcu_dereference(inet6_offloads[proto]);
		if (!ops || !ops->callbacks.gro_receive)
			goto out_unlock;

		iph = ipv6_hdr(skb);
	}

	NAPI_GRO_CB(skb)->proto = proto;

	flush--;
	nlen = skb_network_header_len(skb);

	for (p = *head; p; p = p->next) {
		const struct ipv6hdr *iph2;
		__be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		iph2 = ipv6_hdr(p);
		first_word = *(__be32 *)iph ^ *(__be32 *)iph2;

		/* All fields must match except length and Traffic Class. */
		if (nlen != skb_network_header_len(p) ||
		    (first_word & htonl(0xF00FFFFF)) ||
		    memcmp(&iph->nexthdr, &iph2->nexthdr,
			   nlen - offsetof(struct ipv6hdr, nexthdr))) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		/* flush if Traffic Class fields are different */
		NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));

		NAPI_GRO_CB(p)->flush |= flush;
	}

	NAPI_GRO_CB(skb)->flush |= flush;

	csum = skb->csum;
	skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));

	pp = ops->callbacks.gro_receive(head, skb);

	skb->csum = csum;

out_unlock:
	rcu_read_unlock();

out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
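
/*
 * GRO complete: the merged super-packet is about to leave GRO, so
 * rewrite payload_len to cover the coalesced data and let the
 * transport protocol finish its own header fixups.
 */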
static int ipv6_gro_complete(struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct ipv6hdr *iph = ipv6_hdr(skb);
	int err = -ENOSYS;

	iph->payload_len = htons(skb->len - skb_network_offset(skb) -
				 sizeof(*iph));

	rcu_read_lock();
	ops = rcu_dereference(inet6_offloads[NAPI_GRO_CB(skb)->proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb);

out_unlock:
	rcu_read_unlock();

	return err;
}
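
/* Offload entry points for ETH_P_IPV6 frames. */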
static struct packet_offload ipv6_packet_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IPV6),
	.callbacks = {
		.gso_send_check = ipv6_gso_send_check,
		.gso_segment = ipv6_gso_segment,
		.gro_receive = ipv6_gro_receive,
		.gro_complete = ipv6_gro_complete,
	},
};
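
/*
 * SIT (IPv6-in-IPv4) tunnels carry an inner IPv6 packet, so the same
 * GSO helpers apply; registered below under IPPROTO_IPV6.
 */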
static const struct net_offload sit_offload = {
	.callbacks = {
		.gso_send_check = ipv6_gso_send_check,
		.gso_segment = ipv6_gso_segment,
	},
};

static int __init ipv6_offload_init(void)
{
	if (tcpv6_offload_init() < 0)
		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
	if (udp_offload_init() < 0)
		pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
	if (ipv6_exthdrs_offload_init() < 0)
		pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);

	dev_add_offload(&ipv6_packet_offload);
	inet_add_offload(&sit_offload, IPPROTO_IPV6);
	return 0;
}

fs_initcall(ipv6_offload_init);