2010-08-18 17:10:35 +04:00
/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
# include <linux/types.h>
# include <linux/init.h>
# include <linux/kernel.h>
# include <linux/module.h>
# include <linux/spinlock.h>
# include <linux/netlink.h>
# include <net/netlink.h>
# include <linux/rtnetlink.h>
# include <linux/skbuff.h>
# include <net/ip.h>
# include <net/ipv6.h>
# include <net/icmp.h>
# include <linux/icmpv6.h>
# include <linux/igmp.h>
# include <net/tcp.h>
# include <net/udp.h>
2010-08-23 07:31:14 +04:00
# include <net/ip6_checksum.h>
2017-01-09 13:24:21 +03:00
# include <net/sctp/checksum.h>
2010-08-18 17:10:35 +04:00
# include <net/act_api.h>
# include <linux/tc_act/tc_csum.h>
# include <net/tc_act/tc_csum.h>
/* Netlink attribute policy: TCA_CSUM_PARMS must carry a full struct tc_csum. */
static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};
netns: make struct pernet_operations::id unsigned int
Make struct pernet_operations::id unsigned.
There are 2 reasons to do so:
1)
This field is really an index into a zero-based array and
thus is an unsigned entity. Using a negative value is out-of-bound
access by definition.
2)
On x86_64, unsigned 32-bit data which are mixed with pointers
via array indexing or offsets added to or subtracted from pointers
are preferred to signed 32-bit data.
"int" being used as an array index needs to be sign-extended
to 64-bit before being used.
void f(long *p, int i)
{
g(p[i]);
}
roughly translates to
movsx rsi, esi
mov rdi, [rsi+...]
call g
MOVSX is 3 byte instruction which isn't necessary if the variable is
unsigned because x86_64 is zero extending by default.
Now, there is net_generic() function which, you guessed it right, uses
"int" as an array index:
static inline void *net_generic(const struct net *net, int id)
{
...
ptr = ng->ptr[id - 1];
...
}
And this function is used a lot, so those sign extensions add up.
Patch snipes ~1730 bytes on allyesconfig kernel (without all junk
messing with code generation):
add/remove: 0/0 grow/shrink: 70/598 up/down: 396/-2126 (-1730)
Unfortunately some functions actually grow bigger.
This is a seemingly random artefact of code generation with the register
allocator being used differently. gcc decides that some variable
needs to live in new r8+ registers and every access now requires REX
prefix. Or it is shifted into r12, so [r12+0] addressing mode has to be
used which is longer than [r8]
However, overall balance is in negative direction:
add/remove: 0/0 grow/shrink: 70/598 up/down: 396/-2126 (-1730)
function old new delta
nfsd4_lock 3886 3959 +73
tipc_link_build_proto_msg 1096 1140 +44
mac80211_hwsim_new_radio 2776 2808 +32
tipc_mon_rcv 1032 1058 +26
svcauth_gss_legacy_init 1413 1429 +16
tipc_bcbase_select_primary 379 392 +13
nfsd4_exchange_id 1247 1260 +13
nfsd4_setclientid_confirm 782 793 +11
...
put_client_renew_locked 494 480 -14
ip_set_sockfn_get 730 716 -14
geneve_sock_add 829 813 -16
nfsd4_sequence_done 721 703 -18
nlmclnt_lookup_host 708 686 -22
nfsd4_lockt 1085 1063 -22
nfs_get_client 1077 1050 -27
tcf_bpf_init 1106 1076 -30
nfsd4_encode_fattr 5997 5930 -67
Total: Before=154856051, After=154854321, chg -0.00%
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-11-17 04:58:21 +03:00
static unsigned int csum_net_id ;
2016-07-26 02:09:41 +03:00
static struct tc_action_ops act_csum_ops ;
2016-02-23 02:57:53 +03:00
static int tcf_csum_init ( struct net * net , struct nlattr * nla ,
2016-07-26 02:09:41 +03:00
struct nlattr * est , struct tc_action * * a , int ovr ,
2016-02-23 02:57:53 +03:00
int bind )
2010-08-18 17:10:35 +04:00
{
2016-02-23 02:57:53 +03:00
struct tc_action_net * tn = net_generic ( net , csum_net_id ) ;
2010-08-18 17:10:35 +04:00
struct nlattr * tb [ TCA_CSUM_MAX + 1 ] ;
struct tc_csum * parm ;
struct tcf_csum * p ;
int ret = 0 , err ;
if ( nla = = NULL )
return - EINVAL ;
2017-04-12 15:34:07 +03:00
err = nla_parse_nested ( tb , TCA_CSUM_MAX , nla , csum_policy , NULL ) ;
2010-08-18 17:10:35 +04:00
if ( err < 0 )
return err ;
if ( tb [ TCA_CSUM_PARMS ] = = NULL )
return - EINVAL ;
parm = nla_data ( tb [ TCA_CSUM_PARMS ] ) ;
2017-08-30 09:31:59 +03:00
if ( ! tcf_idr_check ( tn , parm - > index , a , bind ) ) {
ret = tcf_idr_create ( tn , parm - > index , est , a ,
& act_csum_ops , bind , false ) ;
2014-02-12 05:07:31 +04:00
if ( ret )
return ret ;
2010-08-18 17:10:35 +04:00
ret = ACT_P_CREATED ;
} else {
2013-12-23 17:02:11 +04:00
if ( bind ) /* dont override defaults */
return 0 ;
2017-08-30 09:31:59 +03:00
tcf_idr_release ( * a , bind ) ;
2013-12-23 17:02:11 +04:00
if ( ! ovr )
2010-08-18 17:10:35 +04:00
return - EEXIST ;
}
2016-07-26 02:09:41 +03:00
p = to_tcf_csum ( * a ) ;
2010-08-18 17:10:35 +04:00
spin_lock_bh ( & p - > tcf_lock ) ;
p - > tcf_action = parm - > action ;
p - > update_flags = parm - > update_flags ;
spin_unlock_bh ( & p - > tcf_lock ) ;
if ( ret = = ACT_P_CREATED )
2017-08-30 09:31:59 +03:00
tcf_idr_insert ( tn , * a ) ;
2010-08-18 17:10:35 +04:00
return ret ;
}
/**
* tcf_csum_skb_nextlayer - Get next layer pointer
* @ skb : sk_buff to use
* @ ihl : previous summed headers length
* @ ipl : complete packet length
* @ jhl : next header length
*
* Check the expected next layer availability in the specified sk_buff .
* Return the next layer pointer if pass , NULL otherwise .
*/
static void * tcf_csum_skb_nextlayer ( struct sk_buff * skb ,
unsigned int ihl , unsigned int ipl ,
unsigned int jhl )
{
int ntkoff = skb_network_offset ( skb ) ;
int hl = ihl + jhl ;
if ( ! pskb_may_pull ( skb , ipl + ntkoff ) | | ( ipl < hl ) | |
2016-02-20 01:05:25 +03:00
skb_try_make_writable ( skb , hl + ntkoff ) )
2010-08-18 17:10:35 +04:00
return NULL ;
else
return ( void * ) ( skb_network_header ( skb ) + ihl ) ;
}
2016-09-18 15:45:33 +03:00
static int tcf_csum_ipv4_icmp ( struct sk_buff * skb , unsigned int ihl ,
unsigned int ipl )
2010-08-18 17:10:35 +04:00
{
struct icmphdr * icmph ;
icmph = tcf_csum_skb_nextlayer ( skb , ihl , ipl , sizeof ( * icmph ) ) ;
if ( icmph = = NULL )
return 0 ;
icmph - > checksum = 0 ;
skb - > csum = csum_partial ( icmph , ipl - ihl , 0 ) ;
icmph - > checksum = csum_fold ( skb - > csum ) ;
skb - > ip_summed = CHECKSUM_NONE ;
return 1 ;
}
static int tcf_csum_ipv4_igmp ( struct sk_buff * skb ,
unsigned int ihl , unsigned int ipl )
{
struct igmphdr * igmph ;
igmph = tcf_csum_skb_nextlayer ( skb , ihl , ipl , sizeof ( * igmph ) ) ;
if ( igmph = = NULL )
return 0 ;
igmph - > csum = 0 ;
skb - > csum = csum_partial ( igmph , ipl - ihl , 0 ) ;
igmph - > csum = csum_fold ( skb - > csum ) ;
skb - > ip_summed = CHECKSUM_NONE ;
return 1 ;
}
2016-09-18 15:45:33 +03:00
static int tcf_csum_ipv6_icmp ( struct sk_buff * skb , unsigned int ihl ,
unsigned int ipl )
2010-08-18 17:10:35 +04:00
{
struct icmp6hdr * icmp6h ;
2013-04-12 22:07:47 +04:00
const struct ipv6hdr * ip6h ;
2010-08-18 17:10:35 +04:00
icmp6h = tcf_csum_skb_nextlayer ( skb , ihl , ipl , sizeof ( * icmp6h ) ) ;
if ( icmp6h = = NULL )
return 0 ;
2013-04-12 22:07:47 +04:00
ip6h = ipv6_hdr ( skb ) ;
2010-08-18 17:10:35 +04:00
icmp6h - > icmp6_cksum = 0 ;
skb - > csum = csum_partial ( icmp6h , ipl - ihl , 0 ) ;
icmp6h - > icmp6_cksum = csum_ipv6_magic ( & ip6h - > saddr , & ip6h - > daddr ,
ipl - ihl , IPPROTO_ICMPV6 ,
skb - > csum ) ;
skb - > ip_summed = CHECKSUM_NONE ;
return 1 ;
}
2016-09-18 15:45:33 +03:00
static int tcf_csum_ipv4_tcp ( struct sk_buff * skb , unsigned int ihl ,
unsigned int ipl )
2010-08-18 17:10:35 +04:00
{
struct tcphdr * tcph ;
2013-04-12 22:07:47 +04:00
const struct iphdr * iph ;
2010-08-18 17:10:35 +04:00
2017-03-23 12:39:40 +03:00
if ( skb_is_gso ( skb ) & & skb_shinfo ( skb ) - > gso_type & SKB_GSO_TCPV4 )
return 1 ;
2010-08-18 17:10:35 +04:00
tcph = tcf_csum_skb_nextlayer ( skb , ihl , ipl , sizeof ( * tcph ) ) ;
if ( tcph = = NULL )
return 0 ;
2013-04-12 22:07:47 +04:00
iph = ip_hdr ( skb ) ;
2010-08-18 17:10:35 +04:00
tcph - > check = 0 ;
skb - > csum = csum_partial ( tcph , ipl - ihl , 0 ) ;
tcph - > check = tcp_v4_check ( ipl - ihl ,
iph - > saddr , iph - > daddr , skb - > csum ) ;
skb - > ip_summed = CHECKSUM_NONE ;
return 1 ;
}
2016-09-18 15:45:33 +03:00
static int tcf_csum_ipv6_tcp ( struct sk_buff * skb , unsigned int ihl ,
unsigned int ipl )
2010-08-18 17:10:35 +04:00
{
struct tcphdr * tcph ;
2013-04-12 22:07:47 +04:00
const struct ipv6hdr * ip6h ;
2010-08-18 17:10:35 +04:00
2017-03-23 12:39:40 +03:00
if ( skb_is_gso ( skb ) & & skb_shinfo ( skb ) - > gso_type & SKB_GSO_TCPV6 )
return 1 ;
2010-08-18 17:10:35 +04:00
tcph = tcf_csum_skb_nextlayer ( skb , ihl , ipl , sizeof ( * tcph ) ) ;
if ( tcph = = NULL )
return 0 ;
2013-04-12 22:07:47 +04:00
ip6h = ipv6_hdr ( skb ) ;
2010-08-18 17:10:35 +04:00
tcph - > check = 0 ;
skb - > csum = csum_partial ( tcph , ipl - ihl , 0 ) ;
tcph - > check = csum_ipv6_magic ( & ip6h - > saddr , & ip6h - > daddr ,
ipl - ihl , IPPROTO_TCP ,
skb - > csum ) ;
skb - > ip_summed = CHECKSUM_NONE ;
return 1 ;
}
2016-09-18 15:45:33 +03:00
static int tcf_csum_ipv4_udp ( struct sk_buff * skb , unsigned int ihl ,
unsigned int ipl , int udplite )
2010-08-18 17:10:35 +04:00
{
struct udphdr * udph ;
2013-04-12 22:07:47 +04:00
const struct iphdr * iph ;
2010-08-18 17:10:35 +04:00
u16 ul ;
2010-08-23 07:27:58 +04:00
/*
* Support both UDP and UDPLITE checksum algorithms , Don ' t use
* udph - > len to get the real length without any protocol check ,
2010-08-18 17:10:35 +04:00
* UDPLITE uses udph - > len for another thing ,
* Use iph - > tot_len , or just ipl .
*/
udph = tcf_csum_skb_nextlayer ( skb , ihl , ipl , sizeof ( * udph ) ) ;
if ( udph = = NULL )
return 0 ;
2013-04-12 22:07:47 +04:00
iph = ip_hdr ( skb ) ;
2010-08-18 17:10:35 +04:00
ul = ntohs ( udph - > len ) ;
if ( udplite | | udph - > check ) {
udph - > check = 0 ;
if ( udplite ) {
if ( ul = = 0 )
skb - > csum = csum_partial ( udph , ipl - ihl , 0 ) ;
else if ( ( ul > = sizeof ( * udph ) ) & & ( ul < = ipl - ihl ) )
skb - > csum = csum_partial ( udph , ul , 0 ) ;
else
goto ignore_obscure_skb ;
} else {
if ( ul ! = ipl - ihl )
goto ignore_obscure_skb ;
skb - > csum = csum_partial ( udph , ul , 0 ) ;
}
udph - > check = csum_tcpudp_magic ( iph - > saddr , iph - > daddr ,
ul , iph - > protocol ,
skb - > csum ) ;
if ( ! udph - > check )
udph - > check = CSUM_MANGLED_0 ;
}
skb - > ip_summed = CHECKSUM_NONE ;
ignore_obscure_skb :
return 1 ;
}
2016-09-18 15:45:33 +03:00
static int tcf_csum_ipv6_udp ( struct sk_buff * skb , unsigned int ihl ,
unsigned int ipl , int udplite )
2010-08-18 17:10:35 +04:00
{
struct udphdr * udph ;
2013-04-12 22:07:47 +04:00
const struct ipv6hdr * ip6h ;
2010-08-18 17:10:35 +04:00
u16 ul ;
2010-08-23 07:27:58 +04:00
/*
* Support both UDP and UDPLITE checksum algorithms , Don ' t use
* udph - > len to get the real length without any protocol check ,
2010-08-18 17:10:35 +04:00
* UDPLITE uses udph - > len for another thing ,
* Use ip6h - > payload_len + sizeof ( * ip6h ) . . . , or just ipl .
*/
udph = tcf_csum_skb_nextlayer ( skb , ihl , ipl , sizeof ( * udph ) ) ;
if ( udph = = NULL )
return 0 ;
2013-04-12 22:07:47 +04:00
ip6h = ipv6_hdr ( skb ) ;
2010-08-18 17:10:35 +04:00
ul = ntohs ( udph - > len ) ;
udph - > check = 0 ;
if ( udplite ) {
if ( ul = = 0 )
skb - > csum = csum_partial ( udph , ipl - ihl , 0 ) ;
else if ( ( ul > = sizeof ( * udph ) ) & & ( ul < = ipl - ihl ) )
skb - > csum = csum_partial ( udph , ul , 0 ) ;
else
goto ignore_obscure_skb ;
} else {
if ( ul ! = ipl - ihl )
goto ignore_obscure_skb ;
skb - > csum = csum_partial ( udph , ul , 0 ) ;
}
udph - > check = csum_ipv6_magic ( & ip6h - > saddr , & ip6h - > daddr , ul ,
udplite ? IPPROTO_UDPLITE : IPPROTO_UDP ,
skb - > csum ) ;
if ( ! udph - > check )
udph - > check = CSUM_MANGLED_0 ;
skb - > ip_summed = CHECKSUM_NONE ;
ignore_obscure_skb :
return 1 ;
}
2017-01-09 13:24:21 +03:00
static int tcf_csum_sctp ( struct sk_buff * skb , unsigned int ihl ,
unsigned int ipl )
{
struct sctphdr * sctph ;
if ( skb_is_gso ( skb ) & & skb_shinfo ( skb ) - > gso_type & SKB_GSO_SCTP )
return 1 ;
sctph = tcf_csum_skb_nextlayer ( skb , ihl , ipl , sizeof ( * sctph ) ) ;
if ( ! sctph )
return 0 ;
sctph - > checksum = sctp_compute_cksum ( skb ,
skb_network_offset ( skb ) + ihl ) ;
skb - > ip_summed = CHECKSUM_NONE ;
2017-05-18 16:44:40 +03:00
skb - > csum_not_inet = 0 ;
2017-01-09 13:24:21 +03:00
return 1 ;
}
2010-08-18 17:10:35 +04:00
/* Update the requested layer-4 checksums of an IPv4 packet, then the
 * IPv4 header checksum itself if TCA_CSUM_UPDATE_FLAG_IPV4HDR is set.
 *
 * Return: 1 on success (including protocols deliberately left alone),
 * 0 when the packet should be dropped.
 */
static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	const struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

	/* Non-first fragments carry no L4 header: dispatch them to no-op. */
	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	case IPPROTO_SCTP:
		if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
		    !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
			goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
			goto fail;

		/* Re-fetch the header: making it writable may have copied it. */
		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}
2016-09-18 15:45:33 +03:00
static int tcf_csum_ipv6_hopopts ( struct ipv6_opt_hdr * ip6xh , unsigned int ixhl ,
unsigned int * pl )
2010-08-18 17:10:35 +04:00
{
int off , len , optlen ;
unsigned char * xh = ( void * ) ip6xh ;
off = sizeof ( * ip6xh ) ;
len = ixhl - off ;
while ( len > 1 ) {
2010-08-23 07:27:58 +04:00
switch ( xh [ off ] ) {
2012-05-17 10:00:25 +04:00
case IPV6_TLV_PAD1 :
2010-08-18 17:10:35 +04:00
optlen = 1 ;
break ;
case IPV6_TLV_JUMBO :
optlen = xh [ off + 1 ] + 2 ;
if ( optlen ! = 6 | | len < 6 | | ( off & 3 ) ! = 2 )
/* wrong jumbo option length/alignment */
return 0 ;
* pl = ntohl ( * ( __be32 * ) ( xh + off + 2 ) ) ;
goto done ;
default :
optlen = xh [ off + 1 ] + 2 ;
if ( optlen > len )
/* ignore obscure options */
goto done ;
break ;
}
off + = optlen ;
len - = optlen ;
}
done :
return 1 ;
}
/* Update the requested layer-4 checksums of an IPv6 packet, first
 * walking any routing/hop-by-hop/destination extension headers.
 * Fragments and unknown next-headers are left untouched.
 *
 * Return: 1 on success or deliberate skip, 0 when the packet should
 * be dropped.
 */
static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);

	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);

	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

	do {
		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			/* Re-derive the pointer: the pull may have moved data. */
			ip6xh = (void *)(skb_network_header(skb) + hl);
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		case IPPROTO_SCTP:
			if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
			    !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
				goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}
2016-09-18 15:45:33 +03:00
static int tcf_csum ( struct sk_buff * skb , const struct tc_action * a ,
struct tcf_result * res )
2010-08-18 17:10:35 +04:00
{
2016-07-26 02:09:41 +03:00
struct tcf_csum * p = to_tcf_csum ( a ) ;
2010-08-18 17:10:35 +04:00
int action ;
u32 update_flags ;
spin_lock ( & p - > tcf_lock ) ;
2016-06-06 13:32:53 +03:00
tcf_lastuse_update ( & p - > tcf_tm ) ;
2011-01-09 11:30:54 +03:00
bstats_update ( & p - > tcf_bstats , skb ) ;
2010-08-18 17:10:35 +04:00
action = p - > tcf_action ;
update_flags = p - > update_flags ;
spin_unlock ( & p - > tcf_lock ) ;
if ( unlikely ( action = = TC_ACT_SHOT ) )
goto drop ;
2015-01-13 19:13:43 +03:00
switch ( tc_skb_protocol ( skb ) ) {
2010-08-18 17:10:35 +04:00
case cpu_to_be16 ( ETH_P_IP ) :
if ( ! tcf_csum_ipv4 ( skb , update_flags ) )
goto drop ;
break ;
case cpu_to_be16 ( ETH_P_IPV6 ) :
if ( ! tcf_csum_ipv6 ( skb , update_flags ) )
goto drop ;
break ;
}
return action ;
drop :
spin_lock ( & p - > tcf_lock ) ;
p - > tcf_qstats . drops + + ;
spin_unlock ( & p - > tcf_lock ) ;
return TC_ACT_SHOT ;
}
2016-09-18 15:45:33 +03:00
static int tcf_csum_dump ( struct sk_buff * skb , struct tc_action * a , int bind ,
int ref )
2010-08-18 17:10:35 +04:00
{
unsigned char * b = skb_tail_pointer ( skb ) ;
2016-07-26 02:09:41 +03:00
struct tcf_csum * p = to_tcf_csum ( a ) ;
2010-08-18 17:10:35 +04:00
struct tc_csum opt = {
. update_flags = p - > update_flags ,
. index = p - > tcf_index ,
. action = p - > tcf_action ,
. refcnt = p - > tcf_refcnt - ref ,
. bindcnt = p - > tcf_bindcnt - bind ,
} ;
struct tcf_t t ;
2012-03-29 13:11:39 +04:00
if ( nla_put ( skb , TCA_CSUM_PARMS , sizeof ( opt ) , & opt ) )
goto nla_put_failure ;
2016-06-06 13:32:55 +03:00
tcf_tm_dump ( & t , & p - > tcf_tm ) ;
2016-04-26 11:06:18 +03:00
if ( nla_put_64bit ( skb , TCA_CSUM_TM , sizeof ( t ) , & t , TCA_CSUM_PAD ) )
2012-03-29 13:11:39 +04:00
goto nla_put_failure ;
2010-08-18 17:10:35 +04:00
return skb - > len ;
nla_put_failure :
nlmsg_trim ( skb , b ) ;
return - 1 ;
}
2016-02-23 02:57:53 +03:00
static int tcf_csum_walker ( struct net * net , struct sk_buff * skb ,
struct netlink_callback * cb , int type ,
2016-07-26 02:09:41 +03:00
const struct tc_action_ops * ops )
2016-02-23 02:57:53 +03:00
{
struct tc_action_net * tn = net_generic ( net , csum_net_id ) ;
2016-07-26 02:09:41 +03:00
return tcf_generic_walker ( tn , skb , cb , type , ops ) ;
2016-02-23 02:57:53 +03:00
}
2016-07-26 02:09:41 +03:00
/* Look up a csum action by index in this netns. */
static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_idr_search(tn, a, index);
}
2010-08-18 17:10:35 +04:00
static struct tc_action_ops act_csum_ops = {
2010-08-23 07:27:58 +04:00
. kind = " csum " ,
. type = TCA_ACT_CSUM ,
. owner = THIS_MODULE ,
. act = tcf_csum ,
. dump = tcf_csum_dump ,
. init = tcf_csum_init ,
2016-02-23 02:57:53 +03:00
. walk = tcf_csum_walker ,
. lookup = tcf_csum_search ,
2016-07-26 02:09:41 +03:00
. size = sizeof ( struct tcf_csum ) ,
2016-02-23 02:57:53 +03:00
} ;
static __net_init int csum_init_net ( struct net * net )
{
struct tc_action_net * tn = net_generic ( net , csum_net_id ) ;
2017-11-01 20:23:50 +03:00
return tc_action_net_init ( tn , & act_csum_ops , net ) ;
2016-02-23 02:57:53 +03:00
}
/* Per-netns teardown: release all csum actions of this netns. */
static void __net_exit csum_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	tc_action_net_exit(tn);
}
static struct pernet_operations csum_net_ops = {
	.init = csum_init_net,
	.exit = csum_exit_net,
	.id   = &csum_net_id,
	.size = sizeof(struct tc_action_net),
};
/* Restore the mangled string literals: " GPL " (with spaces) is not a
 * recognized license ident and would taint the module.
 */
MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");
/* Module load: register the action ops and per-netns operations. */
static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops, &csum_net_ops);
}
/* Module unload: unregister the action ops and per-netns operations. */
static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}
module_init ( csum_init_module ) ;
module_exit ( csum_cleanup_module ) ;