#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/dsa.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/gre.h>
#include <net/pptp.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/stddef.h>
#include <linux/if_ether.h>
#include <linux/mpls.h>
#include <linux/tcp.h>
#include <net/flow_dissector.h>
#include <scsi/fc/fc_fcoe.h>

static void dissector_set_key(struct flow_dissector *flow_dissector,
			      enum flow_dissector_key_id key_id)
{
	flow_dissector->used_keys |= (1 << key_id);
}

void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count)
{
	unsigned int i;

	memset(flow_dissector, 0, sizeof(*flow_dissector));

	for (i = 0; i < key_count; i++, key++) {
		/* User should make sure that every key target offset is within
		 * boundaries of unsigned short.
		 */
		BUG_ON(key->offset > USHRT_MAX);
		BUG_ON(dissector_uses_key(flow_dissector,
					  key->key_id));

		dissector_set_key(flow_dissector, key->key_id);
		flow_dissector->offset[key->key_id] = key->offset;
	}

	/* Ensure that the dissector always includes control and basic key.
	 * That way we are able to avoid handling lack of these in fast path.
	 */
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_CONTROL));
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_BASIC));
}
EXPORT_SYMBOL(skb_flow_dissector_init);

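/*
 * Example usage (an illustrative sketch, not a prescribed API pattern):
 * a caller builds a dissector from a table mapping key ids to offsets
 * inside its own container struct. "my_keys" and "my_dissector" below are
 * placeholder names; the default tables at the bottom of this file follow
 * the same pattern. CONTROL and BASIC must always be present, or the
 * BUG_ON()s above will trigger.
 *
 *	static const struct flow_dissector_key my_keys[] = {
 *		{
 *			.key_id = FLOW_DISSECTOR_KEY_CONTROL,
 *			.offset = offsetof(struct flow_keys, control),
 *		},
 *		{
 *			.key_id = FLOW_DISSECTOR_KEY_BASIC,
 *			.offset = offsetof(struct flow_keys, basic),
 *		},
 *	};
 *	static struct flow_dissector my_dissector __read_mostly;
 *
 *	skb_flow_dissector_init(&my_dissector, my_keys, ARRAY_SIZE(my_keys));
 */
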
/**
 * skb_flow_get_be16 - extract be16 entity
 * @skb: sk_buff to extract from
 * @poff: offset to extract at
 * @data: raw buffer pointer to the packet
 * @hlen: packet header length
 *
 * The function will try to retrieve a be16 entity at
 * offset poff
 */
static __be16 skb_flow_get_be16(const struct sk_buff *skb, int poff,
				void *data, int hlen)
{
	__be16 *u, _u;

	u = __skb_header_pointer(skb, poff, sizeof(_u), data, hlen, &_u);
	if (u)
		return *u;

	return 0;
}

/**
 * __skb_flow_get_ports - extract the upper layer ports and return them
 * @skb: sk_buff to extract the ports from
 * @thoff: transport header offset
 * @ip_proto: protocol for which to get port offset
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve the ports at offset thoff + poff where poff
 * is the protocol port offset returned from proto_ports_offset
 */
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    void *data, int hlen)
{
	int poff = proto_ports_offset(ip_proto);

	if (!data) {
		data = skb->data;
		hlen = skb_headlen(skb);
	}

	if (poff >= 0) {
		__be32 *ports, _ports;

		ports = __skb_header_pointer(skb, thoff + poff,
					     sizeof(_ports), data, hlen, &_ports);
		if (ports)
			return *ports;
	}

	return 0;
}
EXPORT_SYMBOL(__skb_flow_get_ports);

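/*
 * Example usage (an illustrative sketch): given the transport header
 * offset and the IP protocol, the returned __be32 carries the source and
 * destination ports back to back in network order, so it can be stored
 * directly into a struct flow_dissector_key_ports. Passing NULL/0 for
 * @data and @hlen makes the helper fall back to skb->data and
 * skb_headlen(skb). "thoff" and "ip_proto" are placeholders here.
 *
 *	struct flow_dissector_key_ports ports;
 *
 *	ports.ports = __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
 *	(ports.src and ports.dst then hold the __be16 port numbers)
 */
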
enum flow_dissect_ret {
	FLOW_DISSECT_RET_OUT_GOOD,
	FLOW_DISSECT_RET_OUT_BAD,
	FLOW_DISSECT_RET_OUT_PROTO_AGAIN,
};

static enum flow_dissect_ret
__skb_flow_dissect_mpls(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, void *data, int nhoff, int hlen)
{
	struct flow_dissector_key_keyid *key_keyid;
	struct mpls_label *hdr, _hdr[2];
	u32 entry, label;

	if (!dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_MPLS_ENTROPY) &&
	    !dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS))
		return FLOW_DISSECT_RET_OUT_GOOD;

	hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
				   hlen, &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	entry = ntohl(hdr[0].entry);
	label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_dissector_key_mpls *key_mpls;

		key_mpls = skb_flow_dissector_target(flow_dissector,
						     FLOW_DISSECTOR_KEY_MPLS,
						     target_container);
		key_mpls->mpls_label = label;
		key_mpls->mpls_ttl = (entry & MPLS_LS_TTL_MASK)
			>> MPLS_LS_TTL_SHIFT;
		key_mpls->mpls_tc = (entry & MPLS_LS_TC_MASK)
			>> MPLS_LS_TC_SHIFT;
		key_mpls->mpls_bos = (entry & MPLS_LS_S_MASK)
			>> MPLS_LS_S_SHIFT;
	}

	if (label == MPLS_LABEL_ENTROPY) {
		key_keyid = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
						      target_container);
		key_keyid->keyid = hdr[1].entry & htonl(MPLS_LS_LABEL_MASK);
	}
	return FLOW_DISSECT_RET_OUT_GOOD;
}

static enum flow_dissect_ret
__skb_flow_dissect_arp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data, int nhoff, int hlen)
{
	struct flow_dissector_key_arp *key_arp;
	struct {
		unsigned char ar_sha[ETH_ALEN];
		unsigned char ar_sip[4];
		unsigned char ar_tha[ETH_ALEN];
		unsigned char ar_tip[4];
	} *arp_eth, _arp_eth;
	const struct arphdr *arp;
	struct arphdr _arp;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP))
		return FLOW_DISSECT_RET_OUT_GOOD;

	arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
				   hlen, &_arp);
	if (!arp)
		return FLOW_DISSECT_RET_OUT_BAD;

	if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_hln != ETH_ALEN ||
	    arp->ar_pln != 4 ||
	    (arp->ar_op != htons(ARPOP_REPLY) &&
	     arp->ar_op != htons(ARPOP_REQUEST)))
		return FLOW_DISSECT_RET_OUT_BAD;

	arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
				       sizeof(_arp_eth), data,
				       hlen, &_arp_eth);
	if (!arp_eth)
		return FLOW_DISSECT_RET_OUT_BAD;

	key_arp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_ARP,
					    target_container);

	memcpy(&key_arp->sip, arp_eth->ar_sip, sizeof(key_arp->sip));
	memcpy(&key_arp->tip, arp_eth->ar_tip, sizeof(key_arp->tip));

	/* Only store the lower byte of the opcode;
	 * this covers ARPOP_REPLY and ARPOP_REQUEST.
	 */
	key_arp->op = ntohs(arp->ar_op) & 0xff;

	ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
	ether_addr_copy(key_arp->tha, arp_eth->ar_tha);

	return FLOW_DISSECT_RET_OUT_GOOD;
}

static enum flow_dissect_ret
__skb_flow_dissect_gre(const struct sk_buff *skb,
		       struct flow_dissector_key_control *key_control,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data,
		       __be16 *p_proto, int *p_nhoff, int *p_hlen,
		       unsigned int flags)
{
	struct flow_dissector_key_keyid *key_keyid;
	struct gre_base_hdr *hdr, _hdr;
	int offset = 0;
	u16 gre_ver;

	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr),
				   data, *p_hlen, &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	/* Only look inside GRE without routing */
	if (hdr->flags & GRE_ROUTING)
		return FLOW_DISSECT_RET_OUT_GOOD;

	/* Only look inside GRE for version 0 and 1 */
	gre_ver = ntohs(hdr->flags & GRE_VERSION);
	if (gre_ver > 1)
		return FLOW_DISSECT_RET_OUT_GOOD;

	*p_proto = hdr->protocol;
	if (gre_ver) {
		/* Version 1 must be PPTP, and check the flags */
		if (!(*p_proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
			return FLOW_DISSECT_RET_OUT_GOOD;
	}

	offset += sizeof(struct gre_base_hdr);

	if (hdr->flags & GRE_CSUM)
		offset += sizeof(((struct gre_full_hdr *)0)->csum) +
			  sizeof(((struct gre_full_hdr *)0)->reserved1);

	if (hdr->flags & GRE_KEY) {
		const __be32 *keyid;
		__be32 _keyid;

		keyid = __skb_header_pointer(skb, *p_nhoff + offset,
					     sizeof(_keyid),
					     data, *p_hlen, &_keyid);
		if (!keyid)
			return FLOW_DISSECT_RET_OUT_BAD;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_GRE_KEYID)) {
			key_keyid = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_GRE_KEYID,
							      target_container);
			if (gre_ver == 0)
				key_keyid->keyid = *keyid;
			else
				key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
		}
		offset += sizeof(((struct gre_full_hdr *)0)->key);
	}

	if (hdr->flags & GRE_SEQ)
		offset += sizeof(((struct pptp_gre_header *)0)->seq);

	if (gre_ver == 0) {
		if (*p_proto == htons(ETH_P_TEB)) {
			const struct ethhdr *eth;
			struct ethhdr _eth;

			eth = __skb_header_pointer(skb, *p_nhoff + offset,
						   sizeof(_eth),
						   data, *p_hlen, &_eth);
			if (!eth)
				return FLOW_DISSECT_RET_OUT_BAD;
			*p_proto = eth->h_proto;
			offset += sizeof(*eth);

			/* Cap headers that we access via pointers at the
			 * end of the Ethernet header as our maximum alignment
			 * at that point is only 2 bytes.
			 */
			if (NET_IP_ALIGN)
				*p_hlen = *p_nhoff + offset;
		}
	} else { /* version 1, must be PPTP */
		u8 _ppp_hdr[PPP_HDRLEN];
		u8 *ppp_hdr;

		if (hdr->flags & GRE_ACK)
			offset += sizeof(((struct pptp_gre_header *)0)->ack);

		ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
					       sizeof(_ppp_hdr),
					       data, *p_hlen, _ppp_hdr);
		if (!ppp_hdr)
			return FLOW_DISSECT_RET_OUT_BAD;

		switch (PPP_PROTOCOL(ppp_hdr)) {
		case PPP_IP:
			*p_proto = htons(ETH_P_IP);
			break;
		case PPP_IPV6:
			*p_proto = htons(ETH_P_IPV6);
			break;
		default:
			/* Could probably catch some more like MPLS */
			break;
		}

		offset += PPP_HDRLEN;
	}

	*p_nhoff += offset;
	key_control->flags |= FLOW_DIS_ENCAPSULATION;
	if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
		return FLOW_DISSECT_RET_OUT_GOOD;

	return FLOW_DISSECT_RET_OUT_PROTO_AGAIN;
}

static void
__skb_flow_dissect_tcp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data, int thoff, int hlen)
{
	struct flow_dissector_key_tcp *key_tcp;
	struct tcphdr *th, _th;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_TCP))
		return;

	th = __skb_header_pointer(skb, thoff, sizeof(_th), data, hlen, &_th);
	if (!th)
		return;

	if (unlikely(__tcp_hdrlen(th) < sizeof(_th)))
		return;

	key_tcp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_TCP,
					    target_container);
	key_tcp->flags = (*(__be16 *) &tcp_flag_word(th) & htons(0x0FFF));
}

static void
__skb_flow_dissect_ipv4(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, void *data, const struct iphdr *iph)
{
	struct flow_dissector_key_ip *key_ip;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
		return;

	key_ip = skb_flow_dissector_target(flow_dissector,
					   FLOW_DISSECTOR_KEY_IP,
					   target_container);
	key_ip->tos = iph->tos;
	key_ip->ttl = iph->ttl;
}

static void
__skb_flow_dissect_ipv6(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, void *data, const struct ipv6hdr *iph)
{
	struct flow_dissector_key_ip *key_ip;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
		return;

	key_ip = skb_flow_dissector_target(flow_dissector,
					   FLOW_DISSECTOR_KEY_IP,
					   target_container);
	key_ip->tos = ipv6_get_dsfield(iph);
	key_ip->ttl = iph->hop_limit;
}

/**
 * __skb_flow_dissect - extract the flow_keys struct and return it
 * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
 * @flow_dissector: list of keys to dissect
 * @target_container: target structure to put dissected values into
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
 * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve individual keys into target specified
 * by flow_dissector from either the skbuff or a raw buffer specified by the
 * remaining parameters.
 *
 * Caller must take care of zeroing target container memory.
 */
bool __skb_flow_dissect(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container,
			void *data, __be16 proto, int nhoff, int hlen,
			unsigned int flags)
{
	struct flow_dissector_key_control *key_control;
	struct flow_dissector_key_basic *key_basic;
	struct flow_dissector_key_addrs *key_addrs;
	struct flow_dissector_key_ports *key_ports;
	struct flow_dissector_key_icmp *key_icmp;
	struct flow_dissector_key_tags *key_tags;
	struct flow_dissector_key_vlan *key_vlan;
	bool skip_vlan = false;
	u8 ip_proto = 0;
	bool ret;

	if (!data) {
		data = skb->data;
		proto = skb_vlan_tag_present(skb) ?
			 skb->vlan_proto : skb->protocol;
		nhoff = skb_network_offset(skb);
		hlen = skb_headlen(skb);
#if IS_ENABLED(CONFIG_NET_DSA)
		if (unlikely(netdev_uses_dsa(skb->dev))) {
			const struct dsa_device_ops *ops;
			int offset;

			ops = skb->dev->dsa_ptr->tag_ops;
			if (ops->flow_dissect &&
			    !ops->flow_dissect(skb, &proto, &offset)) {
				hlen -= offset;
				nhoff += offset;
			}
		}
#endif
	}

	/* It is ensured by skb_flow_dissector_init() that control key will
	 * be always present.
	 */
	key_control = skb_flow_dissector_target(flow_dissector,
						FLOW_DISSECTOR_KEY_CONTROL,
						target_container);

	/* It is ensured by skb_flow_dissector_init() that basic key will
	 * be always present.
	 */
	key_basic = skb_flow_dissector_target(flow_dissector,
					      FLOW_DISSECTOR_KEY_BASIC,
					      target_container);

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct ethhdr *eth = eth_hdr(skb);
		struct flow_dissector_key_eth_addrs *key_eth_addrs;

		key_eth_addrs = skb_flow_dissector_target(flow_dissector,
							  FLOW_DISSECTOR_KEY_ETH_ADDRS,
							  target_container);
		memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
	}

proto_again:
	switch (proto) {
	case htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;
ip:
		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph || iph->ihl < 5)
			goto out_bad;
		nhoff += iph->ihl * 4;

		ip_proto = iph->protocol;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_IPV4_ADDRS,
							      target_container);

			memcpy(&key_addrs->v4addrs, &iph->saddr,
			       sizeof(key_addrs->v4addrs));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		}

		if (ip_is_fragment(iph)) {
			key_control->flags |= FLOW_DIS_IS_FRAGMENT;

			if (iph->frag_off & htons(IP_OFFSET)) {
				goto out_good;
			} else {
				key_control->flags |= FLOW_DIS_FIRST_FRAG;
				if (!(flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG))
					goto out_good;
			}
		}

		__skb_flow_dissect_ipv4(skb, flow_dissector,
					target_container, data, iph);

		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
			goto out_good;

		break;
	}
	case htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;

ipv6:
		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph)
			goto out_bad;

		ip_proto = iph->nexthdr;
		nhoff += sizeof(struct ipv6hdr);

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_IPV6_ADDRS,
							      target_container);

			memcpy(&key_addrs->v6addrs, &iph->saddr,
			       sizeof(key_addrs->v6addrs));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		}

		if ((dissector_uses_key(flow_dissector,
					FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
		     (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
		    ip6_flowlabel(iph)) {
			__be32 flow_label = ip6_flowlabel(iph);

			if (dissector_uses_key(flow_dissector,
					       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
				key_tags = skb_flow_dissector_target(flow_dissector,
								     FLOW_DISSECTOR_KEY_FLOW_LABEL,
								     target_container);
				key_tags->flow_label = ntohl(flow_label);
			}
			if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)
				goto out_good;
		}

		__skb_flow_dissect_ipv6(skb, flow_dissector,
					target_container, data, iph);

		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
			goto out_good;

		break;
	}
	case htons(ETH_P_8021AD):
	case htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;
		bool vlan_tag_present = skb && skb_vlan_tag_present(skb);

		if (vlan_tag_present)
			proto = skb->protocol;

		if (!vlan_tag_present || eth_type_vlan(skb->protocol)) {
			vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
						    data, hlen, &_vlan);
			if (!vlan)
				goto out_bad;
			proto = vlan->h_vlan_encapsulated_proto;
			nhoff += sizeof(*vlan);
			if (skip_vlan)
				goto proto_again;
		}

		skip_vlan = true;
		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_VLAN)) {
			key_vlan = skb_flow_dissector_target(flow_dissector,
							     FLOW_DISSECTOR_KEY_VLAN,
							     target_container);

			if (vlan_tag_present) {
				key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
				key_vlan->vlan_priority =
					(skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
			} else {
				key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
					VLAN_VID_MASK;
				key_vlan->vlan_priority =
					(ntohs(vlan->h_vlan_TCI) &
					 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
			}
		}

		goto proto_again;
	}
	case htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			goto out_bad;
		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		switch (proto) {
		case htons(PPP_IP):
			goto ip;
		case htons(PPP_IPV6):
			goto ipv6;
		default:
			goto out_bad;
		}
	}
	case htons(ETH_P_TIPC): {
		struct {
			__be32 pre[3];
			__be32 srcnode;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			goto out_bad;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_TIPC_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_TIPC_ADDRS,
							      target_container);
			key_addrs->tipcaddrs.srcnode = hdr->srcnode;
			key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC_ADDRS;
		}
		goto out_good;
	}

	case htons(ETH_P_MPLS_UC):
	case htons(ETH_P_MPLS_MC):
mpls:
		switch (__skb_flow_dissect_mpls(skb, flow_dissector,
						target_container, data,
						nhoff, hlen)) {
		case FLOW_DISSECT_RET_OUT_GOOD:
			goto out_good;
		case FLOW_DISSECT_RET_OUT_BAD:
		default:
			goto out_bad;
		}
	case htons(ETH_P_FCOE):
		if ((hlen - nhoff) < FCOE_HEADER_LEN)
			goto out_bad;

		nhoff += FCOE_HEADER_LEN;
		goto out_good;

	case htons(ETH_P_ARP):
	case htons(ETH_P_RARP):
		switch (__skb_flow_dissect_arp(skb, flow_dissector,
					       target_container, data,
					       nhoff, hlen)) {
		case FLOW_DISSECT_RET_OUT_GOOD:
			goto out_good;
		case FLOW_DISSECT_RET_OUT_BAD:
		default:
			goto out_bad;
		}
	default:
		goto out_bad;
	}

ip_proto_again:
	switch (ip_proto) {
	case IPPROTO_GRE:
		switch (__skb_flow_dissect_gre(skb, key_control, flow_dissector,
					       target_container, data,
					       &proto, &nhoff, &hlen, flags)) {
		case FLOW_DISSECT_RET_OUT_GOOD:
			goto out_good;
		case FLOW_DISSECT_RET_OUT_BAD:
			goto out_bad;
		case FLOW_DISSECT_RET_OUT_PROTO_AGAIN:
			goto proto_again;
		}
	case NEXTHDR_HOP:
	case NEXTHDR_ROUTING:
	case NEXTHDR_DEST: {
		u8 _opthdr[2], *opthdr;

		if (proto != htons(ETH_P_IPV6))
			break;

		opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
					      data, hlen, &_opthdr);
		if (!opthdr)
			goto out_bad;

		ip_proto = opthdr[0];
		nhoff += (opthdr[1] + 1) << 3;

		goto ip_proto_again;
	}
	case NEXTHDR_FRAGMENT: {
		struct frag_hdr _fh, *fh;

		if (proto != htons(ETH_P_IPV6))
			break;

		fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
					  data, hlen, &_fh);

		if (!fh)
			goto out_bad;

		key_control->flags |= FLOW_DIS_IS_FRAGMENT;

		nhoff += sizeof(_fh);
		ip_proto = fh->nexthdr;

		if (!(fh->frag_off & htons(IP6_OFFSET))) {
			key_control->flags |= FLOW_DIS_FIRST_FRAG;
			if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG)
				goto ip_proto_again;
		}
		goto out_good;
	}
	case IPPROTO_IPIP:
		proto = htons(ETH_P_IP);

		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
			goto out_good;

		goto ip;
	case IPPROTO_IPV6:
		proto = htons(ETH_P_IPV6);

		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
			goto out_good;

		goto ipv6;
	case IPPROTO_MPLS:
		proto = htons(ETH_P_MPLS_UC);
		goto mpls;
	case IPPROTO_TCP:
		__skb_flow_dissect_tcp(skb, flow_dissector, target_container,
				       data, nhoff, hlen);
		break;
	default:
		break;
	}

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_PORTS)) {
		key_ports = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_PORTS,
						      target_container);
		key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
							data, hlen);
	}

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_ICMP)) {
		key_icmp = skb_flow_dissector_target(flow_dissector,
						     FLOW_DISSECTOR_KEY_ICMP,
						     target_container);
		key_icmp->icmp = skb_flow_get_be16(skb, nhoff, data, hlen);
	}

out_good:
	ret = true;

	key_control->thoff = (u16)nhoff;
out:
	key_basic->n_proto = proto;
	key_basic->ip_proto = ip_proto;

	return ret;

out_bad:
	ret = false;
	key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
	goto out;
}
EXPORT_SYMBOL(__skb_flow_dissect);

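/*
 * Example usage (an illustrative sketch): most callers do not invoke
 * __skb_flow_dissect() directly but go through the inline helper
 * skb_flow_dissect_flow_keys(), which pairs the default
 * flow_keys_dissector below with a struct flow_keys container and zeroes
 * it before dissection:
 *
 *	struct flow_keys keys;
 *
 *	if (skb_flow_dissect_flow_keys(skb, &keys, 0))
 *		pr_debug("n_proto %04x ip_proto %u thoff %u\n",
 *			 ntohs(keys.basic.n_proto), keys.basic.ip_proto,
 *			 keys.control.thoff);
 */
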
static u32 hashrnd __read_mostly;
static __always_inline void __flow_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}

static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
					     u32 keyval)
{
	return jhash2(words, length, keyval);
}

static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
{
	const void *p = flow;

	BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
	return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
}

static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
{
	size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
	BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
	BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
		     sizeof(*flow) - sizeof(flow->addrs));

	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		diff -= sizeof(flow->addrs.v4addrs);
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		diff -= sizeof(flow->addrs.v6addrs);
		break;
	case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
		diff -= sizeof(flow->addrs.tipcaddrs);
		break;
	}
	return (sizeof(*flow) - diff) / sizeof(u32);
}

__be32 flow_get_u32_src(const struct flow_keys *flow)
{
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		return flow->addrs.v4addrs.src;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		return (__force __be32)ipv6_addr_hash(
			&flow->addrs.v6addrs.src);
	case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
		return flow->addrs.tipcaddrs.srcnode;
	default:
		return 0;
	}
}
EXPORT_SYMBOL(flow_get_u32_src);

__be32 flow_get_u32_dst(const struct flow_keys *flow)
{
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		return flow->addrs.v4addrs.dst;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		return (__force __be32)ipv6_addr_hash(
			&flow->addrs.v6addrs.dst);
	default:
		return 0;
	}
}
EXPORT_SYMBOL(flow_get_u32_dst);

static inline void __flow_hash_consistentify(struct flow_keys *keys)
{
	int addr_diff, i;

	switch (keys->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		addr_diff = (__force u32)keys->addrs.v4addrs.dst -
			    (__force u32)keys->addrs.v4addrs.src;
		if ((addr_diff < 0) ||
		    (addr_diff == 0 &&
		     ((__force u16)keys->ports.dst <
		      (__force u16)keys->ports.src))) {
			swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		addr_diff = memcmp(&keys->addrs.v6addrs.dst,
				   &keys->addrs.v6addrs.src,
				   sizeof(keys->addrs.v6addrs.dst));
		if ((addr_diff < 0) ||
		    (addr_diff == 0 &&
		     ((__force u16)keys->ports.dst <
		      (__force u16)keys->ports.src))) {
			for (i = 0; i < 4; i++)
				swap(keys->addrs.v6addrs.src.s6_addr32[i],
				     keys->addrs.v6addrs.dst.s6_addr32[i]);
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	}
}

static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
{
	u32 hash;

	__flow_hash_consistentify(keys);

	hash = __flow_hash_words(flow_keys_hash_start(keys),
				 flow_keys_hash_length(keys), keyval);
	if (!hash)
		hash = 1;

	return hash;
}

u32 flow_hash_from_keys(struct flow_keys *keys)
{
	__flow_hash_secret_init();
	return __flow_hash_from_keys(keys, hashrnd);
}
EXPORT_SYMBOL(flow_hash_from_keys);

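/*
 * Example usage (an illustrative sketch): flow_hash_from_keys() also
 * works on a flow_keys struct filled in by hand rather than dissected
 * from a packet. The addresses and ports below are made-up values.
 *
 *	struct flow_keys keys;
 *	u32 hash;
 *
 *	memset(&keys, 0, sizeof(keys));
 *	keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
 *	keys.addrs.v4addrs.src = htonl(0xc0a80001);	(192.168.0.1)
 *	keys.addrs.v4addrs.dst = htonl(0xc0a80002);	(192.168.0.2)
 *	keys.basic.ip_proto = IPPROTO_TCP;
 *	keys.ports.src = htons(12345);
 *	keys.ports.dst = htons(80);
 *	hash = flow_hash_from_keys(&keys);
 */
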
static inline u32 ___skb_get_hash(const struct sk_buff *skb,
				  struct flow_keys *keys, u32 keyval)
{
	skb_flow_dissect_flow_keys(skb, keys,
				   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	return __flow_hash_from_keys(keys, keyval);
}

struct _flow_keys_digest_data {
	__be16	n_proto;
	u8	ip_proto;
	u8	padding;
	__be32	ports;
	__be32	src;
	__be32	dst;
};

void make_flow_keys_digest(struct flow_keys_digest *digest,
			   const struct flow_keys *flow)
{
	struct _flow_keys_digest_data *data =
	    (struct _flow_keys_digest_data *)digest;

	BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));

	memset(digest, 0, sizeof(*digest));

	data->n_proto = flow->basic.n_proto;
	data->ip_proto = flow->basic.ip_proto;
	data->ports = flow->ports.ports;
	data->src = flow->addrs.v4addrs.src;
	data->dst = flow->addrs.v4addrs.dst;
}
EXPORT_SYMBOL(make_flow_keys_digest);

static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;

u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
{
	struct flow_keys keys;

	__flow_hash_secret_init();

	memset(&keys, 0, sizeof(keys));
	__skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
			   NULL, 0, 0, 0,
			   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	return __flow_hash_from_keys(&keys, hashrnd);
}
EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);

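/*
 * Example property (an illustrative sketch): because
 * __flow_hash_consistentify() orders addresses and ports canonically
 * before hashing, the value returned here is the same for both directions
 * of a flow, e.g. for symmetric fanout. "skb_a_to_b" and "skb_b_to_a" are
 * placeholders for the two directions of one 5-tuple.
 *
 *	u32 h1 = __skb_get_hash_symmetric(skb_a_to_b);
 *	u32 h2 = __skb_get_hash_symmetric(skb_b_to_a);
 *	(h1 == h2)
 */
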
/**
 * __skb_get_hash: calculate a flow hash
 * @skb: sk_buff to calculate flow hash from
 *
 * This function calculates a flow hash based on src/dst addresses
 * and src/dst port numbers.  Sets hash in skb to non-zero hash value
 * on success, zero indicates no valid hash.  Also, sets l4_hash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_hash(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash;

	__flow_hash_secret_init();

	hash = ___skb_get_hash(skb, &keys, hashrnd);

	__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
}
EXPORT_SYMBOL(__skb_get_hash);

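/*
 * Example usage (an illustrative sketch): callers normally reach this
 * through skb_get_hash(), which returns the cached skb->hash when it is
 * already valid and only then falls back to __skb_get_hash(). One common
 * use is spreading flows over a number of queues or buckets:
 *
 *	u32 hash = skb_get_hash(skb);
 *	u16 bucket = reciprocal_scale(hash, num_buckets);
 *
 * where "num_buckets" is a placeholder for the caller's bucket count.
 */
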
__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
{
	struct flow_keys keys;

	return ___skb_get_hash(skb, &keys, perturb);
}
EXPORT_SYMBOL(skb_get_hash_perturb);

u32 __skb_get_poff(const struct sk_buff *skb, void *data,
		   const struct flow_keys *keys, int hlen)
{
	u32 poff = keys->control.thoff;

	/* skip L4 headers for fragments after the first */
	if ((keys->control.flags & FLOW_DIS_IS_FRAGMENT) &&
	    !(keys->control.flags & FLOW_DIS_FIRST_FRAG))
		return poff;

	switch (keys->basic.ip_proto) {
	case IPPROTO_TCP: {
		/* access doff as u8 to avoid unaligned access */
		const u8 *doff;
		u8 _doff;

		doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
					    data, hlen, &_doff);
		if (!doff)
			return poff;

		poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}

/**
 * skb_get_poff - get the offset to the payload
 * @skb: sk_buff to get the payload offset from
 *
 * The function will get the offset to the payload as far as it could
 * be dissected.  The main user is currently BPF, so that we can
 * dynamically truncate packets without needing to push the actual
 * payload to user space and can analyze headers only instead.
 */
u32 skb_get_poff(const struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect_flow_keys(skb, &keys, 0))
		return 0;

	return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
}

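/*
 * Example usage (an illustrative sketch): truncating a captured packet to
 * its headers only, keeping everything up to the payload offset.
 * "snaplen" is a placeholder for the caller's capture length.
 *
 *	u32 poff = skb_get_poff(skb);
 *
 *	if (poff)
 *		snaplen = min_t(u32, snaplen, poff);
 */
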
__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
{
	memset(keys, 0, sizeof(*keys));

	memcpy(&keys->addrs.v6addrs.src, &fl6->saddr,
	       sizeof(keys->addrs.v6addrs.src));
	memcpy(&keys->addrs.v6addrs.dst, &fl6->daddr,
	       sizeof(keys->addrs.v6addrs.dst));
	keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	keys->ports.src = fl6->fl6_sport;
	keys->ports.dst = fl6->fl6_dport;
	keys->keyid.keyid = fl6->fl6_gre_key;
	keys->tags.flow_label = (__force u32)fl6->flowlabel;
	keys->basic.ip_proto = fl6->flowi6_proto;

	return flow_hash_from_keys(keys);
}
EXPORT_SYMBOL(__get_hash_from_flowi6);

__u32 __get_hash_from_flowi4(const struct flowi4 *fl4, struct flow_keys *keys)
{
	memset(keys, 0, sizeof(*keys));

	keys->addrs.v4addrs.src = fl4->saddr;
	keys->addrs.v4addrs.dst = fl4->daddr;
	keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	keys->ports.src = fl4->fl4_sport;
	keys->ports.dst = fl4->fl4_dport;
	keys->keyid.keyid = fl4->fl4_gre_key;
	keys->basic.ip_proto = fl4->flowi4_proto;

	return flow_hash_from_keys(keys);
}
EXPORT_SYMBOL(__get_hash_from_flowi4);

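/*
 * Example usage (an illustrative sketch): these helpers let route and
 * tunnel code reuse the flow hashing machinery on a flowi instead of a
 * packet; callers typically go through the wrappers in <net/flow.h>,
 * roughly:
 *
 *	struct flow_keys keys;
 *	u32 hash;
 *
 *	hash = __get_hash_from_flowi4(fl4, &keys);
 *
 * where "fl4" is the struct flowi4 describing the lookup.
 */
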
static const struct flow_dissector_key flow_keys_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_TIPC_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.tipcaddrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_VLAN,
		.offset = offsetof(struct flow_keys, vlan),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct flow_keys, keyid),
	},
};

static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
};

static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
};

struct flow_dissector flow_keys_dissector __read_mostly;
EXPORT_SYMBOL(flow_keys_dissector);

struct flow_dissector flow_keys_buf_dissector __read_mostly;

static int __init init_default_flow_dissectors(void)
{
	skb_flow_dissector_init(&flow_keys_dissector,
				flow_keys_dissector_keys,
				ARRAY_SIZE(flow_keys_dissector_keys));
	skb_flow_dissector_init(&flow_keys_dissector_symmetric,
				flow_keys_dissector_symmetric_keys,
				ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
	skb_flow_dissector_init(&flow_keys_buf_dissector,
				flow_keys_buf_dissector_keys,
				ARRAY_SIZE(flow_keys_buf_dissector_keys));
	return 0;
}

core_initcall(init_default_flow_dissectors);