// SPDX-License-Identifier: GPL-2.0
# include <limits.h>
# include <stddef.h>
# include <stdbool.h>
# include <string.h>
# include <linux/pkt_cls.h>
# include <linux/bpf.h>
# include <linux/in.h>
# include <linux/if_ether.h>
# include <linux/icmp.h>
# include <linux/ip.h>
# include <linux/ipv6.h>
# include <linux/tcp.h>
# include <linux/udp.h>
# include <linux/if_packet.h>
# include <sys/socket.h>
# include <linux/if_tunnel.h>
# include <linux/mpls.h>
# include <bpf/bpf_helpers.h>
# include <bpf/bpf_endian.h>
/* Program version, consumed by the loader. */
int _version SEC("version") = 1;

/* Expands to the section and function name of a tail-called sub-program,
 * e.g. PROG(IP) -> SEC("IP") int bpf_func_IP.
 */
#define PROG(F) SEC(#F) int bpf_func_##F

/* These are the identifiers of the BPF programs that will be used in tail
 * calls. Name is limited to 16 characters, with the terminating character and
 * bpf_func_ above, we have only 6 to work with, anything after will be cropped.
 */
enum {
	IP,
	IPV6,
	IPV6OP,	/* Destination/Hop-by-Hop Options IPv6 Extension header */
	IPV6FR,	/* Fragmentation IPv6 Extension Header */
	MPLS,
	VLAN,
};

/* IPv4 fragmentation bits of iphdr->frag_off (host-order values,
 * converted with bpf_htons() at the point of use).
 */
#define IP_MF		0x2000
#define IP_OFFSET	0x1FFF

/* IPv6 fragmentation bits of frag_hdr->frag_off. */
#define IP6_MF		0x0001
#define IP6_OFFSET	0xFFF8

/* 802.1Q/802.1ad tag following the Ethernet header. */
struct vlan_hdr {
	__be16 h_vlan_TCI;
	__be16 h_vlan_encapsulated_proto;
};

/* Leading 4 bytes of a GRE header: flags + encapsulated protocol. */
struct gre_hdr {
	__be16 flags;
	__be16 proto;
};

/* IPv6 Fragment extension header. */
struct frag_hdr {
	__u8 nexthdr;
	__u8 reserved;
	__be16 frag_off;
	__be32 identification;
};
2019-06-17 12:26:59 -07:00
struct {
2019-07-05 08:50:11 -07:00
__uint ( type , BPF_MAP_TYPE_PROG_ARRAY ) ;
__uint ( max_entries , 8 ) ;
__uint ( key_size , sizeof ( __u32 ) ) ;
__uint ( value_size , sizeof ( __u32 ) ) ;
} jmp_table SEC ( " .maps " ) ;
2018-09-14 07:46:21 -07:00
2019-06-17 12:26:59 -07:00
struct {
2019-08-12 16:30:39 -07:00
__uint ( type , BPF_MAP_TYPE_HASH ) ;
__uint ( max_entries , 1024 ) ;
2019-07-05 08:50:11 -07:00
__type ( key , __u32 ) ;
__type ( value , struct bpf_flow_keys ) ;
} last_dissection SEC ( " .maps " ) ;
2019-04-22 08:55:50 -07:00
static __always_inline int export_flow_keys ( struct bpf_flow_keys * keys ,
int ret )
{
2019-08-12 16:30:39 -07:00
__u32 key = ( __u32 ) ( keys - > sport ) < < 16 | keys - > dport ;
struct bpf_flow_keys val ;
2019-04-22 08:55:50 -07:00
2019-08-12 16:30:39 -07:00
memcpy ( & val , keys , sizeof ( val ) ) ;
bpf_map_update_elem ( & last_dissection , & key , & val , BPF_ANY ) ;
2019-04-22 08:55:50 -07:00
return ret ;
}
2019-07-25 15:52:30 -07:00
# define IPV6_FLOWLABEL_MASK __bpf_constant_htonl(0x000FFFFF)
static inline __be32 ip6_flowlabel ( const struct ipv6hdr * hdr )
{
return * ( __be32 * ) hdr & IPV6_FLOWLABEL_MASK ;
}
2018-09-14 07:46:21 -07:00
/* Return a pointer to @hdr_size bytes of packet data at the current
 * transport offset (flow_keys->thoff).  Points directly into the linear
 * skb data when the bytes are within bounds; otherwise falls back to
 * bpf_skb_load_bytes() into the caller-supplied @buffer (which must be
 * at least @hdr_size bytes).  Returns NULL when the offset would
 * overflow or the bytes cannot be read.
 */
static __always_inline void *bpf_flow_dissect_get_header(struct __sk_buff *skb,
							 __u16 hdr_size,
							 void *buffer)
{
	void *data_end = (void *)(long)skb->data_end;
	void *data = (void *)(long)skb->data;
	__u16 thoff = skb->flow_keys->thoff;
	__u8 *hdr;

	/* Verifies this variable offset does not overflow */
	if (thoff > (USHRT_MAX - hdr_size))
		return NULL;

	hdr = data + thoff;
	if (hdr + hdr_size <= data_end)
		return hdr;

	if (bpf_skb_load_bytes(skb, thoff, buffer, hdr_size))
		return NULL;

	return buffer;
}
/* Dispatches on ETHERTYPE to the matching tail-called sub-program. */
static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
{
	struct bpf_flow_keys *keys = skb->flow_keys;

	switch (proto) {
	case bpf_htons(ETH_P_IP):
		bpf_tail_call(skb, &jmp_table, IP);
		break;
	case bpf_htons(ETH_P_IPV6):
		bpf_tail_call(skb, &jmp_table, IPV6);
		break;
	case bpf_htons(ETH_P_MPLS_MC):
	case bpf_htons(ETH_P_MPLS_UC):
		bpf_tail_call(skb, &jmp_table, MPLS);
		break;
	case bpf_htons(ETH_P_8021Q):
	case bpf_htons(ETH_P_8021AD):
		bpf_tail_call(skb, &jmp_table, VLAN);
		break;
	default:
		/* Protocol not supported */
		return export_flow_keys(keys, BPF_DROP);
	}

	/* Only reached when the tail call above fails (e.g. the table slot
	 * is empty); a successful bpf_tail_call() never returns here.
	 */
	return export_flow_keys(keys, BPF_DROP);
}
2018-11-09 08:21:40 -08:00
SEC ( " flow_dissector " )
2018-09-14 12:09:05 -07:00
int _dissect ( struct __sk_buff * skb )
2018-09-14 07:46:21 -07:00
{
2019-04-01 13:57:31 -07:00
struct bpf_flow_keys * keys = skb - > flow_keys ;
return parse_eth_proto ( skb , keys - > n_proto ) ;
2018-09-14 07:46:21 -07:00
}
/* Parses on IPPROTO_*: terminates the dissection for L4 protocols
 * (ICMP/TCP/UDP) and recurses through parse_eth_proto() for tunnels
 * (IPIP, IPv6-in-IP, GRE).  flow_keys->thoff must already point at the
 * start of the L4/inner header.
 */
static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
{
	struct bpf_flow_keys *keys = skb->flow_keys;
	void *data_end = (void *)(long)skb->data_end;
	struct icmphdr *icmp, _icmp;
	struct gre_hdr *gre, _gre;
	struct ethhdr *eth, _eth;
	struct tcphdr *tcp, _tcp;
	struct udphdr *udp, _udp;

	switch (proto) {
	case IPPROTO_ICMP:
		icmp = bpf_flow_dissect_get_header(skb, sizeof(*icmp), &_icmp);
		if (!icmp)
			return export_flow_keys(keys, BPF_DROP);
		return export_flow_keys(keys, BPF_OK);
	case IPPROTO_IPIP:
		keys->is_encap = true;
		if (keys->flags & BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP)
			return export_flow_keys(keys, BPF_OK);

		return parse_eth_proto(skb, bpf_htons(ETH_P_IP));
	case IPPROTO_IPV6:
		keys->is_encap = true;
		if (keys->flags & BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP)
			return export_flow_keys(keys, BPF_OK);

		return parse_eth_proto(skb, bpf_htons(ETH_P_IPV6));
	case IPPROTO_GRE:
		gre = bpf_flow_dissect_get_header(skb, sizeof(*gre), &_gre);
		if (!gre)
			return export_flow_keys(keys, BPF_DROP);

		if (bpf_htons(gre->flags & GRE_VERSION))
			/* Only inspect standard GRE packets with version 0 */
			return export_flow_keys(keys, BPF_OK);

		keys->thoff += sizeof(*gre); /* Step over GRE Flags and Proto */
		if (GRE_IS_CSUM(gre->flags))
			keys->thoff += 4; /* Step over chksum and Padding */
		if (GRE_IS_KEY(gre->flags))
			keys->thoff += 4; /* Step over key */
		if (GRE_IS_SEQ(gre->flags))
			keys->thoff += 4; /* Step over sequence number */

		keys->is_encap = true;
		if (keys->flags & BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP)
			return export_flow_keys(keys, BPF_OK);

		if (gre->proto == bpf_htons(ETH_P_TEB)) {
			/* Transparent Ethernet Bridging: an inner Ethernet
			 * header precedes the encapsulated protocol.
			 */
			eth = bpf_flow_dissect_get_header(skb, sizeof(*eth),
							  &_eth);
			if (!eth)
				return export_flow_keys(keys, BPF_DROP);

			keys->thoff += sizeof(*eth);

			return parse_eth_proto(skb, eth->h_proto);
		} else {
			return parse_eth_proto(skb, gre->proto);
		}
	case IPPROTO_TCP:
		tcp = bpf_flow_dissect_get_header(skb, sizeof(*tcp), &_tcp);
		if (!tcp)
			return export_flow_keys(keys, BPF_DROP);

		/* doff is in 32-bit words; minimum legal TCP header is 5. */
		if (tcp->doff < 5)
			return export_flow_keys(keys, BPF_DROP);

		if ((__u8 *)tcp + (tcp->doff << 2) > data_end)
			return export_flow_keys(keys, BPF_DROP);

		keys->sport = tcp->source;
		keys->dport = tcp->dest;
		return export_flow_keys(keys, BPF_OK);
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		udp = bpf_flow_dissect_get_header(skb, sizeof(*udp), &_udp);
		if (!udp)
			return export_flow_keys(keys, BPF_DROP);

		keys->sport = udp->source;
		keys->dport = udp->dest;
		return export_flow_keys(keys, BPF_OK);
	default:
		return export_flow_keys(keys, BPF_DROP);
	}

	return export_flow_keys(keys, BPF_DROP);
}
/* Dispatch an IPv6 next-header value: extension headers go to their
 * dedicated tail-called programs, anything else is treated as an
 * upper-layer protocol.
 */
static __always_inline int parse_ipv6_proto(struct __sk_buff *skb, __u8 nexthdr)
{
	struct bpf_flow_keys *keys = skb->flow_keys;

	switch (nexthdr) {
	case IPPROTO_HOPOPTS:
	case IPPROTO_DSTOPTS:
		bpf_tail_call(skb, &jmp_table, IPV6OP);
		break;
	case IPPROTO_FRAGMENT:
		bpf_tail_call(skb, &jmp_table, IPV6FR);
		break;
	default:
		return parse_ip_proto(skb, nexthdr);
	}

	/* Only reached when a tail call above fails. */
	return export_flow_keys(keys, BPF_DROP);
}
PROG ( IP ) ( struct __sk_buff * skb )
{
void * data_end = ( void * ) ( long ) skb - > data_end ;
struct bpf_flow_keys * keys = skb - > flow_keys ;
void * data = ( void * ) ( long ) skb - > data ;
struct iphdr * iph , _iph ;
bool done = false ;
iph = bpf_flow_dissect_get_header ( skb , sizeof ( * iph ) , & _iph ) ;
if ( ! iph )
2019-04-22 08:55:50 -07:00
return export_flow_keys ( keys , BPF_DROP ) ;
2018-09-14 07:46:21 -07:00
/* IP header cannot be smaller than 20 bytes */
if ( iph - > ihl < 5 )
2019-04-22 08:55:50 -07:00
return export_flow_keys ( keys , BPF_DROP ) ;
2018-09-14 07:46:21 -07:00
keys - > addr_proto = ETH_P_IP ;
keys - > ipv4_src = iph - > saddr ;
keys - > ipv4_dst = iph - > daddr ;
2019-07-25 15:52:29 -07:00
keys - > ip_proto = iph - > protocol ;
2018-09-14 07:46:21 -07:00
2018-12-05 20:40:47 -08:00
keys - > thoff + = iph - > ihl < < 2 ;
if ( data + keys - > thoff > data_end )
2019-04-22 08:55:50 -07:00
return export_flow_keys ( keys , BPF_DROP ) ;
2018-09-14 07:46:21 -07:00
if ( iph - > frag_off & bpf_htons ( IP_MF | IP_OFFSET ) ) {
keys - > is_frag = true ;
2019-07-25 15:52:29 -07:00
if ( iph - > frag_off & bpf_htons ( IP_OFFSET ) ) {
2018-09-14 07:46:21 -07:00
/* From second fragment on, packets do not have headers
* we can parse .
*/
done = true ;
2019-07-25 15:52:29 -07:00
} else {
2018-09-14 07:46:21 -07:00
keys - > is_first_frag = true ;
2019-07-25 15:52:29 -07:00
/* No need to parse fragmented packet unless
* explicitly asked for .
*/
if ( ! ( keys - > flags &
BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG ) )
done = true ;
}
2018-09-14 07:46:21 -07:00
}
if ( done )
2019-04-22 08:55:50 -07:00
return export_flow_keys ( keys , BPF_OK ) ;
2018-09-14 07:46:21 -07:00
return parse_ip_proto ( skb , iph - > protocol ) ;
}
PROG ( IPV6 ) ( struct __sk_buff * skb )
{
struct bpf_flow_keys * keys = skb - > flow_keys ;
struct ipv6hdr * ip6h , _ip6h ;
ip6h = bpf_flow_dissect_get_header ( skb , sizeof ( * ip6h ) , & _ip6h ) ;
if ( ! ip6h )
2019-04-22 08:55:50 -07:00
return export_flow_keys ( keys , BPF_DROP ) ;
2018-09-14 07:46:21 -07:00
keys - > addr_proto = ETH_P_IPV6 ;
memcpy ( & keys - > ipv6_src , & ip6h - > saddr , 2 * sizeof ( ip6h - > saddr ) ) ;
2018-12-05 20:40:47 -08:00
keys - > thoff + = sizeof ( struct ipv6hdr ) ;
2019-07-25 15:52:29 -07:00
keys - > ip_proto = ip6h - > nexthdr ;
2019-07-25 15:52:30 -07:00
keys - > flow_label = ip6_flowlabel ( ip6h ) ;
if ( keys - > flags & BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL )
return export_flow_keys ( keys , BPF_OK ) ;
2018-09-14 07:46:21 -07:00
return parse_ipv6_proto ( skb , ip6h - > nexthdr ) ;
}
PROG ( IPV6OP ) ( struct __sk_buff * skb )
{
2019-04-22 08:55:50 -07:00
struct bpf_flow_keys * keys = skb - > flow_keys ;
2018-09-14 07:46:21 -07:00
struct ipv6_opt_hdr * ip6h , _ip6h ;
ip6h = bpf_flow_dissect_get_header ( skb , sizeof ( * ip6h ) , & _ip6h ) ;
if ( ! ip6h )
2019-04-22 08:55:50 -07:00
return export_flow_keys ( keys , BPF_DROP ) ;
2018-09-14 07:46:21 -07:00
/* hlen is in 8-octets and does not include the first 8 bytes
* of the header
*/
2019-07-25 15:52:29 -07:00
keys - > thoff + = ( 1 + ip6h - > hdrlen ) < < 3 ;
keys - > ip_proto = ip6h - > nexthdr ;
2018-09-14 07:46:21 -07:00
return parse_ipv6_proto ( skb , ip6h - > nexthdr ) ;
}
PROG ( IPV6FR ) ( struct __sk_buff * skb )
{
struct bpf_flow_keys * keys = skb - > flow_keys ;
struct frag_hdr * fragh , _fragh ;
fragh = bpf_flow_dissect_get_header ( skb , sizeof ( * fragh ) , & _fragh ) ;
if ( ! fragh )
2019-04-22 08:55:50 -07:00
return export_flow_keys ( keys , BPF_DROP ) ;
2018-09-14 07:46:21 -07:00
2018-12-05 20:40:47 -08:00
keys - > thoff + = sizeof ( * fragh ) ;
2018-09-14 07:46:21 -07:00
keys - > is_frag = true ;
2019-07-25 15:52:29 -07:00
keys - > ip_proto = fragh - > nexthdr ;
if ( ! ( fragh - > frag_off & bpf_htons ( IP6_OFFSET ) ) ) {
2018-09-14 07:46:21 -07:00
keys - > is_first_frag = true ;
2019-07-25 15:52:29 -07:00
/* No need to parse fragmented packet unless
* explicitly asked for .
*/
if ( ! ( keys - > flags & BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG ) )
return export_flow_keys ( keys , BPF_OK ) ;
}
2018-09-14 07:46:21 -07:00
return parse_ipv6_proto ( skb , fragh - > nexthdr ) ;
}
PROG ( MPLS ) ( struct __sk_buff * skb )
{
2019-04-22 08:55:50 -07:00
struct bpf_flow_keys * keys = skb - > flow_keys ;
2018-09-14 07:46:21 -07:00
struct mpls_label * mpls , _mpls ;
mpls = bpf_flow_dissect_get_header ( skb , sizeof ( * mpls ) , & _mpls ) ;
if ( ! mpls )
2019-04-22 08:55:50 -07:00
return export_flow_keys ( keys , BPF_DROP ) ;
2018-09-14 07:46:21 -07:00
2019-04-22 08:55:50 -07:00
return export_flow_keys ( keys , BPF_OK ) ;
2018-09-14 07:46:21 -07:00
}
PROG ( VLAN ) ( struct __sk_buff * skb )
{
struct bpf_flow_keys * keys = skb - > flow_keys ;
struct vlan_hdr * vlan , _vlan ;
/* Account for double-tagging */
2019-04-01 13:57:30 -07:00
if ( keys - > n_proto = = bpf_htons ( ETH_P_8021AD ) ) {
2018-09-14 07:46:21 -07:00
vlan = bpf_flow_dissect_get_header ( skb , sizeof ( * vlan ) , & _vlan ) ;
if ( ! vlan )
2019-04-22 08:55:50 -07:00
return export_flow_keys ( keys , BPF_DROP ) ;
2018-09-14 07:46:21 -07:00
if ( vlan - > h_vlan_encapsulated_proto ! = bpf_htons ( ETH_P_8021Q ) )
2019-04-22 08:55:50 -07:00
return export_flow_keys ( keys , BPF_DROP ) ;
2018-09-14 07:46:21 -07:00
2019-04-01 13:57:30 -07:00
keys - > nhoff + = sizeof ( * vlan ) ;
2018-12-05 20:40:47 -08:00
keys - > thoff + = sizeof ( * vlan ) ;
2018-09-14 07:46:21 -07:00
}
vlan = bpf_flow_dissect_get_header ( skb , sizeof ( * vlan ) , & _vlan ) ;
if ( ! vlan )
2019-04-22 08:55:50 -07:00
return export_flow_keys ( keys , BPF_DROP ) ;
2018-09-14 07:46:21 -07:00
2019-04-01 13:57:30 -07:00
keys - > nhoff + = sizeof ( * vlan ) ;
2018-12-05 20:40:47 -08:00
keys - > thoff + = sizeof ( * vlan ) ;
2018-09-14 07:46:21 -07:00
/* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/
if ( vlan - > h_vlan_encapsulated_proto = = bpf_htons ( ETH_P_8021AD ) | |
vlan - > h_vlan_encapsulated_proto = = bpf_htons ( ETH_P_8021Q ) )
2019-04-22 08:55:50 -07:00
return export_flow_keys ( keys , BPF_DROP ) ;
2018-09-14 07:46:21 -07:00
2019-04-01 13:57:31 -07:00
keys - > n_proto = vlan - > h_vlan_encapsulated_proto ;
2018-09-14 07:46:21 -07:00
return parse_eth_proto ( skb , vlan - > h_vlan_encapsulated_proto ) ;
}
char __license[] SEC("license") = "GPL";