/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>

struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
	struct flow_rule *rule;

	rule = kzalloc(struct_size(rule, action.entries, num_actions),
		       GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->action.num_entries = num_actions;

	return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);
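
/*
 * Example (sketch, not part of this file): a caller allocates a rule
 * sized for the actions it will emit, then fills action.entries[].
 * The action IDs below are just illustrative choices.
 *
 *	struct flow_rule *rule;
 *
 *	rule = flow_rule_alloc(2);
 *	if (!rule)
 *		return -ENOMEM;
 *	rule->action.entries[0].id = FLOW_ACTION_VLAN_POP;
 *	rule->action.entries[1].id = FLOW_ACTION_DROP;
 *
 * FLOW_DISSECTOR_MATCH() below expands to local variable declarations
 * plus two assignments, which is why it forms the entire body of each
 * flow_rule_match_*() helper rather than being wrapped in do/while.
 */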
#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask)

void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);

void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);
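
/*
 * Example (sketch): how a driver typically consumes these helpers when
 * parsing an offloaded rule; my_drv_set_proto() is hypothetical.
 *
 *	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *		struct flow_match_basic match;
 *
 *		flow_rule_match_basic(rule, &match);
 *		my_drv_set_proto(priv, match.key->n_proto,
 *				 match.mask->n_proto);
 *	}
 */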

struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv))
{
	struct flow_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);

	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	block_cb->release = release;

	return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);

void flow_block_cb_free(struct flow_block_cb *block_cb)
{
	if (block_cb->release)
		block_cb->release(block_cb->cb_priv);

	kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);
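
/*
 * Example (sketch): drivers that need a release callback manage the
 * flow_block_cb lifetime themselves instead of going through
 * flow_block_cb_setup_simple(); my_drv_* names are hypothetical.
 *
 *	block_cb = flow_block_cb_alloc(my_drv_setup_cb, dev, priv,
 *				       my_drv_release);
 *	if (IS_ERR(block_cb))
 *		return PTR_ERR(block_cb);
 *	flow_block_cb_add(block_cb, f);
 */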

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return block_cb;
	}

	return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);

void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, driver_block_list, driver_list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);

int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_block_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv,
			       bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);
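
/*
 * Example (sketch): a driver typically calls this from the
 * TC_SETUP_BLOCK case of its ndo_setup_tc(); my_drv_* names are
 * hypothetical.
 *
 *	static LIST_HEAD(my_drv_block_cb_list);
 *
 *	case TC_SETUP_BLOCK:
 *		return flow_block_cb_setup_simple(type_data,
 *						  &my_drv_block_cb_list,
 *						  my_drv_setup_tc_block_cb,
 *						  priv, priv, true);
 */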

static LIST_HEAD(block_ing_cb_list);

static struct rhashtable indr_setup_block_ht;

struct flow_indr_block_cb {
	struct list_head list;
	void *cb_priv;
	flow_indr_block_bind_cb_t *cb;
	void *cb_ident;
};

struct flow_indr_block_dev {
	struct rhash_head ht_node;
	struct net_device *dev;
	unsigned int refcnt;
	struct list_head cb_list;
};

static const struct rhashtable_params flow_indr_setup_block_ht_params = {
	.key_offset	= offsetof(struct flow_indr_block_dev, dev),
	.head_offset	= offsetof(struct flow_indr_block_dev, ht_node),
	.key_len	= sizeof(struct net_device *),
};

static struct flow_indr_block_dev *
flow_indr_block_dev_lookup(struct net_device *dev)
{
	return rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
				      flow_indr_setup_block_ht_params);
}

static struct flow_indr_block_dev *
flow_indr_block_dev_get(struct net_device *dev)
{
	struct flow_indr_block_dev *indr_dev;

	indr_dev = flow_indr_block_dev_lookup(dev);
	if (indr_dev)
		goto inc_ref;

	indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	INIT_LIST_HEAD(&indr_dev->cb_list);
	indr_dev->dev = dev;
	if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node,
				   flow_indr_setup_block_ht_params)) {
		kfree(indr_dev);
		return NULL;
	}

inc_ref:
	indr_dev->refcnt++;
	return indr_dev;
}

static void flow_indr_block_dev_put(struct flow_indr_block_dev *indr_dev)
{
	if (--indr_dev->refcnt)
		return;

	rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node,
			       flow_indr_setup_block_ht_params);
	kfree(indr_dev);
}

static struct flow_indr_block_cb *
flow_indr_block_cb_lookup(struct flow_indr_block_dev *indr_dev,
			  flow_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;

	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
		if (indr_block_cb->cb == cb &&
		    indr_block_cb->cb_ident == cb_ident)
			return indr_block_cb;

	return NULL;
}

static struct flow_indr_block_cb *
flow_indr_block_cb_add(struct flow_indr_block_dev *indr_dev, void *cb_priv,
		       flow_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;

	indr_block_cb = flow_indr_block_cb_lookup(indr_dev, cb, cb_ident);
	if (indr_block_cb)
		return ERR_PTR(-EEXIST);

	indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL);
	if (!indr_block_cb)
		return ERR_PTR(-ENOMEM);

	indr_block_cb->cb_priv = cb_priv;
	indr_block_cb->cb = cb;
	indr_block_cb->cb_ident = cb_ident;
	list_add(&indr_block_cb->list, &indr_dev->cb_list);

	return indr_block_cb;
}

static void flow_indr_block_cb_del(struct flow_indr_block_cb *indr_block_cb)
{
	list_del(&indr_block_cb->list);
	kfree(indr_block_cb);
}

static void flow_block_ing_cmd(struct net_device *dev,
			       flow_indr_block_bind_cb_t *cb,
			       void *cb_priv,
			       enum flow_block_command command)
{
	struct flow_indr_block_ing_entry *entry;

	rcu_read_lock();
	list_for_each_entry_rcu(entry, &block_ing_cb_list, list) {
		entry->cb(dev, cb, cb_priv, command);
	}
	rcu_read_unlock();
}

int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				  flow_indr_block_bind_cb_t *cb,
				  void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;
	struct flow_indr_block_dev *indr_dev;
	int err;

	indr_dev = flow_indr_block_dev_get(dev);
	if (!indr_dev)
		return -ENOMEM;

	indr_block_cb = flow_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
	err = PTR_ERR_OR_ZERO(indr_block_cb);
	if (err)
		goto err_dev_put;

	flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
			   FLOW_BLOCK_BIND);
	return 0;

err_dev_put:
	flow_indr_block_dev_put(indr_dev);
	return err;
}
EXPORT_SYMBOL_GPL(__flow_indr_block_cb_register);

int flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				flow_indr_block_bind_cb_t *cb,
				void *cb_ident)
{
	int err;

	rtnl_lock();
	err = __flow_indr_block_cb_register(dev, cb_priv, cb, cb_ident);
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(flow_indr_block_cb_register);
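
/*
 * Example (sketch): a driver that offloads rules on tunnel devices it
 * does not own registers an indirect callback per device of interest;
 * my_drv_indr_setup_tc_cb is hypothetical.
 *
 *	err = flow_indr_block_cb_register(tunnel_dev, priv,
 *					  my_drv_indr_setup_tc_cb, priv);
 *	if (err)
 *		return err;
 */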

void __flow_indr_block_cb_unregister(struct net_device *dev,
				     flow_indr_block_bind_cb_t *cb,
				     void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;
	struct flow_indr_block_dev *indr_dev;

	indr_dev = flow_indr_block_dev_lookup(dev);
	if (!indr_dev)
		return;

	indr_block_cb = flow_indr_block_cb_lookup(indr_dev, cb, cb_ident);
	if (!indr_block_cb)
		return;

	flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
			   FLOW_BLOCK_UNBIND);

	flow_indr_block_cb_del(indr_block_cb);
	flow_indr_block_dev_put(indr_dev);
}
EXPORT_SYMBOL_GPL(__flow_indr_block_cb_unregister);

void flow_indr_block_cb_unregister(struct net_device *dev,
				   flow_indr_block_bind_cb_t *cb,
				   void *cb_ident)
{
	rtnl_lock();
	__flow_indr_block_cb_unregister(dev, cb, cb_ident);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(flow_indr_block_cb_unregister);

void flow_indr_block_call(struct net_device *dev,
			  struct flow_block_offload *bo,
			  enum flow_block_command command)
{
	struct flow_indr_block_cb *indr_block_cb;
	struct flow_indr_block_dev *indr_dev;

	indr_dev = flow_indr_block_dev_lookup(dev);
	if (!indr_dev)
		return;

	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
		indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
				  bo);
}
EXPORT_SYMBOL_GPL(flow_indr_block_call);
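
/*
 * Note (sketch): the subsystem owning the block invokes this when a
 * block is bound to or unbound from @dev, so drivers registered via
 * flow_indr_block_cb_register() see the same flow_block_offload
 * request as directly attached callbacks, e.g.:
 *
 *	flow_indr_block_call(dev, &bo, FLOW_BLOCK_BIND);
 */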

static DEFINE_MUTEX(flow_indr_block_ing_cb_lock);

void flow_indr_add_block_ing_cb(struct flow_indr_block_ing_entry *entry)
{
	mutex_lock(&flow_indr_block_ing_cb_lock);
	list_add_tail_rcu(&entry->list, &block_ing_cb_list);
	mutex_unlock(&flow_indr_block_ing_cb_lock);
}
EXPORT_SYMBOL_GPL(flow_indr_add_block_ing_cb);

void flow_indr_del_block_ing_cb(struct flow_indr_block_ing_entry *entry)
{
	mutex_lock(&flow_indr_block_ing_cb_lock);
	list_del_rcu(&entry->list);
	mutex_unlock(&flow_indr_block_ing_cb_lock);
}
EXPORT_SYMBOL_GPL(flow_indr_del_block_ing_cb);
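
/*
 * Example (sketch): a subsystem publishes its ingress block command
 * handler at init time; my_subsys_* names are hypothetical.
 *
 *	static struct flow_indr_block_ing_entry my_subsys_entry = {
 *		.cb = my_subsys_indr_block_ing_cmd,
 *	};
 *
 *	flow_indr_add_block_ing_cb(&my_subsys_entry);
 */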

static int __init init_flow_indr_rhashtable(void)
{
	return rhashtable_init(&indr_setup_block_ht,
			       &flow_indr_setup_block_ht_params);
}
subsys_initcall(init_flow_indr_rhashtable);