/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
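
/* Allocate a struct flow_rule with room for @num_actions action entries.
 * Every entry's hw_stats field is pre-set to FLOW_ACTION_HW_STATS_DONT_CARE;
 * callers that want statistics for a given action override it.
 */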
struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
	struct flow_rule *rule;
	int i;

	rule = kzalloc(struct_size(rule, action.entries, num_actions),
		       GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->action.num_entries = num_actions;
	/* Pre-fill each action hw_stats with DONT_CARE.
	 * Caller can override this if it wants stats for a given action.
	 */
	for (i = 0; i < num_actions; i++)
		rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);
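
/* Resolve the key/mask pointers for @__type from the rule's flow dissector.
 * The macro declares block-local variables, so use it at most once per scope,
 * as the helpers below do.
 */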
#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask)

void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);

void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);
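
/* Illustrative use only: a driver walking a rule typically gates each helper
 * on flow_rule_match_key() from <net/flow_offload.h>, e.g.:
 *
 *	struct flow_match_basic match;
 *
 *	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *		flow_rule_match_basic(rule, &match);
 *		... inspect match.key / match.mask ...
 *	}
 */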
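
/* Copy @len bytes of cookie data into a freshly allocated
 * struct flow_action_cookie; free it with flow_action_cookie_destroy().
 */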
struct flow_action_cookie *flow_action_cookie_create(void *data,
						     unsigned int len,
						     gfp_t gfp)
{
	struct flow_action_cookie *cookie;

	cookie = kmalloc(sizeof(*cookie) + len, gfp);
	if (!cookie)
		return NULL;

	cookie->cookie_len = len;
	memcpy(cookie->cookie, data, len);

	return cookie;
}
EXPORT_SYMBOL(flow_action_cookie_create);

void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
{
	kfree(cookie);
}
EXPORT_SYMBOL(flow_action_cookie_destroy);

void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out);
}
EXPORT_SYMBOL(flow_rule_match_ct);
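
/* Allocate a block callback descriptor. @release, if non-NULL, is run on
 * @cb_priv when the descriptor is freed via flow_block_cb_free().
 */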
struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv))
{
	struct flow_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);

	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	block_cb->release = release;

	return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);

void flow_block_cb_free(struct flow_block_cb *block_cb)
{
	if (block_cb->release)
		block_cb->release(block_cb->cb_priv);

	kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return block_cb;
	}

	return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);

void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, driver_block_list, driver_list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);
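
/* Bind/unbind helper for drivers that track their blocks on a single
 * driver-scoped list and, optionally, accept ingress blocks only.
 */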
int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_block_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv,
			       bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);
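
/* Indirect block offload lets a hardware driver receive block bind/unbind
 * requests for devices it did not register itself, e.g. tunnel devices.
 */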
static DEFINE_MUTEX(flow_indr_block_lock);
static LIST_HEAD(flow_block_indr_list);
static LIST_HEAD(flow_block_indr_dev_list);

struct flow_indr_dev {
	struct list_head		list;
	flow_indr_block_bind_cb_t	*cb;
	void				*cb_priv;
	refcount_t			refcnt;
	struct rcu_head			rcu;
};

static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
						 void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	indr_dev->cb = cb;
	indr_dev->cb_priv = cb_priv;
	refcount_set(&indr_dev->refcnt, 1);

	return indr_dev;
}
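
/* Register @cb for indirect block binds. Registrations are refcounted:
 * registering an existing (cb, cb_priv) pair only bumps its refcount.
 */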
int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) {
		if (indr_dev->cb == cb &&
		    indr_dev->cb_priv == cb_priv) {
			refcount_inc(&indr_dev->refcnt);
			mutex_unlock(&flow_indr_block_lock);
			return 0;
		}
	}

	indr_dev = flow_indr_dev_alloc(cb, cb_priv);
	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return -ENOMEM;
	}

	list_add(&indr_dev->list, &flow_block_indr_dev_list);
	mutex_unlock(&flow_indr_block_lock);

	return 0;
}
EXPORT_SYMBOL(flow_indr_dev_register);

static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
				      void *cb_priv,
				      struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
		if (this->release == release &&
		    this->indr.cb_priv == cb_priv) {
			list_move(&this->indr.list, cleanup_list);
			return;
		}
	}
}

static void flow_block_indr_notify(struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, cleanup_list, indr.list) {
		list_del(&this->indr.list);
		this->indr.cleanup(this);
	}
}
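
/* Drop a reference on the (cb, cb_priv) registration. On the last put, the
 * registration is removed and a block still bound through it is moved to a
 * cleanup list whose ->cleanup() callbacks run outside the lock.
 */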
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      void (*release)(void *cb_priv))
{
	struct flow_indr_dev *this, *next, *indr_dev = NULL;
	LIST_HEAD(cleanup_list);

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry_safe(this, next, &flow_block_indr_dev_list, list) {
		if (this->cb == cb &&
		    this->cb_priv == cb_priv &&
		    refcount_dec_and_test(&this->refcnt)) {
			indr_dev = this;
			list_del(&indr_dev->list);
			break;
		}
	}

	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return;
	}

	__flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
	mutex_unlock(&flow_indr_block_lock);

	flow_block_indr_notify(&cleanup_list);
	kfree(indr_dev);
}
EXPORT_SYMBOL(flow_indr_dev_unregister);

static void flow_block_indr_init(struct flow_block_cb *flow_block,
				 struct flow_block_offload *bo,
				 struct net_device *dev, struct Qdisc *sch,
				 void *data, void *cb_priv,
				 void (*cleanup)(struct flow_block_cb *block_cb))
{
	flow_block->indr.binder_type = bo->binder_type;
	flow_block->indr.data = data;
	flow_block->indr.cb_priv = cb_priv;
	flow_block->indr.dev = dev;
	flow_block->indr.sch = sch;
	flow_block->indr.cleanup = cleanup;
}
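
/* Like flow_block_cb_alloc(), but also record the indirect bind metadata and
 * track the block on flow_block_indr_list for cleanup on driver unregister.
 */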
struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
					       void *cb_ident, void *cb_priv,
					       void (*release)(void *cb_priv),
					       struct flow_block_offload *bo,
					       struct net_device *dev,
					       struct Qdisc *sch, void *data,
					       void *indr_cb_priv,
					       void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_block_cb *block_cb;

	block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
	if (IS_ERR(block_cb))
		goto out;

	flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup);
	list_add(&block_cb->indr.list, &flow_block_indr_list);

out:
	return block_cb;
}
EXPORT_SYMBOL(flow_indr_block_cb_alloc);
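
/* Offer an indirect block to every registered driver; -EOPNOTSUPP means no
 * driver added a callback to bo->cb_list.
 */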
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
				enum tc_setup_type type, void *data,
				struct flow_block_offload *bo,
				void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_indr_dev *this;

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry(this, &flow_block_indr_dev_list, list)
		this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
	mutex_unlock(&flow_indr_block_lock);

	return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0;
}
EXPORT_SYMBOL(flow_indr_dev_setup_offload);