2005-04-17 02:20:36 +04:00
# ifndef __NET_PKT_CLS_H
# define __NET_PKT_CLS_H
# include <linux/pkt_cls.h>
# include <net/sch_generic.h>
# include <net/act_api.h>
/* Basic packet classifier frontend definitions. */
2009-11-03 06:26:03 +03:00
/* Iteration state handed to a classifier's walk callback; @fn is invoked
 * per filter node and may set @stop to abort the walk.
 */
struct tcf_walker {
	int	stop;	/* set nonzero to terminate the walk early */
	int	skip;	/* presumably: entries to skip before calling fn — TODO confirm */
	int	count;	/* entries seen so far */
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
2013-07-31 09:47:13 +04:00
int register_tcf_proto_ops ( struct tcf_proto_ops * ops ) ;
int unregister_tcf_proto_ops ( struct tcf_proto_ops * ops ) ;
2005-04-17 02:20:36 +04:00
2017-02-15 13:57:50 +03:00
# ifdef CONFIG_NET_CLS
2017-05-23 19:42:37 +03:00
struct tcf_chain * tcf_chain_get ( struct tcf_block * block , u32 chain_index ,
bool create ) ;
2017-05-17 12:08:01 +03:00
void tcf_chain_put ( struct tcf_chain * chain ) ;
2017-05-17 12:07:55 +03:00
int tcf_block_get ( struct tcf_block * * p_block ,
2017-10-13 15:00:57 +03:00
struct tcf_proto __rcu * * p_filter_chain , struct Qdisc * q ) ;
2017-05-17 12:07:55 +03:00
void tcf_block_put ( struct tcf_block * block ) ;
2017-10-13 15:00:59 +03:00
/* Return the qdisc this filter block is attached to (may be NULL,
 * e.g. for shared blocks — see tcf_bind_filter()).
 */
static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return block->q;
}
static inline struct net_device * tcf_block_dev ( struct tcf_block * block )
{
return tcf_block_q ( block ) - > dev_queue - > dev ;
}
2017-05-17 12:07:54 +03:00
int tcf_classify ( struct sk_buff * skb , const struct tcf_proto * tp ,
struct tcf_result * res , bool compat_mode ) ;
2017-02-15 13:57:50 +03:00
# else
2017-05-17 12:07:55 +03:00
/* CONFIG_NET_CLS disabled: block setup trivially succeeds. */
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q)
{
	return 0;
}
/* CONFIG_NET_CLS disabled: nothing was allocated, nothing to release. */
static inline void tcf_block_put(struct tcf_block *block)
{
}
2017-05-17 12:07:54 +03:00
2017-10-13 15:00:59 +03:00
/* CONFIG_NET_CLS disabled: no qdisc is ever attached. */
static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}
/* CONFIG_NET_CLS disabled: no device backs the block. */
static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
	return NULL;
}
2017-05-17 12:07:54 +03:00
/* CONFIG_NET_CLS disabled: classification yields no verdict. */
static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
2017-02-15 13:57:50 +03:00
# endif
2017-02-09 16:38:56 +03:00
2005-04-17 02:20:36 +04:00
/* Atomically exchange the bound class, returning the previous value.
 * Lockless variant; callers needing qdisc-tree serialization use
 * cls_set_class() instead.
 */
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}
/* Exchange the bound class under the qdisc tree lock and return the
 * class that was previously installed.
 */
static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long prev;

	sch_tree_lock(q);
	prev = __cls_set_class(clp, cl);
	sch_tree_unlock(q);

	return prev;
}
static inline void
tcf_bind_filter ( struct tcf_proto * tp , struct tcf_result * r , unsigned long base )
{
2017-10-13 15:01:00 +03:00
struct Qdisc * q = tp - > chain - > block - > q ;
2005-04-17 02:20:36 +04:00
unsigned long cl ;
2017-10-13 15:01:00 +03:00
/* Check q as it is not set for shared blocks. In that case,
* setting class is not supported .
*/
if ( ! q )
return ;
cl = q - > ops - > cl_ops - > bind_tcf ( q , base , r - > classid ) ;
cl = cls_set_class ( q , & r - > class , cl ) ;
2005-04-17 02:20:36 +04:00
if ( cl )
2017-10-13 15:01:00 +03:00
q - > ops - > cl_ops - > unbind_tcf ( q , cl ) ;
2005-04-17 02:20:36 +04:00
}
static inline void
tcf_unbind_filter ( struct tcf_proto * tp , struct tcf_result * r )
{
2017-10-13 15:01:00 +03:00
struct Qdisc * q = tp - > chain - > block - > q ;
2005-04-17 02:20:36 +04:00
unsigned long cl ;
2017-10-13 15:01:00 +03:00
if ( ! q )
return ;
2005-04-17 02:20:36 +04:00
if ( ( cl = __cls_set_class ( & r - > class , 0 ) ) ! = 0 )
2017-10-13 15:01:00 +03:00
q - > ops - > cl_ops - > unbind_tcf ( q , cl ) ;
2005-04-17 02:20:36 +04:00
}
2009-11-03 06:26:03 +03:00
/* Container for the actions (and legacy police/action TLV mapping)
 * attached to a classifier filter.
 */
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32 type;		/* for backward compat(TCA_OLD_COMPAT) */
	int nr_actions;		/* number of used slots in actions[] */
	struct tc_action **actions; /* kcalloc'ed, TCA_ACT_MAX_PRIO slots (see tcf_exts_init) */
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};
2016-08-19 22:36:54 +03:00
/* Initialize an exts handle, allocating the action pointer array when
 * actions are compiled in.  Returns 0 on success or -ENOMEM; on failure
 * the action/police TLV types are left unset.
 */
static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
2016-08-14 08:35:00 +03:00
/* Append every configured action to @actions, preserving order. */
static inline void tcf_exts_to_list(const struct tcf_exts *exts,
				    struct list_head *actions)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	for (i = 0; i < exts->nr_actions; i++)
		list_add_tail(&exts->actions[i]->list, actions);
#endif
}
2017-05-31 18:06:43 +03:00
/* Propagate byte/packet/lastuse counters into each attached action.
 * Preemption is disabled for the duration of the updates.
 */
static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++)
		tcf_action_stats_update(exts->actions[i], bytes, packets,
					lastuse);

	preempt_enable();
#endif
}
2017-08-04 15:28:58 +03:00
/**
* tcf_exts_has_actions - check if at least one action is present
* @ exts : tc filter extensions handle
*
* Returns true if at least one action is present .
*/
static inline bool tcf_exts_has_actions ( struct tcf_exts * exts )
{
2016-08-14 08:34:59 +03:00
# ifdef CONFIG_NET_CLS_ACT
2017-08-04 15:28:58 +03:00
return exts - > nr_actions ;
# else
return false ;
# endif
}
2016-08-14 08:34:59 +03:00
2017-08-04 15:28:58 +03:00
/**
 * tcf_exts_has_one_action - check if exactly one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if exactly one action is present.
 * Always false when CONFIG_NET_CLS_ACT is not set.
 */
static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions == 1;
#else
	return false;
#endif
}
2016-08-14 08:34:59 +03:00
2017-08-04 15:28:59 +03:00
/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	/* No actions compiled in: nothing can veto the match. */
	return TC_ACT_OK;
}
2013-07-31 09:47:13 +04:00
int tcf_exts_validate ( struct net * net , struct tcf_proto * tp ,
struct nlattr * * tb , struct nlattr * rate_tlv ,
2014-04-26 00:54:06 +04:00
struct tcf_exts * exts , bool ovr ) ;
2014-09-25 21:26:37 +04:00
void tcf_exts_destroy ( struct tcf_exts * exts ) ;
2017-08-04 15:29:15 +03:00
void tcf_exts_change ( struct tcf_exts * dst , struct tcf_exts * src ) ;
2013-12-16 08:15:07 +04:00
int tcf_exts_dump ( struct sk_buff * skb , struct tcf_exts * exts ) ;
int tcf_exts_dump_stats ( struct sk_buff * skb , struct tcf_exts * exts ) ;
2005-04-17 02:20:36 +04:00
/**
 * struct tcf_pkt_info - packet information
 * @ptr: packet data pointer shared between classifier and ematches
 *       (presumably a parse cursor — semantics set by the ematch users)
 * @nexthdr: next-header state accompanying @ptr — TODO confirm exact meaning
 */
struct tcf_pkt_info {
	unsigned char *ptr;
	int nexthdr;
};
# ifdef CONFIG_NET_EMATCH
struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the network namespace this match belongs to
 */
struct tcf_ematch {
	struct tcf_ematch_ops *ops;
	unsigned long data;
	unsigned int datalen;
	u16 matchid;
	u16 flags;
	struct net *net;
};
/* A container ematch has no ops of its own; it only groups others. */
static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

/* True if the match carries its data inline (TCF_EM_SIMPLE). */
static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

/* True if the match result must be inverted (TCF_EM_INVERT). */
static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

/* True if this is the last match in its relation chain. */
static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}
static inline int tcf_em_early_end ( struct tcf_ematch * em , int result )
{
if ( tcf_em_last_match ( em ) )
return 1 ;
if ( result = = 0 & & em - > flags & TCF_EM_REL_AND )
return 1 ;
if ( result ! = 0 & & em - > flags & TCF_EM_REL_OR )
return 1 ;
return 0 ;
}
/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destroyage (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int kind;
	int datalen;
	int (*change)(struct net *net, void *,
		      int, struct tcf_ematch *);
	int (*match)(struct sk_buff *, struct tcf_ematch *,
		     struct tcf_pkt_info *);
	void (*destroy)(struct tcf_ematch *);
	int (*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module *owner;
	struct list_head link;
};
2013-07-31 09:47:13 +04:00
int tcf_em_register ( struct tcf_ematch_ops * ) ;
void tcf_em_unregister ( struct tcf_ematch_ops * ) ;
int tcf_em_tree_validate ( struct tcf_proto * , struct nlattr * ,
struct tcf_ematch_tree * ) ;
2014-10-06 08:27:53 +04:00
void tcf_em_tree_destroy ( struct tcf_ematch_tree * ) ;
2013-07-31 09:47:13 +04:00
int tcf_em_tree_dump ( struct sk_buff * , struct tcf_ematch_tree * , int ) ;
int __tcf_em_tree_match ( struct sk_buff * , struct tcf_ematch_tree * ,
struct tcf_pkt_info * ) ;
2005-04-17 02:20:36 +04:00
/**
* tcf_em_tree_match - evaulate an ematch tree
*
* @ skb : socket buffer of the packet in question
* @ tree : ematch tree to be used for evaluation
* @ info : packet information examined by classifier
*
* This function matches @ skb against the ematch tree in @ tree by going
* through all ematches respecting their logic relations returning
* as soon as the result is obvious .
*
* Returns 1 if the ematch tree as - one matches , no ematches are configured
* or ematch is not enabled in the kernel , otherwise 0 is returned .
*/
static inline int tcf_em_tree_match ( struct sk_buff * skb ,
struct tcf_ematch_tree * tree ,
struct tcf_pkt_info * info )
{
if ( tree - > hdr . nmatches )
return __tcf_em_tree_match ( skb , tree , info ) ;
else
return 1 ;
}
2007-07-12 06:46:26 +04:00
# define MODULE_ALIAS_TCF_EMATCH(kind) MODULE_ALIAS("ematch-kind-" __stringify(kind))
2005-04-17 02:20:36 +04:00
# else /* CONFIG_NET_EMATCH */
2009-11-03 06:26:03 +03:00
/* CONFIG_NET_EMATCH disabled: empty tree type and no-op stand-ins.
 * The (void) casts keep the arguments "used" without evaluating anything
 * meaningful; tcf_em_tree_match() always reports a match.
 */
struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)
# endif /* CONFIG_NET_EMATCH */
static inline unsigned char * tcf_get_base_ptr ( struct sk_buff * skb , int layer )
{
switch ( layer ) {
case TCF_LAYER_LINK :
return skb - > data ;
case TCF_LAYER_NETWORK :
2007-04-11 07:50:43 +04:00
return skb_network_header ( skb ) ;
2005-04-17 02:20:36 +04:00
case TCF_LAYER_TRANSPORT :
2007-04-26 05:04:18 +04:00
return skb_transport_header ( skb ) ;
2005-04-17 02:20:36 +04:00
}
return NULL ;
}
2007-04-21 09:47:35 +04:00
/* Check that [ptr, ptr + len) lies entirely inside the skb's linear
 * data area.  The final (ptr <= ptr + len) term rejects pointer
 * wrap-around from a bogus @len.
 */
static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
# ifdef CONFIG_NET_CLS_IND
2007-12-04 12:15:45 +03:00
# include <net/net_namespace.h>
2005-04-17 02:20:36 +04:00
/* Resolve a netlink-supplied device name to its ifindex.
 * Returns the ifindex on success, -EINVAL if the name does not fit in
 * IFNAMSIZ, or -ENODEV if no such device exists in @net.
 */
static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ)
		return -EINVAL;
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}
2014-01-10 04:14:02 +04:00
static inline bool
tcf_match_indev ( struct sk_buff * skb , int ifindex )
2005-04-17 02:20:36 +04:00
{
2014-01-10 04:14:02 +04:00
if ( ! ifindex )
return true ;
if ( ! skb - > skb_iif )
return false ;
return ifindex = = skb - > skb_iif ;
2005-04-17 02:20:36 +04:00
}
# endif /* CONFIG_NET_CLS_IND */
2017-10-11 10:41:09 +03:00
int tc_setup_cb_call ( struct tcf_exts * exts , enum tc_setup_type type ,
void * type_data , bool err_stop ) ;
2017-08-07 11:15:29 +03:00
/* Header shared by all classifier hardware-offload request structures. */
struct tc_cls_common_offload {
	u32 chain_index;	/* chain the filter is attached to */
	__be16 protocol;	/* protocol, network byte order */
	u32 prio;		/* filter priority */
	u32 classid;		/* classid from the tcf_proto */
};

/* Populate the common offload header from a classifier instance. */
static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
	cls_common->classid = tp->classid;
}
2016-02-17 08:17:09 +03:00
/* Key-node description passed to drivers for u32 offload. */
struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

/* Hash-node description passed to drivers for u32 offload. */
struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

/* Offload operations a driver may receive for the u32 classifier. */
enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

/* u32 offload request; @command selects which union member is valid. */
struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};
2017-08-09 15:30:35 +03:00
static inline bool tc_can_offload ( const struct net_device * dev )
2016-02-26 18:53:49 +03:00
{
2016-02-26 18:54:13 +03:00
if ( ! ( dev - > features & NETIF_F_HW_TC ) )
return false ;
2016-02-26 18:54:39 +03:00
if ( ! dev - > netdev_ops - > ndo_setup_tc )
return false ;
return true ;
2016-02-26 18:53:49 +03:00
}
2016-12-01 15:06:33 +03:00
/* True if the user requested the filter never be offloaded to hardware. */
static inline bool tc_skip_hw(u32 flags)
{
	return flags & TCA_CLS_FLAGS_SKIP_HW;
}
2017-08-09 15:30:35 +03:00
/* Offload only when not explicitly skipped and the device supports it. */
static inline bool tc_should_offload(const struct net_device *dev, u32 flags)
{
	return !tc_skip_hw(flags) && tc_can_offload(dev);
}
2016-05-13 03:08:23 +03:00
/* True if the user requested the filter never run in software. */
static inline bool tc_skip_sw(u32 flags)
{
	return flags & TCA_CLS_FLAGS_SKIP_SW;
}
/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	u32 allowed = TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;

	/* Reject unknown bits. */
	if (flags & ~allowed)
		return false;

	/* Reject both skip flags set at once (filter would run nowhere). */
	if ((flags & allowed) == allowed)
		return false;

	return true;
}
2017-02-16 11:31:12 +03:00
/* True if the filter is reported as residing in hardware. */
static inline bool tc_in_hw(u32 flags)
{
	return flags & TCA_CLS_FLAGS_IN_HW;
}
2016-03-08 13:42:29 +03:00
/* Offload operations a driver may receive for the flower classifier. */
enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
};

/* flower offload request handed to ndo_setup_tc. */
struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	unsigned long cookie;	/* identifies the rule across calls */
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
};
2016-07-21 13:03:12 +03:00
/* Offload operations a driver may receive for the matchall classifier. */
enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

/* matchall offload request handed to ndo_setup_tc. */
struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;	/* identifies the rule across calls */
};
2016-09-21 13:43:53 +03:00
/* Offload operations a driver may receive for the cls_bpf classifier. */
enum tc_clsbpf_command {
	TC_CLSBPF_ADD,
	TC_CLSBPF_REPLACE,
	TC_CLSBPF_DESTROY,
	TC_CLSBPF_STATS,
};

/* cls_bpf offload request handed to ndo_setup_tc. */
struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	const char *name;
	bool exts_integrated;	/* direct-action mode */
	u32 gen_flags;
};
2017-09-07 14:00:06 +03:00
/* mqprio offload parameters; extends the UAPI tc_mqprio_qopt with
 * mode/shaper selection and per-TC rate limits.
 */
struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};
2017-01-24 15:02:41 +03:00
/* This structure holds cookie structure that is passed from user
 * to the kernel for actions and classifiers
 */
struct tc_cookie {
	u8  *data;	/* opaque user-supplied bytes */
	u32 len;	/* length of data in bytes */
};
2005-04-17 02:20:36 +04:00
# endif