/*
 * net/sched/cls_matchall.c		Match-all classifier
 *
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
# include <linux/kernel.h>
# include <linux/init.h>
# include <linux/module.h>
# include <net/sch_generic.h>
# include <net/pkt_cls.h>
/* State for the single matchall filter instance attached to a tcf_proto. */
struct cls_mall_filter {
	struct tcf_exts exts;	/* attached actions/extensions */
	struct tcf_result res;	/* classification result (classid) */
	u32 handle;		/* filter handle (defaults to 1) */
	struct rcu_head rcu;	/* for deferred free via call_rcu() */
	u32 flags;		/* TCA_CLS_FLAGS_* (skip_hw / skip_sw) */
};
/* Per-tcf_proto root: matchall holds at most one filter at a time. */
struct cls_mall_head {
	struct cls_mall_filter *filter;	/* installed filter, or NULL */
	struct rcu_head rcu;		/* for deferred free via kfree_rcu() */
};
static int mall_classify ( struct sk_buff * skb , const struct tcf_proto * tp ,
struct tcf_result * res )
{
struct cls_mall_head * head = rcu_dereference_bh ( tp - > root ) ;
struct cls_mall_filter * f = head - > filter ;
2016-07-21 13:03:12 +03:00
if ( tc_skip_sw ( f - > flags ) )
return - 1 ;
2016-07-21 13:03:11 +03:00
return tcf_exts_exec ( skb , & f - > exts , res ) ;
}
static int mall_init ( struct tcf_proto * tp )
{
struct cls_mall_head * head ;
head = kzalloc ( sizeof ( * head ) , GFP_KERNEL ) ;
if ( ! head )
return - ENOBUFS ;
rcu_assign_pointer ( tp - > root , head ) ;
return 0 ;
}
static void mall_destroy_filter ( struct rcu_head * head )
{
struct cls_mall_filter * f = container_of ( head , struct cls_mall_filter , rcu ) ;
tcf_exts_destroy ( & f - > exts ) ;
2016-07-21 13:03:12 +03:00
2016-07-21 13:03:11 +03:00
kfree ( f ) ;
}
2016-07-21 13:03:12 +03:00
static int mall_replace_hw_filter ( struct tcf_proto * tp ,
struct cls_mall_filter * f ,
unsigned long cookie )
{
struct net_device * dev = tp - > q - > dev_queue - > dev ;
struct tc_to_netdev offload ;
struct tc_cls_matchall_offload mall_offload = { 0 } ;
offload . type = TC_SETUP_MATCHALL ;
offload . cls_mall = & mall_offload ;
offload . cls_mall - > command = TC_CLSMATCHALL_REPLACE ;
offload . cls_mall - > exts = & f - > exts ;
offload . cls_mall - > cookie = cookie ;
return dev - > netdev_ops - > ndo_setup_tc ( dev , tp - > q - > handle , tp - > protocol ,
& offload ) ;
}
static void mall_destroy_hw_filter ( struct tcf_proto * tp ,
struct cls_mall_filter * f ,
unsigned long cookie )
{
struct net_device * dev = tp - > q - > dev_queue - > dev ;
struct tc_to_netdev offload ;
struct tc_cls_matchall_offload mall_offload = { 0 } ;
offload . type = TC_SETUP_MATCHALL ;
offload . cls_mall = & mall_offload ;
offload . cls_mall - > command = TC_CLSMATCHALL_DESTROY ;
offload . cls_mall - > exts = NULL ;
offload . cls_mall - > cookie = cookie ;
dev - > netdev_ops - > ndo_setup_tc ( dev , tp - > q - > handle , tp - > protocol ,
& offload ) ;
}
2016-07-21 13:03:11 +03:00
/* Tear down the classifier instance.  Refuses (returns false) when a
 * filter is still installed and @force is not set; otherwise removes the
 * hardware rule (if offloaded), schedules RCU-deferred frees, and
 * returns true.
 */
static bool mall_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct net_device *dev = tp->q->dev_queue->dev;
	struct cls_mall_filter *f = head->filter;

	if (!force && f)
		return false;

	if (f) {
		if (tc_should_offload(dev, tp, f->flags))
			mall_destroy_hw_filter(tp, f, (unsigned long) f);

		call_rcu(&f->rcu, mall_destroy_filter);
	}
	kfree_rcu(head, rcu);
	return true;
}
/* Look up the filter by handle; returns it as an opaque cookie, 0 if no
 * filter with that handle is installed.
 */
static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct cls_mall_filter *f = head->filter;

	return (f && f->handle == handle) ? (unsigned long) f : 0;
}
/* Netlink attribute policy for the TCA_MATCHALL_* options. */
static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
	[TCA_MATCHALL_UNSPEC]	= { .type = NLA_UNSPEC },
	[TCA_MATCHALL_CLASSID]	= { .type = NLA_U32 },
};
/* Validate the netlink attributes and apply them to filter @f: actions
 * (via a temporary tcf_exts swapped in with tcf_exts_change()) and the
 * optional classid binding.  Returns 0 or a negative errno; on failure
 * the temporary exts are destroyed and @f is left untouched.
 */
static int mall_set_parms(struct net *net, struct tcf_proto *tp,
			  struct cls_mall_filter *f,
			  unsigned long base, struct nlattr **tb,
			  struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	err = tcf_exts_init(&e, TCA_MATCHALL_ACT, 0);
	if (err)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		goto errout;

	if (tb[TCA_MATCHALL_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;

errout:
	tcf_exts_destroy(&e);
	return err;
}
/* Create the (single) matchall filter for this tcf_proto.  matchall
 * supports exactly one filter per instance: a second add returns -EBUSY
 * and replacing an existing filter (@fold set) returns -EINVAL.  On
 * success the new filter is published on head->filter and returned to
 * the caller through @arg.
 */
static int mall_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       unsigned long *arg, bool ovr)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct cls_mall_filter *fold = (struct cls_mall_filter *) *arg;
	struct net_device *dev = tp->q->dev_queue->dev;
	struct cls_mall_filter *f;
	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
	u32 flags = 0;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head->filter)
		return -EBUSY;

	if (fold)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_MATCHALL_MAX,
			       tca[TCA_OPTIONS], mall_policy);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_FLAGS]) {
		flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
		if (!tc_flags_valid(flags))
			return -EINVAL;
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return -ENOBUFS;

	err = tcf_exts_init(&f->exts, TCA_MATCHALL_ACT, 0);
	if (err)
		goto err_exts_init;

	if (!handle)
		handle = 1;
	f->handle = handle;
	f->flags = flags;

	err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
	if (err)
		goto err_set_parms;

	if (tc_should_offload(dev, tp, flags)) {
		err = mall_replace_hw_filter(tp, f, (unsigned long) f);
		if (err) {
			/* Hardware rejected the rule: fatal only when the
			 * user demanded hardware-only (skip_sw) operation.
			 */
			if (tc_skip_sw(flags))
				goto err_replace_hw_filter;
			else
				err = 0;
		}
	}

	*arg = (unsigned long) f;
	rcu_assign_pointer(head->filter, f);

	return 0;

err_replace_hw_filter:
err_set_parms:
	tcf_exts_destroy(&f->exts);
err_exts_init:
	kfree(f);
	return err;
}
static int mall_delete ( struct tcf_proto * tp , unsigned long arg )
{
struct cls_mall_head * head = rtnl_dereference ( tp - > root ) ;
struct cls_mall_filter * f = ( struct cls_mall_filter * ) arg ;
2016-07-21 13:03:12 +03:00
struct net_device * dev = tp - > q - > dev_queue - > dev ;
if ( tc_should_offload ( dev , tp , f - > flags ) )
mall_destroy_hw_filter ( tp , f , ( unsigned long ) f ) ;
2016-07-21 13:03:11 +03:00
RCU_INIT_POINTER ( head - > filter , NULL ) ;
tcf_unbind_filter ( tp , & f - > res ) ;
call_rcu ( & f - > rcu , mall_destroy_filter ) ;
return 0 ;
}
static void mall_walk ( struct tcf_proto * tp , struct tcf_walker * arg )
{
struct cls_mall_head * head = rtnl_dereference ( tp - > root ) ;
struct cls_mall_filter * f = head - > filter ;
if ( arg - > count < arg - > skip )
goto skip ;
if ( arg - > fn ( tp , ( unsigned long ) f , arg ) < 0 )
arg - > stop = 1 ;
skip :
arg - > count + + ;
}
static int mall_dump ( struct net * net , struct tcf_proto * tp , unsigned long fh ,
struct sk_buff * skb , struct tcmsg * t )
{
struct cls_mall_filter * f = ( struct cls_mall_filter * ) fh ;
struct nlattr * nest ;
if ( ! f )
return skb - > len ;
t - > tcm_handle = f - > handle ;
nest = nla_nest_start ( skb , TCA_OPTIONS ) ;
if ( ! nest )
goto nla_put_failure ;
if ( f - > res . classid & &
nla_put_u32 ( skb , TCA_MATCHALL_CLASSID , f - > res . classid ) )
goto nla_put_failure ;
if ( tcf_exts_dump ( skb , & f - > exts ) )
goto nla_put_failure ;
nla_nest_end ( skb , nest ) ;
if ( tcf_exts_dump_stats ( skb , & f - > exts ) < 0 )
goto nla_put_failure ;
return skb - > len ;
nla_put_failure :
nla_nest_cancel ( skb , nest ) ;
return - 1 ;
}
/* Classifier ops table registered with the tc core.  .kind must be
 * exactly "matchall" (no padding) — it is string-matched against the
 * classifier name requested from user space.
 */
static struct tcf_proto_ops cls_mall_ops __read_mostly = {
	.kind		= "matchall",
	.classify	= mall_classify,
	.init		= mall_init,
	.destroy	= mall_destroy,
	.get		= mall_get,
	.change		= mall_change,
	.delete		= mall_delete,
	.walk		= mall_walk,
	.dump		= mall_dump,
	.owner		= THIS_MODULE,
};
/* Module entry point: register the matchall classifier with the tc core. */
static int __init cls_mall_init(void)
{
	return register_tcf_proto_ops(&cls_mall_ops);
}
/* Module exit point: unregister the matchall classifier. */
static void __exit cls_mall_exit(void)
{
	unregister_tcf_proto_ops(&cls_mall_ops);
}
module_init(cls_mall_init);
module_exit(cls_mall_exit);

/* String literals must be exact: MODULE_LICENSE("GPL v2") is matched
 * verbatim by the module loader; padded variants would taint the kernel.
 */
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");