// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_matchall.c		Match-all classifier
 *
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 */
# include <linux/kernel.h>
# include <linux/init.h>
# include <linux/module.h>
2019-01-17 23:44:25 +03:00
# include <linux/percpu.h>
2016-07-21 13:03:11 +03:00
# include <net/sch_generic.h>
# include <net/pkt_cls.h>
2017-01-31 16:14:29 +03:00
struct cls_mall_head {
2016-07-21 13:03:11 +03:00
struct tcf_exts exts ;
struct tcf_result res ;
u32 handle ;
2016-07-21 13:03:12 +03:00
u32 flags ;
2018-06-26 00:30:07 +03:00
unsigned int in_hw_count ;
2019-01-17 23:44:25 +03:00
struct tc_matchall_pcnt __percpu * pf ;
2018-05-24 01:26:53 +03:00
struct rcu_work rwork ;
2019-06-17 19:02:32 +03:00
bool deleting ;
2016-07-21 13:03:11 +03:00
} ;
static int mall_classify ( struct sk_buff * skb , const struct tcf_proto * tp ,
struct tcf_result * res )
{
struct cls_mall_head * head = rcu_dereference_bh ( tp - > root ) ;
cls_matchall: avoid panic when receiving a packet before filter set
When a matchall classifier is added, there is a small time interval in
which tp->root is NULL. If we receive a packet in this small time slice
a NULL pointer dereference will happen, leading to a kernel panic:
# tc qdisc replace dev eth0 ingress
# tc filter add dev eth0 parent ffff: matchall action gact drop
Unable to handle kernel NULL pointer dereference at virtual address 0000000000000034
Mem abort info:
ESR = 0x96000005
Exception class = DABT (current EL), IL = 32 bits
SET = 0, FnV = 0
EA = 0, S1PTW = 0
Data abort info:
ISV = 0, ISS = 0x00000005
CM = 0, WnR = 0
user pgtable: 4k pages, 39-bit VAs, pgdp = 00000000a623d530
[0000000000000034] pgd=0000000000000000, pud=0000000000000000
Internal error: Oops: 96000005 [#1] SMP
Modules linked in: cls_matchall sch_ingress nls_iso8859_1 nls_cp437 vfat fat m25p80 spi_nor mtd xhci_plat_hcd xhci_hcd phy_generic sfp mdio_i2c usbcore i2c_mv64xxx marvell10g mvpp2 usb_common spi_orion mvmdio i2c_core sbsa_gwdt phylink ip_tables x_tables autofs4
Process ksoftirqd/0 (pid: 9, stack limit = 0x0000000009de7d62)
CPU: 0 PID: 9 Comm: ksoftirqd/0 Not tainted 5.1.0-rc6 #21
Hardware name: Marvell 8040 MACCHIATOBin Double-shot (DT)
pstate: 40000005 (nZcv daif -PAN -UAO)
pc : mall_classify+0x28/0x78 [cls_matchall]
lr : tcf_classify+0x78/0x138
sp : ffffff80109db9d0
x29: ffffff80109db9d0 x28: ffffffc426058800
x27: 0000000000000000 x26: ffffffc425b0dd00
x25: 0000000020000000 x24: 0000000000000000
x23: ffffff80109dbac0 x22: 0000000000000001
x21: ffffffc428ab5100 x20: ffffffc425b0dd00
x19: ffffff80109dbac0 x18: 0000000000000000
x17: 0000000000000000 x16: 0000000000000000
x15: 0000000000000000 x14: 0000000000000000
x13: ffffffbf108ad288 x12: dead000000000200
x11: 00000000f0000000 x10: 0000000000000001
x9 : ffffffbf1089a220 x8 : 0000000000000001
x7 : ffffffbebffaa950 x6 : 0000000000000000
x5 : 000000442d6ba000 x4 : 0000000000000000
x3 : ffffff8008735ad8 x2 : ffffff80109dbac0
x1 : ffffffc425b0dd00 x0 : ffffff8010592078
Call trace:
mall_classify+0x28/0x78 [cls_matchall]
tcf_classify+0x78/0x138
__netif_receive_skb_core+0x29c/0xa20
__netif_receive_skb_one_core+0x34/0x60
__netif_receive_skb+0x28/0x78
netif_receive_skb_internal+0x2c/0xc0
napi_gro_receive+0x1a0/0x1d8
mvpp2_poll+0x928/0xb18 [mvpp2]
net_rx_action+0x108/0x378
__do_softirq+0x128/0x320
run_ksoftirqd+0x44/0x60
smpboot_thread_fn+0x168/0x1b0
kthread+0x12c/0x130
ret_from_fork+0x10/0x1c
Code: aa0203f3 aa1e03e0 d503201f f9400684 (b9403480)
---[ end trace fc71e2ef7b8ab5a5 ]---
Kernel panic - not syncing: Fatal exception in interrupt
SMP: stopping secondary CPUs
Kernel Offset: disabled
CPU features: 0x002,00002000
Memory Limit: none
Rebooting in 1 seconds..
Fix this by adding a NULL check in mall_classify().
Fixes: ed76f5edccc9 ("net: sched: protect filter_chain list with filter_chain_lock mutex")
Signed-off-by: Matteo Croce <mcroce@redhat.com>
Acked-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-05-02 11:51:05 +03:00
if ( unlikely ( ! head ) )
return - 1 ;
2017-01-31 16:14:29 +03:00
if ( tc_skip_sw ( head - > flags ) )
2016-07-21 13:03:12 +03:00
return - 1 ;
net/sched: cls_matchall: fix crash when used with classful qdisc
this script, edited from Linux Advanced Routing and Traffic Control guide
tc q a dev en0 root handle 1: htb default a
tc c a dev en0 parent 1: classid 1:1 htb rate 6mbit burst 15k
tc c a dev en0 parent 1:1 classid 1:a htb rate 5mbit ceil 6mbit burst 15k
tc c a dev en0 parent 1:1 classid 1:b htb rate 1mbit ceil 6mbit burst 15k
tc f a dev en0 parent 1:0 prio 1 $clsname $clsargs classid 1:b
ping $address -c1
tc -s c s dev en0
classifies traffic to 1:b or 1:a, depending on whether the packet matches
or not the pattern $clsargs of filter $clsname. However, when $clsname is
'matchall', a systematic crash can be observed in htb_classify(). HTB and
classful qdiscs don't assign initial value to struct tcf_result, but then
they expect it to contain valid values after filters have been run. Thus,
current 'matchall' ignores the TCA_MATCHALL_CLASSID attribute, configured
by user, and makes HTB (and classful qdiscs) dereference random pointers.
By assigning head->res to *res in mall_classify(), before the actions are
invoked, we fix this crash and enable TCA_MATCHALL_CLASSID functionality,
that had no effect on 'matchall' classifier since its first introduction.
BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=1460213
Reported-by: Jiri Benc <jbenc@redhat.com>
Fixes: b87f7936a932 ("net/sched: introduce Match-all classifier")
Signed-off-by: Davide Caratti <dcaratti@redhat.com>
Acked-by: Yotam Gigi <yotamg@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-09-16 15:02:21 +03:00
* res = head - > res ;
2019-01-17 23:44:25 +03:00
__this_cpu_inc ( head - > pf - > rhit ) ;
2017-01-31 16:14:29 +03:00
return tcf_exts_exec ( skb , & head - > exts , res ) ;
2016-07-21 13:03:11 +03:00
}
/* Nothing to set up per proto instance; the head is allocated on first
 * ->change().
 */
static int mall_init(struct tcf_proto *tp)
{
	return 0;
}
2017-11-07 00:47:26 +03:00
static void __mall_destroy ( struct cls_mall_head * head )
{
tcf_exts_destroy ( & head - > exts ) ;
tcf_exts_put_net ( & head - > exts ) ;
2019-01-17 23:44:25 +03:00
free_percpu ( head - > pf ) ;
2017-11-07 00:47:26 +03:00
kfree ( head ) ;
}
2017-10-27 04:24:35 +03:00
static void mall_destroy_work ( struct work_struct * work )
{
2018-05-24 01:26:53 +03:00
struct cls_mall_head * head = container_of ( to_rcu_work ( work ) ,
struct cls_mall_head ,
rwork ) ;
2017-10-27 04:24:35 +03:00
rtnl_lock ( ) ;
2017-11-07 00:47:26 +03:00
__mall_destroy ( head ) ;
2017-10-27 04:24:35 +03:00
rtnl_unlock ( ) ;
}
2017-10-19 16:50:33 +03:00
static void mall_destroy_hw_filter ( struct tcf_proto * tp ,
struct cls_mall_head * head ,
2018-01-24 23:54:19 +03:00
unsigned long cookie ,
struct netlink_ext_ack * extack )
2016-07-21 13:03:12 +03:00
{
2017-08-07 11:15:32 +03:00
struct tc_cls_matchall_offload cls_mall = { } ;
2017-10-19 16:50:33 +03:00
struct tcf_block * block = tp - > chain - > block ;
2016-07-21 13:03:12 +03:00
2019-05-07 03:24:21 +03:00
tc_cls_common_offload_init ( & cls_mall . common , tp , head - > flags , extack ) ;
2017-10-19 16:50:33 +03:00
cls_mall . command = TC_CLSMATCHALL_DESTROY ;
2017-08-07 11:15:32 +03:00
cls_mall . cookie = cookie ;
2016-07-21 13:03:12 +03:00
2019-08-26 16:44:59 +03:00
tc_setup_cb_destroy ( block , tp , TC_SETUP_CLSMATCHALL , & cls_mall , false ,
& head - > flags , & head - > in_hw_count , true ) ;
2016-07-21 13:03:12 +03:00
}
2017-10-19 16:50:33 +03:00
static int mall_replace_hw_filter ( struct tcf_proto * tp ,
struct cls_mall_head * head ,
2018-01-20 04:44:44 +03:00
unsigned long cookie ,
struct netlink_ext_ack * extack )
2016-07-21 13:03:12 +03:00
{
2017-08-07 11:15:32 +03:00
struct tc_cls_matchall_offload cls_mall = { } ;
2017-10-19 16:50:33 +03:00
struct tcf_block * block = tp - > chain - > block ;
bool skip_sw = tc_skip_sw ( head - > flags ) ;
int err ;
2016-07-21 13:03:12 +03:00
2019-05-04 14:46:17 +03:00
cls_mall . rule = flow_rule_alloc ( tcf_exts_num_actions ( & head - > exts ) ) ;
if ( ! cls_mall . rule )
return - ENOMEM ;
2019-05-07 03:24:21 +03:00
tc_cls_common_offload_init ( & cls_mall . common , tp , head - > flags , extack ) ;
2017-10-19 16:50:33 +03:00
cls_mall . command = TC_CLSMATCHALL_REPLACE ;
2017-08-07 11:15:32 +03:00
cls_mall . cookie = cookie ;
2016-07-21 13:03:12 +03:00
2020-02-17 13:12:12 +03:00
err = tc_setup_flow_action ( & cls_mall . rule - > action , & head - > exts ) ;
2019-05-04 14:46:17 +03:00
if ( err ) {
kfree ( cls_mall . rule ) ;
mall_destroy_hw_filter ( tp , head , cookie , NULL ) ;
if ( skip_sw )
NL_SET_ERR_MSG_MOD ( extack , " Failed to setup flow action " ) ;
else
err = 0 ;
return err ;
}
2019-08-26 16:44:59 +03:00
err = tc_setup_cb_add ( block , tp , TC_SETUP_CLSMATCHALL , & cls_mall ,
skip_sw , & head - > flags , & head - > in_hw_count , true ) ;
2019-08-29 19:15:16 +03:00
tc_cleanup_flow_action ( & cls_mall . rule - > action ) ;
2019-05-04 14:46:17 +03:00
kfree ( cls_mall . rule ) ;
2019-08-26 16:44:59 +03:00
if ( err ) {
2018-01-24 23:54:19 +03:00
mall_destroy_hw_filter ( tp , head , cookie , NULL ) ;
2017-10-19 16:50:33 +03:00
return err ;
}
if ( skip_sw & & ! ( head - > flags & TCA_CLS_FLAGS_IN_HW ) )
return - EINVAL ;
return 0 ;
2016-07-21 13:03:12 +03:00
}
2019-02-11 11:55:45 +03:00
static void mall_destroy ( struct tcf_proto * tp , bool rtnl_held ,
struct netlink_ext_ack * extack )
2016-07-21 13:03:11 +03:00
{
struct cls_mall_head * head = rtnl_dereference ( tp - > root ) ;
2017-01-31 16:14:29 +03:00
if ( ! head )
2017-04-20 00:21:21 +03:00
return ;
2016-07-21 13:03:11 +03:00
2018-08-14 12:28:26 +03:00
tcf_unbind_filter ( tp , & head - > res ) ;
2017-10-19 16:50:33 +03:00
if ( ! tc_skip_hw ( head - > flags ) )
2018-01-24 23:54:19 +03:00
mall_destroy_hw_filter ( tp , head , ( unsigned long ) head , extack ) ;
2016-07-21 13:03:12 +03:00
2017-11-07 00:47:26 +03:00
if ( tcf_exts_get_net ( & head - > exts ) )
2018-05-24 01:26:53 +03:00
tcf_queue_work ( & head - > rwork , mall_destroy_work ) ;
2017-11-07 00:47:26 +03:00
else
__mall_destroy ( head ) ;
2016-07-21 13:03:11 +03:00
}
2017-08-05 07:31:43 +03:00
static void * mall_get ( struct tcf_proto * tp , u32 handle )
2016-07-21 13:03:11 +03:00
{
2019-03-28 12:35:06 +03:00
struct cls_mall_head * head = rtnl_dereference ( tp - > root ) ;
if ( head & & head - > handle = = handle )
return head ;
2017-08-05 07:31:43 +03:00
return NULL ;
2016-07-21 13:03:11 +03:00
}
static const struct nla_policy mall_policy [ TCA_MATCHALL_MAX + 1 ] = {
[ TCA_MATCHALL_UNSPEC ] = { . type = NLA_UNSPEC } ,
[ TCA_MATCHALL_CLASSID ] = { . type = NLA_U32 } ,
2020-02-11 21:33:39 +03:00
[ TCA_MATCHALL_FLAGS ] = { . type = NLA_U32 } ,
2016-07-21 13:03:11 +03:00
} ;
/* Validate and install the actions, then bind the configured classid
 * (if any) to @head's result.
 */
static int mall_set_parms(struct net *net, struct tcf_proto *tp,
			  struct cls_mall_head *head,
			  unsigned long base, struct nlattr **tb,
			  struct nlattr *est, u32 flags,
			  struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &head->exts, flags, extack);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_CLASSID]) {
		head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		tcf_bind_filter(tp, &head->res, base);
	}

	return 0;
}
/* Create the single matchall filter on @tp.  An already existing head is
 * rejected with -EEXIST; readers observe the fully initialized head only
 * once rcu_assign_pointer() publishes it.
 */
static int mall_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
	struct cls_mall_head *new;
	u32 userflags = 0;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head)
		return -EEXIST;

	err = nla_parse_nested_deprecated(tb, TCA_MATCHALL_MAX,
					  tca[TCA_OPTIONS], mall_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_FLAGS]) {
		userflags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
		if (!tc_flags_valid(userflags))
			return -EINVAL;
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOBUFS;

	err = tcf_exts_init(&new->exts, net, TCA_MATCHALL_ACT, 0);
	if (err)
		goto err_exts_init;

	if (!handle)
		handle = 1;
	new->handle = handle;
	new->flags = userflags;
	new->pf = alloc_percpu(struct tc_matchall_pcnt);
	if (!new->pf) {
		err = -ENOMEM;
		goto err_alloc_percpu;
	}

	err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], flags,
			     extack);
	if (err)
		goto err_set_parms;

	if (!tc_skip_hw(new->flags)) {
		err = mall_replace_hw_filter(tp, new, (unsigned long)new,
					     extack);
		if (err)
			goto err_replace_hw_filter;
	}

	if (!tc_in_hw(new->flags))
		new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	/* head is NULL here (checked above); *arg deliberately reports
	 * "no pre-existing filter" back to the caller.
	 */
	*arg = head;
	rcu_assign_pointer(tp->root, new);
	return 0;

err_replace_hw_filter:
err_set_parms:
	free_percpu(new->pf);
err_alloc_percpu:
	tcf_exts_destroy(&new->exts);
err_exts_init:
	kfree(new);
	return err;
}
2018-01-18 19:20:53 +03:00
static int mall_delete ( struct tcf_proto * tp , void * arg , bool * last ,
2019-02-11 11:55:45 +03:00
bool rtnl_held , struct netlink_ext_ack * extack )
2016-07-21 13:03:11 +03:00
{
2019-06-17 19:02:32 +03:00
struct cls_mall_head * head = rtnl_dereference ( tp - > root ) ;
head - > deleting = true ;
* last = true ;
return 0 ;
2016-07-21 13:03:11 +03:00
}
2019-02-11 11:55:45 +03:00
static void mall_walk ( struct tcf_proto * tp , struct tcf_walker * arg ,
bool rtnl_held )
2016-07-21 13:03:11 +03:00
{
struct cls_mall_head * head = rtnl_dereference ( tp - > root ) ;
if ( arg - > count < arg - > skip )
goto skip ;
2019-02-15 18:17:56 +03:00
2019-06-17 19:02:32 +03:00
if ( ! head | | head - > deleting )
2019-02-15 18:17:56 +03:00
return ;
2017-08-05 07:31:43 +03:00
if ( arg - > fn ( tp , head , arg ) < 0 )
2016-07-21 13:03:11 +03:00
arg - > stop = 1 ;
skip :
arg - > count + + ;
}
2019-07-19 19:20:15 +03:00
static int mall_reoffload ( struct tcf_proto * tp , bool add , flow_setup_cb_t * cb ,
2018-06-26 00:30:07 +03:00
void * cb_priv , struct netlink_ext_ack * extack )
{
struct cls_mall_head * head = rtnl_dereference ( tp - > root ) ;
struct tc_cls_matchall_offload cls_mall = { } ;
struct tcf_block * block = tp - > chain - > block ;
int err ;
if ( tc_skip_hw ( head - > flags ) )
return 0 ;
2019-05-04 14:46:17 +03:00
cls_mall . rule = flow_rule_alloc ( tcf_exts_num_actions ( & head - > exts ) ) ;
if ( ! cls_mall . rule )
return - ENOMEM ;
2019-05-07 03:24:21 +03:00
tc_cls_common_offload_init ( & cls_mall . common , tp , head - > flags , extack ) ;
2018-06-26 00:30:07 +03:00
cls_mall . command = add ?
TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY ;
cls_mall . cookie = ( unsigned long ) head ;
2020-02-17 13:12:12 +03:00
err = tc_setup_flow_action ( & cls_mall . rule - > action , & head - > exts ) ;
2019-05-04 14:46:17 +03:00
if ( err ) {
kfree ( cls_mall . rule ) ;
if ( add & & tc_skip_sw ( head - > flags ) ) {
NL_SET_ERR_MSG_MOD ( extack , " Failed to setup flow action " ) ;
return err ;
}
2019-05-09 01:56:07 +03:00
return 0 ;
2019-05-04 14:46:17 +03:00
}
2019-08-26 16:44:59 +03:00
err = tc_setup_cb_reoffload ( block , tp , add , cb , TC_SETUP_CLSMATCHALL ,
& cls_mall , cb_priv , & head - > flags ,
& head - > in_hw_count ) ;
2019-08-29 19:15:16 +03:00
tc_cleanup_flow_action ( & cls_mall . rule - > action ) ;
2019-05-04 14:46:17 +03:00
kfree ( cls_mall . rule ) ;
2019-08-26 16:44:59 +03:00
if ( err )
return err ;
2018-06-26 00:30:07 +03:00
return 0 ;
}
2019-05-04 14:46:23 +03:00
static void mall_stats_hw_filter ( struct tcf_proto * tp ,
struct cls_mall_head * head ,
unsigned long cookie )
{
struct tc_cls_matchall_offload cls_mall = { } ;
struct tcf_block * block = tp - > chain - > block ;
2019-05-07 03:24:21 +03:00
tc_cls_common_offload_init ( & cls_mall . common , tp , head - > flags , NULL ) ;
2019-05-04 14:46:23 +03:00
cls_mall . command = TC_CLSMATCHALL_STATS ;
cls_mall . cookie = cookie ;
2019-08-26 16:44:59 +03:00
tc_setup_cb_call ( block , TC_SETUP_CLSMATCHALL , & cls_mall , false , true ) ;
2019-05-04 14:46:23 +03:00
tcf_exts_stats_update ( & head - > exts , cls_mall . stats . bytes ,
2020-06-19 09:01:07 +03:00
cls_mall . stats . pkts , cls_mall . stats . drops ,
cls_mall . stats . lastused ,
2020-03-28 18:37:43 +03:00
cls_mall . stats . used_hw_stats ,
cls_mall . stats . used_hw_stats_valid ) ;
2019-05-04 14:46:23 +03:00
}
2017-08-05 07:31:43 +03:00
static int mall_dump ( struct net * net , struct tcf_proto * tp , void * fh ,
2019-02-11 11:55:45 +03:00
struct sk_buff * skb , struct tcmsg * t , bool rtnl_held )
2016-07-21 13:03:11 +03:00
{
2019-01-17 23:44:25 +03:00
struct tc_matchall_pcnt gpf = { } ;
2017-08-05 07:31:43 +03:00
struct cls_mall_head * head = fh ;
2016-07-21 13:03:11 +03:00
struct nlattr * nest ;
2019-01-17 23:44:25 +03:00
int cpu ;
2016-07-21 13:03:11 +03:00
2017-01-31 16:14:29 +03:00
if ( ! head )
2016-07-21 13:03:11 +03:00
return skb - > len ;
2019-05-04 14:46:23 +03:00
if ( ! tc_skip_hw ( head - > flags ) )
mall_stats_hw_filter ( tp , head , ( unsigned long ) head ) ;
2017-01-31 16:14:29 +03:00
t - > tcm_handle = head - > handle ;
2016-07-21 13:03:11 +03:00
2019-04-26 12:13:06 +03:00
nest = nla_nest_start_noflag ( skb , TCA_OPTIONS ) ;
2016-07-21 13:03:11 +03:00
if ( ! nest )
goto nla_put_failure ;
2017-01-31 16:14:29 +03:00
if ( head - > res . classid & &
nla_put_u32 ( skb , TCA_MATCHALL_CLASSID , head - > res . classid ) )
2016-07-21 13:03:11 +03:00
goto nla_put_failure ;
2017-02-16 11:31:11 +03:00
if ( head - > flags & & nla_put_u32 ( skb , TCA_MATCHALL_FLAGS , head - > flags ) )
goto nla_put_failure ;
2019-01-17 23:44:25 +03:00
for_each_possible_cpu ( cpu ) {
struct tc_matchall_pcnt * pf = per_cpu_ptr ( head - > pf , cpu ) ;
gpf . rhit + = pf - > rhit ;
}
if ( nla_put_64bit ( skb , TCA_MATCHALL_PCNT ,
sizeof ( struct tc_matchall_pcnt ) ,
& gpf , TCA_MATCHALL_PAD ) )
goto nla_put_failure ;
2017-01-31 16:14:29 +03:00
if ( tcf_exts_dump ( skb , & head - > exts ) )
2016-07-21 13:03:11 +03:00
goto nla_put_failure ;
nla_nest_end ( skb , nest ) ;
2017-01-31 16:14:29 +03:00
if ( tcf_exts_dump_stats ( skb , & head - > exts ) < 0 )
2016-07-21 13:03:11 +03:00
goto nla_put_failure ;
return skb - > len ;
nla_put_failure :
nla_nest_cancel ( skb , nest ) ;
return - 1 ;
}
2020-01-24 03:26:18 +03:00
static void mall_bind_class ( void * fh , u32 classid , unsigned long cl , void * q ,
unsigned long base )
net_sched: add reverse binding for tc class
TC filters when used as classifiers are bound to TC classes.
However, there is a hidden difference when adding them in different
orders:
1. If we add tc classes before its filters, everything is fine.
Logically, the classes exist before we specify their ID's in
filters, it is easy to bind them together, just as in the current
code base.
2. If we add tc filters before the tc classes they bind, we have to
do dynamic lookup in fast path. What's worse, this happens all
the time not just once, because on fast path tcf_result is passed
on stack, there is no way to propagate back to the one in tc filters.
This hidden difference hurts performance silently if we have many tc
classes in hierarchy.
This patch intends to close this gap by doing the reverse binding when
we create a new class, in this case we can actually search all the
filters in its parent, match and fixup by classid. And because
tcf_result is specific to each type of tc filter, we have to introduce
a new ops for each filter to tell how to bind the class.
Note, we still can NOT totally get rid of those class lookup in
->enqueue() because cgroup and flow filters have no way to determine
the classid at setup time, they still have to go through dynamic lookup.
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-08-31 00:30:36 +03:00
{
struct cls_mall_head * head = fh ;
2020-01-24 03:26:18 +03:00
if ( head & & head - > res . classid = = classid ) {
if ( cl )
__tcf_bind_filter ( q , & head - > res , base ) ;
else
__tcf_unbind_filter ( q , & head - > res ) ;
}
net_sched: add reverse binding for tc class
TC filters when used as classifiers are bound to TC classes.
However, there is a hidden difference when adding them in different
orders:
1. If we add tc classes before its filters, everything is fine.
Logically, the classes exist before we specify their ID's in
filters, it is easy to bind them together, just as in the current
code base.
2. If we add tc filters before the tc classes they bind, we have to
do dynamic lookup in fast path. What's worse, this happens all
the time not just once, because on fast path tcf_result is passed
on stack, there is no way to propagate back to the one in tc filters.
This hidden difference hurts performance silently if we have many tc
classes in hierarchy.
This patch intends to close this gap by doing the reverse binding when
we create a new class, in this case we can actually search all the
filters in its parent, match and fixup by classid. And because
tcf_result is specific to each type of tc filter, we have to introduce
a new ops for each filter to tell how to bind the class.
Note, we still can NOT totally get rid of those class lookup in
->enqueue() because cgroup and flow filters have no way to determine
the classid at setup time, they still have to go through dynamic lookup.
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-08-31 00:30:36 +03:00
}
2016-07-21 13:03:11 +03:00
static struct tcf_proto_ops cls_mall_ops __read_mostly = {
. kind = " matchall " ,
. classify = mall_classify ,
. init = mall_init ,
. destroy = mall_destroy ,
. get = mall_get ,
. change = mall_change ,
. delete = mall_delete ,
. walk = mall_walk ,
2018-06-26 00:30:07 +03:00
. reoffload = mall_reoffload ,
2016-07-21 13:03:11 +03:00
. dump = mall_dump ,
net_sched: add reverse binding for tc class
TC filters when used as classifiers are bound to TC classes.
However, there is a hidden difference when adding them in different
orders:
1. If we add tc classes before its filters, everything is fine.
Logically, the classes exist before we specify their ID's in
filters, it is easy to bind them together, just as in the current
code base.
2. If we add tc filters before the tc classes they bind, we have to
do dynamic lookup in fast path. What's worse, this happens all
the time not just once, because on fast path tcf_result is passed
on stack, there is no way to propagate back to the one in tc filters.
This hidden difference hurts performance silently if we have many tc
classes in hierarchy.
This patch intends to close this gap by doing the reverse binding when
we create a new class, in this case we can actually search all the
filters in its parent, match and fixup by classid. And because
tcf_result is specific to each type of tc filter, we have to introduce
a new ops for each filter to tell how to bind the class.
Note, we still can NOT totally get rid of those class lookup in
->enqueue() because cgroup and flow filters have no way to determine
the classid at setup time, they still have to go through dynamic lookup.
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-08-31 00:30:36 +03:00
. bind_class = mall_bind_class ,
2016-07-21 13:03:11 +03:00
. owner = THIS_MODULE ,
} ;
/* Module init: register the matchall classifier with the TC core. */
static int __init cls_mall_init(void)
{
	return register_tcf_proto_ops(&cls_mall_ops);
}
/* Module exit: unregister the classifier. */
static void __exit cls_mall_exit(void)
{
	unregister_tcf_proto_ops(&cls_mall_ops);
}
module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");