/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>

/* Protects offdevs, members of bpf_offload_netdev and offload members
2018-01-12 07:29:09 +03:00
* of all progs .
2017-12-28 05:39:03 +03:00
* RTNL lock cannot be taken when holding this lock .
*/
static DECLARE_RWSEM ( bpf_devs_lock ) ;
2018-07-17 20:53:24 +03:00
2018-07-17 20:53:25 +03:00
/* One instance per offload-capable device driver.  Ties the driver's
 * verification/translation callbacks (ops) to the set of netdevs the
 * driver registered for offload.
 */
struct bpf_offload_dev {
	const struct bpf_prog_offload_ops *ops;
	struct list_head netdevs;	/* list of bpf_offload_netdev::offdev_netdevs */
	void *priv;			/* driver cookie, see bpf_offload_dev_priv() */
};
2018-07-17 20:53:24 +03:00
/* Per-netdev offload state; entries live in the offdevs hash table,
 * keyed by the netdev pointer.
 */
struct bpf_offload_netdev {
	struct rhash_head l;		/* membership in offdevs */
	struct net_device *netdev;	/* hash key */
	struct bpf_offload_dev *offdev;
	struct list_head progs;		/* progs offloaded to this netdev */
	struct list_head maps;		/* maps offloaded to this netdev */
	struct list_head offdev_netdevs; /* membership in offdev->netdevs */
};

static const struct rhashtable_params offdevs_params = {
	.nelem_hint		= 4,
	.key_len		= sizeof(struct net_device *),
	.key_offset		= offsetof(struct bpf_offload_netdev, netdev),
	.head_offset		= offsetof(struct bpf_offload_netdev, l),
	.automatic_shrinking	= true,
};

static struct rhashtable offdevs;
/* Set once the offdevs table has been lazily initialized (under bpf_devs_lock) */
static bool offdevs_inited;
2017-11-03 23:56:17 +03:00
2018-01-12 07:29:08 +03:00
static int bpf_dev_offload_check ( struct net_device * netdev )
{
if ( ! netdev )
return - EINVAL ;
if ( ! netdev - > netdev_ops - > ndo_bpf )
return - EOPNOTSUPP ;
return 0 ;
}
2018-07-17 20:53:24 +03:00
/* Look up the offload state for @netdev, or NULL if it is not registered.
 * Caller must hold bpf_devs_lock (read or write).
 */
static struct bpf_offload_netdev *
bpf_offload_find_netdev(struct net_device *netdev)
{
	lockdep_assert_held(&bpf_devs_lock);

	/* Table is created lazily in bpf_offload_dev_create() */
	if (!offdevs_inited)
		return NULL;
	return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
}
2017-11-03 23:56:17 +03:00
/* Attach device-offload state to @prog for the netdev identified by
 * attr->prog_ifindex.  Only SCHED_CLS and XDP programs may be offloaded,
 * and no prog_flags are accepted.
 * Returns 0 on success or a negative errno.
 */
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct bpf_offload_netdev *ondev;
	struct bpf_prog_offload *offload;
	int err;

	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags)
		return -EINVAL;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;

	/* Takes a reference on the netdev; dropped once the prog is linked
	 * onto the netdev's list (or on the error paths below).
	 */
	offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
					   attr->prog_ifindex);
	err = bpf_dev_offload_check(offload->netdev);
	if (err)
		goto err_maybe_put;

	down_write(&bpf_devs_lock);
	ondev = bpf_offload_find_netdev(offload->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}
	offload->offdev = ondev->offdev;
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &ondev->progs);
	dev_put(offload->netdev);
	up_write(&bpf_devs_lock);

	return 0;
err_unlock:
	up_write(&bpf_devs_lock);
err_maybe_put:
	/* netdev may be NULL if dev_get_by_index() failed */
	if (offload->netdev)
		dev_put(offload->netdev);
	kfree(offload);
	return err;
}
2018-11-09 16:03:31 +03:00
int bpf_prog_offload_verifier_prep ( struct bpf_prog * prog )
2017-11-03 23:56:17 +03:00
{
2018-11-09 16:03:28 +03:00
struct bpf_prog_offload * offload ;
int ret = - ENODEV ;
2017-11-03 23:56:17 +03:00
2018-11-09 16:03:28 +03:00
down_read ( & bpf_devs_lock ) ;
2018-11-09 16:03:31 +03:00
offload = prog - > aux - > offload ;
2018-11-13 12:29:26 +03:00
if ( offload ) {
2018-11-09 16:03:32 +03:00
ret = offload - > offdev - > ops - > prepare ( prog ) ;
2018-11-13 12:29:26 +03:00
offload - > dev_state = ! ret ;
}
2018-11-09 16:03:28 +03:00
up_read ( & bpf_devs_lock ) ;
2017-11-03 23:56:17 +03:00
2018-11-09 16:03:28 +03:00
return ret ;
2017-11-03 23:56:17 +03:00
}
2017-12-28 05:39:05 +03:00
int bpf_prog_offload_verify_insn ( struct bpf_verifier_env * env ,
int insn_idx , int prev_insn_idx )
{
2018-01-12 07:29:07 +03:00
struct bpf_prog_offload * offload ;
2017-12-28 05:39:05 +03:00
int ret = - ENODEV ;
down_read ( & bpf_devs_lock ) ;
offload = env - > prog - > aux - > offload ;
2017-12-28 05:39:06 +03:00
if ( offload )
2018-11-09 16:03:26 +03:00
ret = offload - > offdev - > ops - > insn_hook ( env , insn_idx ,
prev_insn_idx ) ;
2017-12-28 05:39:05 +03:00
up_read ( & bpf_devs_lock ) ;
return ret ;
}
2018-10-07 14:56:47 +03:00
int bpf_prog_offload_finalize ( struct bpf_verifier_env * env )
{
struct bpf_prog_offload * offload ;
int ret = - ENODEV ;
down_read ( & bpf_devs_lock ) ;
offload = env - > prog - > aux - > offload ;
if ( offload ) {
2018-11-09 16:03:27 +03:00
if ( offload - > offdev - > ops - > finalize )
ret = offload - > offdev - > ops - > finalize ( env ) ;
2018-10-07 14:56:47 +03:00
else
ret = 0 ;
}
up_read ( & bpf_devs_lock ) ;
return ret ;
}
2019-01-23 09:45:24 +03:00
/* Notify the driver that the verifier replaced instruction @off.
 * Once any optimization callback fails, opt_failed latches and all
 * further optimization notifications are skipped.
 */
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn)
{
	struct bpf_prog_offload *offload;
	int err = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		const struct bpf_prog_offload_ops *ops = offload->offdev->ops;

		if (ops->replace_insn && !offload->opt_failed)
			err = ops->replace_insn(env, off, insn);
		offload->opt_failed |= err;
	}
	up_read(&bpf_devs_lock);
}
/* Notify the driver that the verifier removed @cnt instructions at @off.
 * Skipped (and opt_failed latched) once any optimization callback failed.
 */
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
	struct bpf_prog_offload *offload;
	int err = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		const struct bpf_prog_offload_ops *ops = offload->offdev->ops;

		if (ops->remove_insns && !offload->opt_failed)
			err = ops->remove_insns(env, off, cnt);
		offload->opt_failed |= err;
	}
	up_read(&bpf_devs_lock);
}
2017-11-03 23:56:17 +03:00
/* Tear down device state for @prog and unlink it from its netdev's list.
 * Caller must hold bpf_devs_lock for writing and guarantee
 * prog->aux->offload is non-NULL.
 */
static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;

	/* Only call the driver if prepare() succeeded earlier */
	if (offload->dev_state)
		offload->offdev->ops->destroy(prog);

	/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
	bpf_prog_free_id(prog, true);

	list_del_init(&offload->offloads);
	kfree(offload);
	prog->aux->offload = NULL;
}
/* Public teardown entry point; the offload may already be gone if the
 * netdev was unregistered, hence the NULL check under the lock.
 */
void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	down_write(&bpf_devs_lock);
	if (prog->aux->offload)
		__bpf_prog_offload_destroy(prog);
	up_write(&bpf_devs_lock);
}
static int bpf_prog_offload_translate ( struct bpf_prog * prog )
{
2018-11-09 16:03:29 +03:00
struct bpf_prog_offload * offload ;
int ret = - ENODEV ;
2017-11-03 23:56:17 +03:00
2018-11-09 16:03:29 +03:00
down_read ( & bpf_devs_lock ) ;
offload = prog - > aux - > offload ;
if ( offload )
2018-11-09 16:03:32 +03:00
ret = offload - > offdev - > ops - > translate ( prog ) ;
2018-11-09 16:03:29 +03:00
up_read ( & bpf_devs_lock ) ;
2017-11-03 23:56:17 +03:00
return ret ;
}
static unsigned int bpf_prog_warn_on_exec ( const void * ctx ,
const struct bpf_insn * insn )
{
WARN ( 1 , " attempt to execute device eBPF program on the host! " ) ;
return 0 ;
}
/* "JIT" step for offloaded programs: install the host-side trap function
 * and hand the program to the device driver for translation.
 */
int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}
2017-12-28 05:39:09 +03:00
/* Argument bundle for the ns_get_path_cb() callback below. */
struct ns_get_path_bpf_prog_args {
	struct bpf_prog *prog;
	struct bpf_prog_info *info;
};

/* ns_get_path_cb() callback: report the prog's bound ifindex and return
 * the netns of the bound netdev (with a reference), or NULL if the
 * program is no longer device-bound.
 * Takes RTNL before bpf_devs_lock — that ordering is mandatory here.
 */
static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_prog_args *args = private_data;
	struct bpf_prog_aux *aux = args->prog->aux;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (aux->offload) {
		args->info->ifindex = aux->offload->netdev->ifindex;
		net = dev_net(aux->offload->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}
/* Fill netns identification and the device-JITed image into @info for an
 * offloaded program.  Returns 0 on success, -ENODEV if the program is no
 * longer device-bound, -EFAULT on copy_to_user() failure, or the error
 * from ns_get_path_cb().
 */
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{
	struct ns_get_path_bpf_prog_args args = {
		.prog	= prog,
		.info	= info,
	};
	struct bpf_prog_aux *aux = prog->aux;
	struct inode *ns_inode;
	struct path ns_path;
	char __user *uinsns;
	int res;
	u32 ulen;

	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (res) {
		/* ifindex == 0 means the callback saw no offload */
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	down_read(&bpf_devs_lock);
	/* Re-check: the offload may have been torn down since the callback */
	if (!aux->offload) {
		up_read(&bpf_devs_lock);
		return -ENODEV;
	}

	/* ulen is the user's buffer size; report the real length either way */
	ulen = info->jited_prog_len;
	info->jited_prog_len = aux->offload->jited_len;
	if (info->jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info->jited_prog_insns);
		ulen = min_t(u32, info->jited_prog_len, ulen);
		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
			up_read(&bpf_devs_lock);
			return -EFAULT;
		}
	}
	up_read(&bpf_devs_lock);

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}
2017-11-03 23:56:17 +03:00
/* Offloaded programs run on the device, so there are no host prog_ops. */
const struct bpf_prog_ops bpf_offload_prog_ops = {
};
2018-01-12 07:29:09 +03:00
static int bpf_map_offload_ndo ( struct bpf_offloaded_map * offmap ,
enum bpf_netdev_command cmd )
{
struct netdev_bpf data = { } ;
struct net_device * netdev ;
ASSERT_RTNL ( ) ;
data . command = cmd ;
data . offmap = offmap ;
/* Caller must make sure netdev is valid */
netdev = offmap - > netdev ;
return netdev - > netdev_ops - > ndo_bpf ( netdev , & data ) ;
}
struct bpf_map * bpf_map_offload_map_alloc ( union bpf_attr * attr )
{
struct net * net = current - > nsproxy - > net_ns ;
2018-07-17 20:53:24 +03:00
struct bpf_offload_netdev * ondev ;
2018-01-12 07:29:09 +03:00
struct bpf_offloaded_map * offmap ;
int err ;
if ( ! capable ( CAP_SYS_ADMIN ) )
return ERR_PTR ( - EPERM ) ;
2018-01-18 06:13:27 +03:00
if ( attr - > map_type ! = BPF_MAP_TYPE_ARRAY & &
attr - > map_type ! = BPF_MAP_TYPE_HASH )
2018-01-12 07:29:09 +03:00
return ERR_PTR ( - EINVAL ) ;
offmap = kzalloc ( sizeof ( * offmap ) , GFP_USER ) ;
if ( ! offmap )
return ERR_PTR ( - ENOMEM ) ;
bpf_map_init_from_attr ( & offmap - > map , attr ) ;
rtnl_lock ( ) ;
down_write ( & bpf_devs_lock ) ;
offmap - > netdev = __dev_get_by_index ( net , attr - > map_ifindex ) ;
err = bpf_dev_offload_check ( offmap - > netdev ) ;
if ( err )
goto err_unlock ;
2018-07-17 20:53:24 +03:00
ondev = bpf_offload_find_netdev ( offmap - > netdev ) ;
if ( ! ondev ) {
err = - EINVAL ;
goto err_unlock ;
}
2018-01-12 07:29:09 +03:00
err = bpf_map_offload_ndo ( offmap , BPF_OFFLOAD_MAP_ALLOC ) ;
if ( err )
goto err_unlock ;
2018-07-17 20:53:24 +03:00
list_add_tail ( & offmap - > offloads , & ondev - > maps ) ;
2018-01-12 07:29:09 +03:00
up_write ( & bpf_devs_lock ) ;
rtnl_unlock ( ) ;
return & offmap - > map ;
err_unlock :
up_write ( & bpf_devs_lock ) ;
rtnl_unlock ( ) ;
kfree ( offmap ) ;
return ERR_PTR ( err ) ;
}
/* Free the device side of @offmap and unlink it from its netdev's list.
 * Caller must hold RTNL (for the ndo) and bpf_devs_lock for writing.
 * Clearing offmap->netdev marks the map as dead for the accessors below.
 */
static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
	bpf_map_free_id(&offmap->map, true);
	list_del_init(&offmap->offloads);
	offmap->netdev = NULL;
}
/* Final free of an offloaded map.  The device side may already be gone
 * (netdev unregistered), hence the NULL netdev check under the locks.
 */
void bpf_map_offload_map_free(struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (offmap->netdev)
		__bpf_map_offload_destroy(offmap);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	kfree(offmap);
}
int bpf_map_offload_lookup_elem ( struct bpf_map * map , void * key , void * value )
{
struct bpf_offloaded_map * offmap = map_to_offmap ( map ) ;
int ret = - ENODEV ;
down_read ( & bpf_devs_lock ) ;
if ( offmap - > netdev )
ret = offmap - > dev_ops - > map_lookup_elem ( offmap , key , value ) ;
up_read ( & bpf_devs_lock ) ;
return ret ;
}
/* Update @key in the offloaded map via the driver's map ops.
 * Only BPF_ANY/BPF_NOEXIST/BPF_EXIST flags are accepted.
 * Returns -ENODEV once the device side has been torn down.
 */
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int err;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	down_read(&bpf_devs_lock);
	err = offmap->netdev ? offmap->dev_ops->map_update_elem(offmap, key,
								value, flags)
			     : -ENODEV;
	up_read(&bpf_devs_lock);

	return err;
}
int bpf_map_offload_delete_elem ( struct bpf_map * map , void * key )
{
struct bpf_offloaded_map * offmap = map_to_offmap ( map ) ;
int ret = - ENODEV ;
down_read ( & bpf_devs_lock ) ;
if ( offmap - > netdev )
ret = offmap - > dev_ops - > map_delete_elem ( offmap , key ) ;
up_read ( & bpf_devs_lock ) ;
return ret ;
}
int bpf_map_offload_get_next_key ( struct bpf_map * map , void * key , void * next_key )
{
struct bpf_offloaded_map * offmap = map_to_offmap ( map ) ;
int ret = - ENODEV ;
down_read ( & bpf_devs_lock ) ;
if ( offmap - > netdev )
ret = offmap - > dev_ops - > map_get_next_key ( offmap , key , next_key ) ;
up_read ( & bpf_devs_lock ) ;
return ret ;
}
2018-01-18 06:13:28 +03:00
/* Argument bundle for the ns_get_path_cb() callback below. */
struct ns_get_path_bpf_map_args {
	struct bpf_offloaded_map *offmap;
	struct bpf_map_info *info;
};

/* ns_get_path_cb() callback: report the map's bound ifindex and return
 * the netns of the bound netdev (with a reference), or NULL if the
 * device side was already torn down.
 * Takes RTNL before bpf_devs_lock — that ordering is mandatory here.
 */
static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_map_args *args = private_data;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (args->offmap->netdev) {
		args->info->ifindex = args->offmap->netdev->ifindex;
		net = dev_net(args->offmap->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}
/* Fill netns identification into @info for an offloaded map.
 * Returns 0 on success, -ENODEV if the map lost its device binding,
 * or the error from ns_get_path_cb().
 */
int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct ns_get_path_bpf_map_args args = {
		.offmap	= map_to_offmap(map),
		.info	= info,
	};
	struct inode *ns_inode;
	struct path ns_path;
	int res;

	res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
	if (res) {
		/* ifindex == 0 means the callback saw no device binding */
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}
2018-07-17 20:53:26 +03:00
/* Check whether @prog may be used with @netdev: either the prog is bound
 * to that very netdev, or both netdevs belong to the same offload device
 * (same struct bpf_offload_dev).  Caller must hold bpf_devs_lock.
 */
static bool __bpf_offload_dev_match(struct bpf_prog *prog,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev1, *ondev2;
	struct bpf_prog_offload *offload;

	if (!bpf_prog_is_dev_bound(prog->aux))
		return false;

	offload = prog->aux->offload;
	if (!offload)
		return false;
	if (offload->netdev == netdev)
		return true;

	ondev1 = bpf_offload_find_netdev(offload->netdev);
	ondev2 = bpf_offload_find_netdev(netdev);

	return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
}
/* Locked wrapper around __bpf_offload_dev_match() for driver use. */
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
{
	bool match;

	down_read(&bpf_devs_lock);
	match = __bpf_offload_dev_match(prog, netdev);
	up_read(&bpf_devs_lock);

	return match;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);
bool bpf_offload_prog_map_match ( struct bpf_prog * prog , struct bpf_map * map )
{
struct bpf_offloaded_map * offmap ;
bool ret ;
if ( ! bpf_map_is_dev_bound ( map ) )
return bpf_map_offload_neutral ( map ) ;
2018-01-12 07:29:09 +03:00
offmap = map_to_offmap ( map ) ;
2018-07-17 20:53:26 +03:00
down_read ( & bpf_devs_lock ) ;
ret = __bpf_offload_dev_match ( prog , offmap - > netdev ) ;
2018-01-12 07:29:09 +03:00
up_read ( & bpf_devs_lock ) ;
return ret ;
}
2018-07-17 20:53:25 +03:00
/* Register @netdev as belonging to offload device @offdev.
 * Creates the per-netdev state, inserts it into the offdevs table and
 * links it onto the offdev's netdev list.
 * Returns 0 on success or a negative errno.
 */
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	int err;

	ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
	if (!ondev)
		return -ENOMEM;

	ondev->netdev = netdev;
	ondev->offdev = offdev;
	INIT_LIST_HEAD(&ondev->progs);
	INIT_LIST_HEAD(&ondev->maps);

	down_write(&bpf_devs_lock);
	err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
	if (err) {
		netdev_warn(netdev, "failed to register for BPF offload\n");
		goto err_unlock_free;
	}

	list_add(&ondev->offdev_netdevs, &offdev->netdevs);
	up_write(&bpf_devs_lock);
	return 0;

err_unlock_free:
	up_write(&bpf_devs_lock);
	kfree(ondev);
	return err;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);
2018-01-12 07:29:09 +03:00
2018-07-17 20:53:25 +03:00
/* Unregister @netdev from @offdev.  Programs and maps bound to the
 * departing netdev are migrated to another netdev of the same offload
 * device if one exists; otherwise they are destroyed.
 * Caller must hold RTNL (the map-free ndo requires it).
 */
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev, *altdev;
	struct bpf_offloaded_map *offmap, *mtmp;
	struct bpf_prog_offload *offload, *ptmp;

	ASSERT_RTNL();

	down_write(&bpf_devs_lock);
	ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
	if (WARN_ON(!ondev))
		goto unlock;

	WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));
	list_del(&ondev->offdev_netdevs);

	/* Try to move the objects to another netdev of the device */
	altdev = list_first_entry_or_null(&offdev->netdevs,
					  struct bpf_offload_netdev,
					  offdev_netdevs);
	if (altdev) {
		list_for_each_entry(offload, &ondev->progs, offloads)
			offload->netdev = altdev->netdev;
		list_splice_init(&ondev->progs, &altdev->progs);

		list_for_each_entry(offmap, &ondev->maps, offloads)
			offmap->netdev = altdev->netdev;
		list_splice_init(&ondev->maps, &altdev->maps);
	} else {
		/* No alternative netdev: destroy everything bound here */
		list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
			__bpf_prog_offload_destroy(offload->prog);
		list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
			__bpf_map_offload_destroy(offmap);
	}

	WARN_ON(!list_empty(&ondev->progs));
	WARN_ON(!list_empty(&ondev->maps));
	kfree(ondev);
unlock:
	up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);
2018-07-17 20:53:25 +03:00
2018-11-09 16:03:25 +03:00
/* Create an offload device handle for a driver with callbacks @ops and
 * driver-private pointer @priv.  Lazily initializes the global offdevs
 * table on first use.
 * Returns the new handle or an ERR_PTR().
 */
struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
{
	struct bpf_offload_dev *offdev;
	int err;

	down_write(&bpf_devs_lock);
	if (!offdevs_inited) {
		err = rhashtable_init(&offdevs, &offdevs_params);
		if (err) {
			up_write(&bpf_devs_lock);
			return ERR_PTR(err);
		}
		offdevs_inited = true;
	}
	up_write(&bpf_devs_lock);

	offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
	if (!offdev)
		return ERR_PTR(-ENOMEM);

	offdev->ops = ops;
	offdev->priv = priv;
	INIT_LIST_HEAD(&offdev->netdevs);

	return offdev;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);
/* Destroy an offload device handle.  All netdevs must have been
 * unregistered from it first.
 */
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{
	WARN_ON(!list_empty(&offdev->netdevs));
	kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);
2019-02-12 11:20:39 +03:00
/* Return the driver-private pointer passed to bpf_offload_dev_create(). */
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev)
{
	return offdev->priv;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_priv);