// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021. Huawei Technologies Co., Ltd
 */
#include <linux/kernel.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>

static struct bpf_struct_ops bpf_bpf_dummy_ops;

/* A common type for test_N with return value in bpf_dummy_ops */
typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *state, ...);

static int dummy_ops_test_ret_function(struct bpf_dummy_ops_state *state, ...)
{
	return 0;
}

struct bpf_dummy_ops_test_args {
	u64 args[MAX_BPF_FUNC_ARGS];
	struct bpf_dummy_ops_state state;
};

static struct btf *bpf_dummy_ops_btf;
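
/*
 * Build the in-kernel argument buffer for a test_N call: nr u64 arguments
 * are copied from kattr->test.ctx_in, and if args[0] carries a non-NULL
 * user pointer, the bpf_dummy_ops_state it points to is copied in as well.
 */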
static struct bpf_dummy_ops_test_args *
dummy_ops_init_args(const union bpf_attr *kattr, unsigned int nr)
{
	__u32 size_in;
	struct bpf_dummy_ops_test_args *args;
	void __user *ctx_in;
	void __user *u_state;

	size_in = kattr->test.ctx_size_in;
	if (size_in != sizeof(u64) * nr)
		return ERR_PTR(-EINVAL);

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args)
		return ERR_PTR(-ENOMEM);

	ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	if (copy_from_user(args->args, ctx_in, size_in))
		goto out;

	/* args[0] is 0 means state argument of test_N will be NULL */
	u_state = u64_to_user_ptr(args->args[0]);
	if (u_state && copy_from_user(&args->state, u_state,
				      sizeof(args->state)))
		goto out;

	return args;
out:
	kfree(args);
	return ERR_PTR(-EFAULT);
}
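
/* Copy the (possibly modified) state back to the user-provided buffer. */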
static int dummy_ops_copy_args(struct bpf_dummy_ops_test_args *args)
{
	void __user *u_state;

	u_state = u64_to_user_ptr(args->args[0]);
	if (u_state && copy_to_user(u_state, &args->state, sizeof(args->state)))
		return -EFAULT;

	return 0;
}
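
/*
 * Invoke the prepared trampoline image as a test_N function. The image may
 * start with a CFI preamble, so cfi_get_offset() is added to reach the real
 * entry point; arguments beyond what the attached op consumes are ignored.
 */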
static int dummy_ops_call_op(void *image, struct bpf_dummy_ops_test_args *args)
{
	dummy_ops_test_ret_fn test = (void *)image + cfi_get_offset();
	struct bpf_dummy_ops_state *state = NULL;

	/* state needs to be NULL if args[0] is 0 */
	if (args->args[0])
		state = &args->state;
	return test(state, args->args[1], args->args[2],
		    args->args[3], args->args[4]);
}

extern const struct bpf_link_ops bpf_struct_ops_link_lops;
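
/*
 * Entry point for BPF_PROG_TEST_RUN on a bpf_dummy_ops program: prepare a
 * trampoline for the op the program attaches to, call it with the
 * user-supplied arguments, then copy the state and return value back to
 * user space.
 */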
int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
			    union bpf_attr __user *uattr)
{
	const struct bpf_struct_ops *st_ops = &bpf_bpf_dummy_ops;
	const struct btf_type *func_proto;
	struct bpf_dummy_ops_test_args *args;
	struct bpf_tramp_links *tlinks;
	struct bpf_tramp_link *link = NULL;
	void *image = NULL;
	unsigned int op_idx;
	u32 image_off = 0;
	int prog_ret;
	s32 type_id;
	int err;

	type_id = btf_find_by_name_kind(bpf_dummy_ops_btf,
					bpf_bpf_dummy_ops.name,
					BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	if (prog->aux->attach_btf_id != type_id)
		return -EOPNOTSUPP;

	func_proto = prog->aux->attach_func_proto;
	args = dummy_ops_init_args(kattr, btf_type_vlen(func_proto));
	if (IS_ERR(args))
		return PTR_ERR(args);

	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
	if (!tlinks) {
		err = -ENOMEM;
		goto out;
	}

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out;
	}
	/* prog doesn't take the ownership of the reference from caller */
	bpf_prog_inc(prog);
	bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_link_lops, prog);

	op_idx = prog->expected_attach_type;
	err = bpf_struct_ops_prepare_trampoline(tlinks, link,
						&st_ops->func_models[op_idx],
						&dummy_ops_test_ret_function,
						&image, &image_off,
						true);
	if (err < 0)
		goto out;

	arch_protect_bpf_trampoline(image, PAGE_SIZE);
	prog_ret = dummy_ops_call_op(image, args);

	err = dummy_ops_copy_args(args);
	if (err)
		goto out;
	if (put_user(prog_ret, &uattr->test.retval))
		err = -EFAULT;
out:
	kfree(args);
	bpf_struct_ops_image_free(image);
	if (link)
		bpf_link_put(&link->link);
	kfree(tlinks);
	return err;
}

static int bpf_dummy_init(struct btf *btf)
{
	bpf_dummy_ops_btf = btf;
	return 0;
}

static bool bpf_dummy_ops_is_valid_access(int off, int size,
					   enum bpf_access_type type,
					   const struct bpf_prog *prog,
					   struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}
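
/* Only the test_sleepable op may be implemented by a sleepable program. */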
static int bpf_dummy_ops_check_member(const struct btf_type *t,
				      const struct btf_member *member,
				      const struct bpf_prog *prog)
{
	u32 moff = __btf_member_bit_offset(t, member) / 8;

	switch (moff) {
	case offsetof(struct bpf_dummy_ops, test_sleepable):
		break;
	default:
		if (prog->sleepable)
			return -EINVAL;
	}

	return 0;
}
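
/*
 * Restrict direct context memory access: only struct bpf_dummy_ops_state may
 * be touched, and the access must stay within the bounds of that struct.
 */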
static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
					   const struct bpf_reg_state *reg,
					   int off, int size)
{
	const struct btf_type *state;
	const struct btf_type *t;
	s32 type_id;

	type_id = btf_find_by_name_kind(reg->btf, "bpf_dummy_ops_state",
					BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;

	t = btf_type_by_id(reg->btf, reg->btf_id);
	state = btf_type_by_id(reg->btf, type_id);
	if (t != state) {
		bpf_log(log, "only access to bpf_dummy_ops_state is supported\n");
		return -EACCES;
	}

	if (off + size > sizeof(struct bpf_dummy_ops_state)) {
		bpf_log(log, "write access at off %d with size %d\n", off, size);
		return -EACCES;
	}

	return NOT_INIT;
}

static const struct bpf_verifier_ops bpf_dummy_verifier_ops = {
	.is_valid_access = bpf_dummy_ops_is_valid_access,
	.btf_struct_access = bpf_dummy_ops_btf_struct_access,
};

static int bpf_dummy_init_member(const struct btf_type *t,
				 const struct btf_member *member,
				 void *kdata, const void *udata)
{
	return -EOPNOTSUPP;
}

static int bpf_dummy_reg(void *kdata)
{
	return -EOPNOTSUPP;
}

static void bpf_dummy_unreg(void *kdata)
{
}
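
/*
 * CFI stubs: kernel-side functions whose signatures match the bpf_dummy_ops
 * members, so that indirect calls through the ops table pass kernel
 * control-flow-integrity checks.
 */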
static int bpf_dummy_test_1(struct bpf_dummy_ops_state *cb)
{
	return 0;
}

static int bpf_dummy_test_2(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
			    char a3, unsigned long a4)
{
	return 0;
}

static int bpf_dummy_test_sleepable(struct bpf_dummy_ops_state *cb)
{
	return 0;
}

static struct bpf_dummy_ops __bpf_bpf_dummy_ops = {
	.test_1 = bpf_dummy_test_1,
	.test_2 = bpf_dummy_test_2,
	.test_sleepable = bpf_dummy_test_sleepable,
};
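
/*
 * The dummy struct_ops type itself. reg() returns -EOPNOTSUPP, so it cannot
 * actually be attached; it exists to exercise struct_ops via
 * BPF_PROG_TEST_RUN.
 */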
static struct bpf_struct_ops bpf_bpf_dummy_ops = {
	.verifier_ops = &bpf_dummy_verifier_ops,
	.init = bpf_dummy_init,
	.check_member = bpf_dummy_ops_check_member,
	.init_member = bpf_dummy_init_member,
	.reg = bpf_dummy_reg,
	.unreg = bpf_dummy_unreg,
	.name = "bpf_dummy_ops",
	.cfi_stubs = &__bpf_bpf_dummy_ops,
	.owner = THIS_MODULE,
};

static int __init bpf_dummy_struct_ops_init(void)
{
	return register_bpf_struct_ops(&bpf_bpf_dummy_ops, bpf_dummy_ops);
}
late_initcall(bpf_dummy_struct_ops_init);