2020-05-09 20:59:23 +03:00
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
# include <test_progs.h>
# include "bpf_iter_ipv6_route.skel.h"
# include "bpf_iter_netlink.skel.h"
# include "bpf_iter_bpf_map.skel.h"
# include "bpf_iter_task.skel.h"
2020-06-30 09:28:46 +03:00
# include "bpf_iter_task_stack.skel.h"
2020-05-09 20:59:23 +03:00
# include "bpf_iter_task_file.skel.h"
2021-02-12 21:31:07 +03:00
# include "bpf_iter_task_vma.skel.h"
2020-09-28 14:31:10 +03:00
# include "bpf_iter_task_btf.skel.h"
2020-06-24 02:08:23 +03:00
# include "bpf_iter_tcp4.skel.h"
# include "bpf_iter_tcp6.skel.h"
# include "bpf_iter_udp4.skel.h"
# include "bpf_iter_udp6.skel.h"
2021-08-14 04:57:17 +03:00
# include "bpf_iter_unix.skel.h"
2020-05-09 20:59:23 +03:00
# include "bpf_iter_test_kern1.skel.h"
# include "bpf_iter_test_kern2.skel.h"
# include "bpf_iter_test_kern3.skel.h"
# include "bpf_iter_test_kern4.skel.h"
2020-07-23 21:41:20 +03:00
# include "bpf_iter_bpf_hash_map.skel.h"
# include "bpf_iter_bpf_percpu_hash_map.skel.h"
2020-07-23 21:41:21 +03:00
# include "bpf_iter_bpf_array_map.skel.h"
# include "bpf_iter_bpf_percpu_array_map.skel.h"
2020-12-04 14:36:07 +03:00
# include "bpf_iter_bpf_sk_storage_helpers.skel.h"
2020-07-23 21:41:22 +03:00
# include "bpf_iter_bpf_sk_storage_map.skel.h"
2020-07-23 21:41:24 +03:00
# include "bpf_iter_test_kern5.skel.h"
2020-07-29 01:18:01 +03:00
# include "bpf_iter_test_kern6.skel.h"
2022-05-10 18:52:33 +03:00
# include "bpf_iter_bpf_link.skel.h"
2022-07-12 15:31:45 +03:00
# include "bpf_iter_ksym.skel.h"
2020-05-09 20:59:23 +03:00
static int duration ;
/* bpf_iter_test_kern3 is expected to be rejected at load time; destroy the
 * skeleton only on the unexpected-success path.
 */
static void test_btf_id_or_null(void)
{
	struct bpf_iter_test_kern3 *skel = bpf_iter_test_kern3__open_and_load();

	if (ASSERT_ERR_PTR(skel, "bpf_iter_test_kern3__open_and_load"))
		return;

	/* load unexpectedly succeeded; clean up */
	bpf_iter_test_kern3__destroy(skel);
}
/* Attach @prog as an iterator, create an iterator fd from it and drain it.
 * The contents are not checked; the test only requires that read() reaches
 * EOF without error.
 */
static void do_dummy_read(struct bpf_program *prog)
{
	char rdbuf[16] = {};
	struct bpf_link *link;
	int fd, n;

	link = bpf_program__attach_iter(prog, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return;

	fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(fd, 0, "create_iter"))
		goto free_link;

	/* drain until EOF (0) or error (<0) */
	do {
		n = read(fd, rdbuf, sizeof(rdbuf));
	} while (n > 0);
	CHECK(n < 0, "read", "read failed: %s\n", strerror(errno));
	close(fd);

free_link:
	bpf_link__destroy(link);
}
2021-02-12 21:31:07 +03:00
/* Read from @fd into @buf until EOF, error, or @size bytes are consumed.
 * Returns the total number of bytes read, or the (negative) result of the
 * failing read() call.
 */
static int read_fd_into_buffer(int fd, char *buf, int size)
{
	int remaining = size;
	int nread;

	for (;;) {
		nread = read(fd, buf, remaining);
		if (nread <= 0)
			break;
		buf += nread;
		remaining -= nread;
	}

	return nread < 0 ? nread : size - remaining;
}
2020-05-09 20:59:23 +03:00
static void test_ipv6_route ( void )
{
struct bpf_iter_ipv6_route * skel ;
skel = bpf_iter_ipv6_route__open_and_load ( ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK_PTR ( skel , " bpf_iter_ipv6_route__open_and_load " ) )
2020-05-09 20:59:23 +03:00
return ;
do_dummy_read ( skel - > progs . dump_ipv6_route ) ;
bpf_iter_ipv6_route__destroy ( skel ) ;
}
static void test_netlink ( void )
{
struct bpf_iter_netlink * skel ;
skel = bpf_iter_netlink__open_and_load ( ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK_PTR ( skel , " bpf_iter_netlink__open_and_load " ) )
2020-05-09 20:59:23 +03:00
return ;
do_dummy_read ( skel - > progs . dump_netlink ) ;
bpf_iter_netlink__destroy ( skel ) ;
}
static void test_bpf_map ( void )
{
struct bpf_iter_bpf_map * skel ;
skel = bpf_iter_bpf_map__open_and_load ( ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK_PTR ( skel , " bpf_iter_bpf_map__open_and_load " ) )
2020-05-09 20:59:23 +03:00
return ;
do_dummy_read ( skel - > progs . dump_bpf_map ) ;
bpf_iter_bpf_map__destroy ( skel ) ;
}
static void test_task ( void )
{
struct bpf_iter_task * skel ;
skel = bpf_iter_task__open_and_load ( ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK_PTR ( skel , " bpf_iter_task__open_and_load " ) )
2020-05-09 20:59:23 +03:00
return ;
do_dummy_read ( skel - > progs . dump_task ) ;
bpf_iter_task__destroy ( skel ) ;
}
2022-01-24 21:54:03 +03:00
static void test_task_sleepable ( void )
{
struct bpf_iter_task * skel ;
skel = bpf_iter_task__open_and_load ( ) ;
if ( ! ASSERT_OK_PTR ( skel , " bpf_iter_task__open_and_load " ) )
return ;
do_dummy_read ( skel - > progs . dump_task_sleepable ) ;
ASSERT_GT ( skel - > bss - > num_expected_failure_copy_from_user_task , 0 ,
" num_expected_failure_copy_from_user_task " ) ;
ASSERT_GT ( skel - > bss - > num_success_copy_from_user_task , 0 ,
" num_success_copy_from_user_task " ) ;
bpf_iter_task__destroy ( skel ) ;
}
2020-06-30 09:28:46 +03:00
static void test_task_stack ( void )
{
struct bpf_iter_task_stack * skel ;
skel = bpf_iter_task_stack__open_and_load ( ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK_PTR ( skel , " bpf_iter_task_stack__open_and_load " ) )
2020-06-30 09:28:46 +03:00
return ;
do_dummy_read ( skel - > progs . dump_task_stack ) ;
2021-04-16 23:47:04 +03:00
do_dummy_read ( skel - > progs . get_task_user_stacks ) ;
2020-06-30 09:28:46 +03:00
bpf_iter_task_stack__destroy ( skel ) ;
}
2020-09-02 05:31:13 +03:00
static void * do_nothing ( void * arg )
{
pthread_exit ( arg ) ;
}
2020-05-09 20:59:23 +03:00
static void test_task_file ( void )
{
struct bpf_iter_task_file * skel ;
2020-09-02 05:31:13 +03:00
pthread_t thread_id ;
void * ret ;
2020-05-09 20:59:23 +03:00
skel = bpf_iter_task_file__open_and_load ( ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK_PTR ( skel , " bpf_iter_task_file__open_and_load " ) )
2020-05-09 20:59:23 +03:00
return ;
2020-09-02 05:31:13 +03:00
skel - > bss - > tgid = getpid ( ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK ( pthread_create ( & thread_id , NULL , & do_nothing , NULL ) ,
" pthread_create " ) )
2020-09-02 05:31:13 +03:00
goto done ;
2020-05-09 20:59:23 +03:00
do_dummy_read ( skel - > progs . dump_task_file ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_FALSE ( pthread_join ( thread_id , & ret ) | | ret ! = NULL ,
" pthread_join " ) )
2020-09-02 05:31:13 +03:00
goto done ;
2022-05-10 18:52:32 +03:00
ASSERT_EQ ( skel - > bss - > count , 0 , " check_count " ) ;
2020-09-02 05:31:13 +03:00
done :
2020-05-09 20:59:23 +03:00
bpf_iter_task_file__destroy ( skel ) ;
}
2020-09-28 14:31:10 +03:00
# define TASKBUFSZ 32768
static char taskbuf [ TASKBUFSZ ] ;
2020-09-29 15:30:04 +03:00
static int do_btf_read ( struct bpf_iter_task_btf * skel )
2020-09-28 14:31:10 +03:00
{
struct bpf_program * prog = skel - > progs . dump_task_struct ;
struct bpf_iter_task_btf__bss * bss = skel - > bss ;
2021-02-12 21:31:07 +03:00
int iter_fd = - 1 , err ;
2020-09-28 14:31:10 +03:00
struct bpf_link * link ;
char * buf = taskbuf ;
2020-09-29 15:30:04 +03:00
int ret = 0 ;
2020-09-28 14:31:10 +03:00
link = bpf_program__attach_iter ( prog , NULL ) ;
2021-05-25 06:59:32 +03:00
if ( ! ASSERT_OK_PTR ( link , " attach_iter " ) )
2020-09-29 15:30:04 +03:00
return ret ;
2020-09-28 14:31:10 +03:00
iter_fd = bpf_iter_create ( bpf_link__fd ( link ) ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_GE ( iter_fd , 0 , " create_iter " ) )
2020-09-28 14:31:10 +03:00
goto free_link ;
2021-02-12 21:31:07 +03:00
err = read_fd_into_buffer ( iter_fd , buf , TASKBUFSZ ) ;
2020-09-28 14:31:10 +03:00
if ( bss - > skip ) {
printf ( " %s:SKIP:no __builtin_btf_type_id \n " , __func__ ) ;
2020-09-29 15:30:04 +03:00
ret = 1 ;
2020-09-28 14:31:10 +03:00
test__skip ( ) ;
goto free_link ;
}
2021-02-12 21:31:07 +03:00
if ( CHECK ( err < 0 , " read " , " read failed: %s \n " , strerror ( errno ) ) )
2020-09-28 14:31:10 +03:00
goto free_link ;
2022-05-10 18:52:32 +03:00
ASSERT_HAS_SUBSTR ( taskbuf , " (struct task_struct) " ,
" check for btf representation of task_struct in iter data " ) ;
2020-09-28 14:31:10 +03:00
free_link :
if ( iter_fd > 0 )
close ( iter_fd ) ;
bpf_link__destroy ( link ) ;
2020-09-29 15:30:04 +03:00
return ret ;
2020-09-28 14:31:10 +03:00
}
static void test_task_btf ( void )
{
struct bpf_iter_task_btf__bss * bss ;
struct bpf_iter_task_btf * skel ;
2020-09-29 15:30:04 +03:00
int ret ;
2020-09-28 14:31:10 +03:00
skel = bpf_iter_task_btf__open_and_load ( ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK_PTR ( skel , " bpf_iter_task_btf__open_and_load " ) )
2020-09-28 14:31:10 +03:00
return ;
bss = skel - > bss ;
2020-09-29 15:30:04 +03:00
ret = do_btf_read ( skel ) ;
if ( ret )
goto cleanup ;
2020-09-28 14:31:10 +03:00
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_NEQ ( bss - > tasks , 0 , " no task iteration, did BPF program run? " ) )
2020-09-28 14:31:10 +03:00
goto cleanup ;
2022-05-10 18:52:32 +03:00
ASSERT_EQ ( bss - > seq_err , 0 , " check for unexpected err " ) ;
2020-09-28 14:31:10 +03:00
cleanup :
bpf_iter_task_btf__destroy ( skel ) ;
}
2020-06-24 02:08:23 +03:00
static void test_tcp4 ( void )
{
struct bpf_iter_tcp4 * skel ;
skel = bpf_iter_tcp4__open_and_load ( ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK_PTR ( skel , " bpf_iter_tcp4__open_and_load " ) )
2020-06-24 02:08:23 +03:00
return ;
do_dummy_read ( skel - > progs . dump_tcp4 ) ;
bpf_iter_tcp4__destroy ( skel ) ;
}
static void test_tcp6 ( void )
{
struct bpf_iter_tcp6 * skel ;
skel = bpf_iter_tcp6__open_and_load ( ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK_PTR ( skel , " bpf_iter_tcp6__open_and_load " ) )
2020-06-24 02:08:23 +03:00
return ;
do_dummy_read ( skel - > progs . dump_tcp6 ) ;
bpf_iter_tcp6__destroy ( skel ) ;
}
static void test_udp4 ( void )
{
struct bpf_iter_udp4 * skel ;
skel = bpf_iter_udp4__open_and_load ( ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK_PTR ( skel , " bpf_iter_udp4__open_and_load " ) )
2020-06-24 02:08:23 +03:00
return ;
do_dummy_read ( skel - > progs . dump_udp4 ) ;
bpf_iter_udp4__destroy ( skel ) ;
}
static void test_udp6 ( void )
{
struct bpf_iter_udp6 * skel ;
skel = bpf_iter_udp6__open_and_load ( ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK_PTR ( skel , " bpf_iter_udp6__open_and_load " ) )
2020-06-24 02:08:23 +03:00
return ;
do_dummy_read ( skel - > progs . dump_udp6 ) ;
bpf_iter_udp6__destroy ( skel ) ;
}
2021-08-14 04:57:17 +03:00
static void test_unix ( void )
{
struct bpf_iter_unix * skel ;
skel = bpf_iter_unix__open_and_load ( ) ;
if ( ! ASSERT_OK_PTR ( skel , " bpf_iter_unix__open_and_load " ) )
return ;
do_dummy_read ( skel - > progs . dump_unix ) ;
bpf_iter_unix__destroy ( skel ) ;
}
2020-05-09 20:59:23 +03:00
/* Drain @iter_fd (one byte at a time when @read_one_char is set, otherwise
 * as much as fits) into a 16-byte buffer and compare the result against
 * @expected. The expected string is less than 16 bytes.
 * Returns 0 on match, -1 on read error, overflow, or mismatch.
 */
static int do_read_with_fd(int iter_fd, const char *expected,
			   bool read_one_char)
{
	char buf[16] = {};
	int chunk = read_one_char ? 1 : 16;
	int total = 0;
	int len;

	for (;;) {
		len = read(iter_fd, buf + total, chunk);
		if (len <= 0)
			break;
		total += len;
		/* more data than the expected (<16 byte) string can hold */
		if (CHECK(total >= 16, "read", "read len %d\n", len))
			return -1;
		chunk = read_one_char ? 1 : 16 - total;
	}
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		return -1;

	if (!ASSERT_STREQ(buf, expected, "read"))
		return -1;

	return 0;
}
static void test_anon_iter ( bool read_one_char )
{
struct bpf_iter_test_kern1 * skel ;
struct bpf_link * link ;
int iter_fd , err ;
skel = bpf_iter_test_kern1__open_and_load ( ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK_PTR ( skel , " bpf_iter_test_kern1__open_and_load " ) )
2020-05-09 20:59:23 +03:00
return ;
err = bpf_iter_test_kern1__attach ( skel ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK ( err , " bpf_iter_test_kern1__attach " ) ) {
2020-05-09 20:59:23 +03:00
goto out ;
}
link = skel - > links . dump_task ;
iter_fd = bpf_iter_create ( bpf_link__fd ( link ) ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_GE ( iter_fd , 0 , " create_iter " ) )
2020-05-09 20:59:23 +03:00
goto out ;
do_read_with_fd ( iter_fd , " abcd " , read_one_char ) ;
close ( iter_fd ) ;
out :
bpf_iter_test_kern1__destroy ( skel ) ;
}
/* Open a pinned iterator at @path and compare its output with @expected.
 * Returns do_read_with_fd()'s result, or -1 if the open fails.
 */
static int do_read(const char *path, const char *expected)
{
	int iter_fd, ret;

	iter_fd = open(path, O_RDONLY);
	if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n",
		  path, strerror(errno)))
		return -1;

	ret = do_read_with_fd(iter_fd, expected, false);
	close(iter_fd);
	return ret;
}
static void test_file_iter ( void )
{
const char * path = " /sys/fs/bpf/bpf_iter_test1 " ;
struct bpf_iter_test_kern1 * skel1 ;
struct bpf_iter_test_kern2 * skel2 ;
struct bpf_link * link ;
int err ;
skel1 = bpf_iter_test_kern1__open_and_load ( ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK_PTR ( skel1 , " bpf_iter_test_kern1__open_and_load " ) )
2020-05-09 20:59:23 +03:00
return ;
link = bpf_program__attach_iter ( skel1 - > progs . dump_task , NULL ) ;
2021-05-25 06:59:32 +03:00
if ( ! ASSERT_OK_PTR ( link , " attach_iter " ) )
2020-05-09 20:59:23 +03:00
goto out ;
/* unlink this path if it exists. */
unlink ( path ) ;
err = bpf_link__pin ( link , path ) ;
if ( CHECK ( err , " pin_iter " , " pin_iter to %s failed: %d \n " , path , err ) )
goto free_link ;
err = do_read ( path , " abcd " ) ;
if ( err )
goto unlink_path ;
/* file based iterator seems working fine. Let us a link update
* of the underlying link and ` cat ` the iterator again , its content
* should change .
*/
skel2 = bpf_iter_test_kern2__open_and_load ( ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK_PTR ( skel2 , " bpf_iter_test_kern2__open_and_load " ) )
2020-05-09 20:59:23 +03:00
goto unlink_path ;
err = bpf_link__update_program ( link , skel2 - > progs . dump_task ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK ( err , " update_prog " ) )
2020-05-09 20:59:23 +03:00
goto destroy_skel2 ;
do_read ( path , " ABCD " ) ;
destroy_skel2 :
bpf_iter_test_kern2__destroy ( skel2 ) ;
unlink_path :
unlink ( path ) ;
free_link :
bpf_link__destroy ( link ) ;
out :
bpf_iter_test_kern1__destroy ( skel1 ) ;
}
/* Exercise seq_file buffer overflow/restart handling in bpf_seq_read().
 *
 * Two array maps are created; the iterator program bpf_seq_write()s only
 * for these two maps. print_len (in 8-byte units) is tuned per mode:
 *  - test_e2big_overflow: a single map's output exceeds the seq buffer,
 *    so the final read must fail with E2BIG;
 *  - !ret1: output almost fills the buffer, forcing one overflow/restart
 *    but ultimately succeeding;
 *  - ret1: tiny output, plain success path.
 */
static void test_overflow(bool test_e2big_overflow, bool ret1)
{
	__u32 map_info_len, total_read_len, expected_read_len;
	int err, iter_fd, map1_fd, map2_fd, len;
	struct bpf_map_info map_info = {};
	struct bpf_iter_test_kern4 *skel;
	struct bpf_link *link;
	__u32 iter_size;
	char *buf;

	skel = bpf_iter_test_kern4__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern4__open"))
		return;

	/* create two maps: bpf program will only do bpf_seq_write
	 * for these two maps. The goal is one map output almost
	 * fills seq_file buffer and then the other will trigger
	 * overflow and needs restart.
	 */
	map1_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
	if (CHECK(map1_fd < 0, "bpf_map_create",
		  "map_creation failed: %s\n", strerror(errno)))
		goto out;
	map2_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
	if (CHECK(map2_fd < 0, "bpf_map_create",
		  "map_creation failed: %s\n", strerror(errno)))
		goto free_map1;

	/* bpf_seq_printf kernel buffer is 8 pages, so one map
	 * bpf_seq_write will mostly fill it, and the other map
	 * will partially fill and then trigger overflow and need
	 * bpf_seq_read restart.
	 */
	iter_size = sysconf(_SC_PAGE_SIZE) << 3;

	if (test_e2big_overflow) {
		/* per-map output slightly larger than the seq buffer */
		skel->rodata->print_len = (iter_size + 8) / 8;
		expected_read_len = 2 * (iter_size + 8);
	} else if (!ret1) {
		/* per-map output slightly smaller than the seq buffer */
		skel->rodata->print_len = (iter_size - 8) / 8;
		expected_read_len = 2 * (iter_size - 8);
	} else {
		skel->rodata->print_len = 1;
		expected_read_len = 2 * 8;
	}
	skel->rodata->ret1 = ret1;

	if (!ASSERT_OK(bpf_iter_test_kern4__load(skel),
		       "bpf_iter_test_kern4__load"))
		goto free_map2;

	/* setup filtering map_id in bpf program */
	map_info_len = sizeof(map_info);
	err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len);
	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
		  strerror(errno)))
		goto free_map2;
	skel->bss->map1_id = map_info.id;
	err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len);
	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
		  strerror(errno)))
		goto free_map2;
	skel->bss->map2_id = map_info.id;

	link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto free_map2;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	buf = malloc(expected_read_len);
	if (!buf)
		goto close_iter;

	/* do read */
	total_read_len = 0;
	if (test_e2big_overflow) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		/* oversized output must surface as E2BIG to userspace */
		CHECK(len != -1 || errno != E2BIG, "read",
		      "expected ret -1, errno E2BIG, but get ret %d, error %s\n",
		      len, strerror(errno));
		goto free_buf;
	} else if (!ret1) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		if (CHECK(len < 0, "read", "read failed: %s\n",
			  strerror(errno)))
			goto free_buf;
	} else {
		do {
			len = read(iter_fd, buf, expected_read_len);
			if (len > 0)
				total_read_len += len;
		/* NOTE(review): read() reports EAGAIN via errno with a -1
		 * return, so "len == -EAGAIN" looks like it can never be
		 * true -- confirm whether "errno == EAGAIN" was intended.
		 */
		} while (len > 0 || len == -EAGAIN);

		if (CHECK(len < 0, "read", "read failed: %s\n",
			  strerror(errno)))
			goto free_buf;
	}

	if (!ASSERT_EQ(total_read_len, expected_read_len, "read"))
		goto free_buf;

	/* map1 visited once; map2 visited twice due to the restart */
	if (!ASSERT_EQ(skel->bss->map1_accessed, 1, "map1_accessed"))
		goto free_buf;

	if (!ASSERT_EQ(skel->bss->map2_accessed, 2, "map2_accessed"))
		goto free_buf;

	ASSERT_EQ(skel->bss->map2_seqnum1, skel->bss->map2_seqnum2, "map2_seqnum");

free_buf:
	free(buf);
close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
free_map2:
	close(map2_fd);
free_map1:
	close(map1_fd);
out:
	bpf_iter_test_kern4__destroy(skel);
}
2020-07-23 21:41:20 +03:00
/* Attach a map-element iterator to hashmap1, populate the map, drain the
 * iterator, and compare the BPF program's accumulated key/value sums with
 * sums computed here. Also verifies that attaching the same program to
 * hashmap2/hashmap3 is rejected (incompatible maps, per the BPF program).
 */
static void test_bpf_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_hash_map *skel;
	int err, i, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u64 val, expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];

	skel = bpf_iter_bpf_hash_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_hash_map__open"))
		return;

	skel->bss->in_test_mode = true;

	err = bpf_iter_bpf_hash_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_hash_map__load"))
		goto out;

	/* iterator with hashmap2 and hashmap3 should fail */
	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	/* on the expected path link is an error pointer; nothing to free */
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	/* hashmap1 should be good, update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		val = i + 4;
		/* running totals the iterator program must reproduce */
		expected_key_a += key.a;
		expected_key_b += key.b;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	linfo.map.map_fd = map_fd;
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_hash_map__destroy(skel);
}
static void test_bpf_percpu_hash_map ( void )
{
2021-09-17 07:33:43 +03:00
__u32 expected_key_a = 0 , expected_key_b = 0 ;
2020-07-23 21:41:20 +03:00
DECLARE_LIBBPF_OPTS ( bpf_iter_attach_opts , opts ) ;
struct bpf_iter_bpf_percpu_hash_map * skel ;
int err , i , j , len , map_fd , iter_fd ;
2020-08-05 08:50:58 +03:00
union bpf_iter_link_info linfo ;
2020-07-23 21:41:20 +03:00
__u32 expected_val = 0 ;
struct bpf_link * link ;
struct key_t {
int a ;
int b ;
int c ;
} key ;
char buf [ 64 ] ;
void * val ;
skel = bpf_iter_bpf_percpu_hash_map__open ( ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK_PTR ( skel , " bpf_iter_bpf_percpu_hash_map__open " ) )
2020-07-23 21:41:20 +03:00
return ;
skel - > rodata - > num_cpus = bpf_num_possible_cpus ( ) ;
2021-11-07 19:55:16 +03:00
val = malloc ( 8 * bpf_num_possible_cpus ( ) ) ;
2020-07-23 21:41:20 +03:00
err = bpf_iter_bpf_percpu_hash_map__load ( skel ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK_PTR ( skel , " bpf_iter_bpf_percpu_hash_map__load " ) )
2020-07-23 21:41:20 +03:00
goto out ;
/* update map values here */
map_fd = bpf_map__fd ( skel - > maps . hashmap1 ) ;
for ( i = 0 ; i < bpf_map__max_entries ( skel - > maps . hashmap1 ) ; i + + ) {
key . a = i + 1 ;
key . b = i + 2 ;
key . c = i + 3 ;
expected_key_a + = key . a ;
expected_key_b + = key . b ;
for ( j = 0 ; j < bpf_num_possible_cpus ( ) ; j + + ) {
* ( __u32 * ) ( val + j * 8 ) = i + j ;
expected_val + = i + j ;
}
err = bpf_map_update_elem ( map_fd , & key , val , BPF_ANY ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK ( err , " map_update " ) )
2020-07-23 21:41:20 +03:00
goto out ;
}
2020-08-05 08:50:58 +03:00
memset ( & linfo , 0 , sizeof ( linfo ) ) ;
linfo . map . map_fd = map_fd ;
opts . link_info = & linfo ;
opts . link_info_len = sizeof ( linfo ) ;
2020-07-23 21:41:20 +03:00
link = bpf_program__attach_iter ( skel - > progs . dump_bpf_percpu_hash_map , & opts ) ;
2021-05-25 06:59:32 +03:00
if ( ! ASSERT_OK_PTR ( link , " attach_iter " ) )
2020-07-23 21:41:20 +03:00
goto out ;
iter_fd = bpf_iter_create ( bpf_link__fd ( link ) ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_GE ( iter_fd , 0 , " create_iter " ) )
2020-07-23 21:41:20 +03:00
goto free_link ;
/* do some tests */
while ( ( len = read ( iter_fd , buf , sizeof ( buf ) ) ) > 0 )
;
if ( CHECK ( len < 0 , " read " , " read failed: %s \n " , strerror ( errno ) ) )
goto close_iter ;
/* test results */
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_EQ ( skel - > bss - > key_sum_a , expected_key_a , " key_sum_a " ) )
2020-07-23 21:41:20 +03:00
goto close_iter ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_EQ ( skel - > bss - > key_sum_b , expected_key_b , " key_sum_b " ) )
2020-07-23 21:41:20 +03:00
goto close_iter ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_EQ ( skel - > bss - > val_sum , expected_val , " val_sum " ) )
2020-07-23 21:41:20 +03:00
goto close_iter ;
close_iter :
close ( iter_fd ) ;
free_link :
bpf_link__destroy ( link ) ;
out :
bpf_iter_bpf_percpu_hash_map__destroy ( skel ) ;
2021-11-07 19:55:16 +03:00
free ( val ) ;
2020-07-23 21:41:20 +03:00
}
2020-07-23 21:41:21 +03:00
static void test_bpf_array_map ( void )
{
__u64 val , expected_val = 0 , res_first_val , first_val = 0 ;
DECLARE_LIBBPF_OPTS ( bpf_iter_attach_opts , opts ) ;
__u32 expected_key = 0 , res_first_key ;
struct bpf_iter_bpf_array_map * skel ;
2020-08-05 08:50:58 +03:00
union bpf_iter_link_info linfo ;
2020-07-23 21:41:21 +03:00
int err , i , map_fd , iter_fd ;
struct bpf_link * link ;
char buf [ 64 ] = { } ;
int len , start ;
skel = bpf_iter_bpf_array_map__open_and_load ( ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK_PTR ( skel , " bpf_iter_bpf_array_map__open_and_load " ) )
2020-07-23 21:41:21 +03:00
return ;
map_fd = bpf_map__fd ( skel - > maps . arraymap1 ) ;
for ( i = 0 ; i < bpf_map__max_entries ( skel - > maps . arraymap1 ) ; i + + ) {
val = i + 4 ;
expected_key + = i ;
expected_val + = val ;
if ( i = = 0 )
first_val = val ;
err = bpf_map_update_elem ( map_fd , & i , & val , BPF_ANY ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK ( err , " map_update " ) )
2020-07-23 21:41:21 +03:00
goto out ;
}
2020-08-05 08:50:58 +03:00
memset ( & linfo , 0 , sizeof ( linfo ) ) ;
linfo . map . map_fd = map_fd ;
opts . link_info = & linfo ;
opts . link_info_len = sizeof ( linfo ) ;
2020-07-23 21:41:21 +03:00
link = bpf_program__attach_iter ( skel - > progs . dump_bpf_array_map , & opts ) ;
2021-05-25 06:59:32 +03:00
if ( ! ASSERT_OK_PTR ( link , " attach_iter " ) )
2020-07-23 21:41:21 +03:00
goto out ;
iter_fd = bpf_iter_create ( bpf_link__fd ( link ) ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_GE ( iter_fd , 0 , " create_iter " ) )
2020-07-23 21:41:21 +03:00
goto free_link ;
/* do some tests */
start = 0 ;
while ( ( len = read ( iter_fd , buf + start , sizeof ( buf ) - start ) ) > 0 )
start + = len ;
if ( CHECK ( len < 0 , " read " , " read failed: %s \n " , strerror ( errno ) ) )
goto close_iter ;
/* test results */
res_first_key = * ( __u32 * ) buf ;
res_first_val = * ( __u64 * ) ( buf + sizeof ( __u32 ) ) ;
if ( CHECK ( res_first_key ! = 0 | | res_first_val ! = first_val ,
" bpf_seq_write " ,
" seq_write failure: first key %u vs expected 0, "
" first value %llu vs expected %llu \n " ,
res_first_key , res_first_val , first_val ) )
goto close_iter ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_EQ ( skel - > bss - > key_sum , expected_key , " key_sum " ) )
2020-07-23 21:41:21 +03:00
goto close_iter ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_EQ ( skel - > bss - > val_sum , expected_val , " val_sum " ) )
2020-07-23 21:41:21 +03:00
goto close_iter ;
for ( i = 0 ; i < bpf_map__max_entries ( skel - > maps . arraymap1 ) ; i + + ) {
err = bpf_map_lookup_elem ( map_fd , & i , & val ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK ( err , " map_lookup " ) )
2020-07-23 21:41:21 +03:00
goto out ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_EQ ( i , val , " invalid_val " ) )
2020-07-23 21:41:21 +03:00
goto out ;
}
close_iter :
close ( iter_fd ) ;
free_link :
bpf_link__destroy ( link ) ;
out :
bpf_iter_bpf_array_map__destroy ( skel ) ;
}
static void test_bpf_percpu_array_map ( void )
{
DECLARE_LIBBPF_OPTS ( bpf_iter_attach_opts , opts ) ;
struct bpf_iter_bpf_percpu_array_map * skel ;
__u32 expected_key = 0 , expected_val = 0 ;
2020-08-05 08:50:58 +03:00
union bpf_iter_link_info linfo ;
2020-07-23 21:41:21 +03:00
int err , i , j , map_fd , iter_fd ;
struct bpf_link * link ;
char buf [ 64 ] ;
void * val ;
int len ;
skel = bpf_iter_bpf_percpu_array_map__open ( ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK_PTR ( skel , " bpf_iter_bpf_percpu_array_map__open " ) )
2020-07-23 21:41:21 +03:00
return ;
skel - > rodata - > num_cpus = bpf_num_possible_cpus ( ) ;
2021-11-07 19:55:16 +03:00
val = malloc ( 8 * bpf_num_possible_cpus ( ) ) ;
2020-07-23 21:41:21 +03:00
err = bpf_iter_bpf_percpu_array_map__load ( skel ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK_PTR ( skel , " bpf_iter_bpf_percpu_array_map__load " ) )
2020-07-23 21:41:21 +03:00
goto out ;
/* update map values here */
map_fd = bpf_map__fd ( skel - > maps . arraymap1 ) ;
for ( i = 0 ; i < bpf_map__max_entries ( skel - > maps . arraymap1 ) ; i + + ) {
expected_key + = i ;
for ( j = 0 ; j < bpf_num_possible_cpus ( ) ; j + + ) {
* ( __u32 * ) ( val + j * 8 ) = i + j ;
expected_val + = i + j ;
}
err = bpf_map_update_elem ( map_fd , & i , val , BPF_ANY ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK ( err , " map_update " ) )
2020-07-23 21:41:21 +03:00
goto out ;
}
2020-08-05 08:50:58 +03:00
memset ( & linfo , 0 , sizeof ( linfo ) ) ;
linfo . map . map_fd = map_fd ;
opts . link_info = & linfo ;
opts . link_info_len = sizeof ( linfo ) ;
2020-07-23 21:41:21 +03:00
link = bpf_program__attach_iter ( skel - > progs . dump_bpf_percpu_array_map , & opts ) ;
2021-05-25 06:59:32 +03:00
if ( ! ASSERT_OK_PTR ( link , " attach_iter " ) )
2020-07-23 21:41:21 +03:00
goto out ;
iter_fd = bpf_iter_create ( bpf_link__fd ( link ) ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_GE ( iter_fd , 0 , " create_iter " ) )
2020-07-23 21:41:21 +03:00
goto free_link ;
/* do some tests */
while ( ( len = read ( iter_fd , buf , sizeof ( buf ) ) ) > 0 )
;
if ( CHECK ( len < 0 , " read " , " read failed: %s \n " , strerror ( errno ) ) )
goto close_iter ;
/* test results */
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_EQ ( skel - > bss - > key_sum , expected_key , " key_sum " ) )
2020-07-23 21:41:21 +03:00
goto close_iter ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_EQ ( skel - > bss - > val_sum , expected_val , " val_sum " ) )
2020-07-23 21:41:21 +03:00
goto close_iter ;
close_iter :
close ( iter_fd ) ;
free_link :
bpf_link__destroy ( link ) ;
out :
bpf_iter_bpf_percpu_array_map__destroy ( skel ) ;
2021-11-07 19:55:16 +03:00
free ( val ) ;
2020-07-23 21:41:21 +03:00
}
2020-12-04 14:36:07 +03:00
/* An iterator program deletes all local storage in a map. */
static void test_bpf_sk_storage_delete(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	union bpf_iter_link_info linfo;
	int err, len, map_fd, iter_fd;
	struct bpf_link *link;
	int sock_fd = -1;
	__u32 val = 42;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	/* create a socket and give it one sk_storage entry to delete */
	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(sock_fd, 0, "socket"))
		goto out;
	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (!ASSERT_OK(err, "map_update"))
		goto out;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map,
					&opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests: draining runs the deleting iterator program */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results: the entry must now be gone (lookup -> ENOENT) */
	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem",
		  "map value wasn't deleted (err=%d, errno=%d)\n", err, errno))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	if (sock_fd >= 0)
		close(sock_fd);
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}
2020-12-04 14:36:08 +03:00
/* This creates a socket and its local storage. It then runs a task_iter BPF
* program that replaces the existing socket local storage with the tgid of the
* only task owning a file descriptor to this socket , this process , prog_tests .
2020-12-04 14:36:09 +03:00
* It then runs a tcp socket iterator that negates the value in the existing
* socket local storage , the test verifies that the resulting value is - pid .
2020-12-04 14:36:08 +03:00
*/
static void test_bpf_sk_storage_get ( void )
{
struct bpf_iter_bpf_sk_storage_helpers * skel ;
int err , map_fd , val = - 1 ;
int sock_fd = - 1 ;
skel = bpf_iter_bpf_sk_storage_helpers__open_and_load ( ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK_PTR ( skel , " bpf_iter_bpf_sk_storage_helpers__open_and_load " ) )
2020-12-04 14:36:08 +03:00
return ;
sock_fd = socket ( AF_INET6 , SOCK_STREAM , 0 ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_GE ( sock_fd , 0 , " socket " ) )
2020-12-04 14:36:08 +03:00
goto out ;
2020-12-04 14:36:09 +03:00
err = listen ( sock_fd , 1 ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK ( err , " listen " ) )
2020-12-04 14:36:09 +03:00
goto close_socket ;
2020-12-04 14:36:08 +03:00
map_fd = bpf_map__fd ( skel - > maps . sk_stg_map ) ;
err = bpf_map_update_elem ( map_fd , & sock_fd , & val , BPF_NOEXIST ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK ( err , " bpf_map_update_elem " ) )
2020-12-04 14:36:08 +03:00
goto close_socket ;
do_dummy_read ( skel - > progs . fill_socket_owner ) ;
err = bpf_map_lookup_elem ( map_fd , & sock_fd , & val ) ;
2020-12-04 14:36:09 +03:00
if ( CHECK ( err | | val ! = getpid ( ) , " bpf_map_lookup_elem " ,
" map value wasn't set correctly (expected %d, got %d, err=%d) \n " ,
getpid ( ) , val , err ) )
goto close_socket ;
do_dummy_read ( skel - > progs . negate_socket_local_storage ) ;
err = bpf_map_lookup_elem ( map_fd , & sock_fd , & val ) ;
CHECK ( err | | val ! = - getpid ( ) , " bpf_map_lookup_elem " ,
2020-12-04 14:36:08 +03:00
" map value wasn't set correctly (expected %d, got %d, err=%d) \n " ,
2020-12-04 14:36:09 +03:00
- getpid ( ) , val , err ) ;
2020-12-04 14:36:08 +03:00
close_socket :
close ( sock_fd ) ;
out :
bpf_iter_bpf_sk_storage_helpers__destroy ( skel ) ;
}
2020-07-23 21:41:22 +03:00
static void test_bpf_sk_storage_map ( void )
{
DECLARE_LIBBPF_OPTS ( bpf_iter_attach_opts , opts ) ;
int err , i , len , map_fd , iter_fd , num_sockets ;
struct bpf_iter_bpf_sk_storage_map * skel ;
2020-08-05 08:50:58 +03:00
union bpf_iter_link_info linfo ;
2020-07-23 21:41:22 +03:00
int sock_fd [ 3 ] = { - 1 , - 1 , - 1 } ;
__u32 val , expected_val = 0 ;
struct bpf_link * link ;
char buf [ 64 ] ;
skel = bpf_iter_bpf_sk_storage_map__open_and_load ( ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK_PTR ( skel , " bpf_iter_bpf_sk_storage_map__open_and_load " ) )
2020-07-23 21:41:22 +03:00
return ;
map_fd = bpf_map__fd ( skel - > maps . sk_stg_map ) ;
num_sockets = ARRAY_SIZE ( sock_fd ) ;
for ( i = 0 ; i < num_sockets ; i + + ) {
sock_fd [ i ] = socket ( AF_INET6 , SOCK_STREAM , 0 ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_GE ( sock_fd [ i ] , 0 , " socket " ) )
2020-07-23 21:41:22 +03:00
goto out ;
val = i + 1 ;
expected_val + = val ;
err = bpf_map_update_elem ( map_fd , & sock_fd [ i ] , & val ,
BPF_NOEXIST ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK ( err , " map_update " ) )
2020-07-23 21:41:22 +03:00
goto out ;
}
2020-08-05 08:50:58 +03:00
memset ( & linfo , 0 , sizeof ( linfo ) ) ;
linfo . map . map_fd = map_fd ;
opts . link_info = & linfo ;
opts . link_info_len = sizeof ( linfo ) ;
2020-07-23 21:41:22 +03:00
link = bpf_program__attach_iter ( skel - > progs . dump_bpf_sk_storage_map , & opts ) ;
2021-05-25 06:59:32 +03:00
if ( ! ASSERT_OK_PTR ( link , " attach_iter " ) )
2020-07-23 21:41:22 +03:00
goto out ;
iter_fd = bpf_iter_create ( bpf_link__fd ( link ) ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_GE ( iter_fd , 0 , " create_iter " ) )
2020-07-23 21:41:22 +03:00
goto free_link ;
/* do some tests */
while ( ( len = read ( iter_fd , buf , sizeof ( buf ) ) ) > 0 )
;
if ( CHECK ( len < 0 , " read " , " read failed: %s \n " , strerror ( errno ) ) )
goto close_iter ;
/* test results */
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_EQ ( skel - > bss - > ipv6_sk_count , num_sockets , " ipv6_sk_count " ) )
2020-07-23 21:41:22 +03:00
goto close_iter ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_EQ ( skel - > bss - > val_sum , expected_val , " val_sum " ) )
2020-07-23 21:41:22 +03:00
goto close_iter ;
close_iter :
close ( iter_fd ) ;
free_link :
bpf_link__destroy ( link ) ;
out :
for ( i = 0 ; i < num_sockets ; i + + ) {
if ( sock_fd [ i ] > = 0 )
close ( sock_fd [ i ] ) ;
}
bpf_iter_bpf_sk_storage_map__destroy ( skel ) ;
}
2020-07-23 21:41:24 +03:00
static void test_rdonly_buf_out_of_bound ( void )
{
DECLARE_LIBBPF_OPTS ( bpf_iter_attach_opts , opts ) ;
struct bpf_iter_test_kern5 * skel ;
2020-08-05 08:50:58 +03:00
union bpf_iter_link_info linfo ;
2020-07-23 21:41:24 +03:00
struct bpf_link * link ;
skel = bpf_iter_test_kern5__open_and_load ( ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK_PTR ( skel , " bpf_iter_test_kern5__open_and_load " ) )
2020-07-23 21:41:24 +03:00
return ;
2020-08-05 08:50:58 +03:00
memset ( & linfo , 0 , sizeof ( linfo ) ) ;
linfo . map . map_fd = bpf_map__fd ( skel - > maps . hashmap1 ) ;
opts . link_info = & linfo ;
opts . link_info_len = sizeof ( linfo ) ;
2020-07-23 21:41:24 +03:00
link = bpf_program__attach_iter ( skel - > progs . dump_bpf_hash_map , & opts ) ;
2021-05-25 06:59:32 +03:00
if ( ! ASSERT_ERR_PTR ( link , " attach_iter " ) )
2020-07-23 21:41:24 +03:00
bpf_link__destroy ( link ) ;
bpf_iter_test_kern5__destroy ( skel ) ;
}
2020-07-29 01:18:01 +03:00
/* test_kern6 is expected to fail to load (negative buffer offset, per the
 * name); only destroy the skeleton if loading unexpectedly succeeded.
 */
static void test_buf_neg_offset(void)
{
	struct bpf_iter_test_kern6 *skel = bpf_iter_test_kern6__open_and_load();

	if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern6__open_and_load"))
		bpf_iter_test_kern6__destroy(skel);
}
2022-05-10 18:52:33 +03:00
static void test_link_iter ( void )
{
struct bpf_iter_bpf_link * skel ;
skel = bpf_iter_bpf_link__open_and_load ( ) ;
if ( ! ASSERT_OK_PTR ( skel , " bpf_iter_bpf_link__open_and_load " ) )
return ;
do_dummy_read ( skel - > progs . dump_bpf_link ) ;
bpf_iter_bpf_link__destroy ( skel ) ;
}
2022-07-12 15:31:45 +03:00
static void test_ksym_iter ( void )
{
struct bpf_iter_ksym * skel ;
skel = bpf_iter_ksym__open_and_load ( ) ;
if ( ! ASSERT_OK_PTR ( skel , " bpf_iter_ksym__open_and_load " ) )
return ;
do_dummy_read ( skel - > progs . dump_ksym ) ;
bpf_iter_ksym__destroy ( skel ) ;
}
2021-02-12 21:31:07 +03:00
#define CMP_BUFFER_SIZE 1024
static char task_vma_output[CMP_BUFFER_SIZE];
static char proc_maps_output[CMP_BUFFER_SIZE];

/* Remove spaces and tabs from str, and only keep the first line (the old
 * comment said "\0 and \t"; the code strips ' ' and '\t'). Used to
 * normalize a /proc/<pid>/maps line before comparing it with the
 * bpf_iter output.
 */
static void str_strip_first_line(char *str)
{
	char *dst = str;
	const char *src = str;

	/* Test the current byte *before* consuming it. The previous do-while
	 * only checked *src after advancing, so it read one byte past the
	 * terminator of an empty string and copied a leading '\n' into the
	 * output instead of stopping there.
	 */
	while (*src != '\0' && *src != '\n') {
		if (*src == ' ' || *src == '\t')
			src++;
		else
			*dst++ = *src++;
	}

	*dst = '\0';
}
static void test_task_vma ( void )
{
int err , iter_fd = - 1 , proc_maps_fd = - 1 ;
struct bpf_iter_task_vma * skel ;
int len , read_size = 4 ;
char maps_path [ 64 ] ;
skel = bpf_iter_task_vma__open ( ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK_PTR ( skel , " bpf_iter_task_vma__open " ) )
2021-02-12 21:31:07 +03:00
return ;
skel - > bss - > pid = getpid ( ) ;
err = bpf_iter_task_vma__load ( skel ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_OK ( err , " bpf_iter_task_vma__load " ) )
2021-02-12 21:31:07 +03:00
goto out ;
skel - > links . proc_maps = bpf_program__attach_iter (
skel - > progs . proc_maps , NULL ) ;
2021-05-25 06:59:32 +03:00
if ( ! ASSERT_OK_PTR ( skel - > links . proc_maps , " bpf_program__attach_iter " ) ) {
2021-02-12 21:31:07 +03:00
skel - > links . proc_maps = NULL ;
goto out ;
}
iter_fd = bpf_iter_create ( bpf_link__fd ( skel - > links . proc_maps ) ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_GE ( iter_fd , 0 , " create_iter " ) )
2021-02-12 21:31:07 +03:00
goto out ;
/* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks
2021-11-30 21:18:11 +03:00
* to trigger seq_file corner cases .
2021-02-12 21:31:07 +03:00
*/
len = 0 ;
while ( len < CMP_BUFFER_SIZE ) {
err = read_fd_into_buffer ( iter_fd , task_vma_output + len ,
2022-04-09 02:58:17 +03:00
MIN ( read_size , CMP_BUFFER_SIZE - len ) ) ;
2021-11-30 21:18:11 +03:00
if ( ! err )
break ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_GE ( err , 0 , " read_iter_fd " ) )
2021-02-12 21:31:07 +03:00
goto out ;
len + = err ;
}
/* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */
snprintf ( maps_path , 64 , " /proc/%u/maps " , skel - > bss - > pid ) ;
proc_maps_fd = open ( maps_path , O_RDONLY ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_GE ( proc_maps_fd , 0 , " open_proc_maps " ) )
2021-02-12 21:31:07 +03:00
goto out ;
err = read_fd_into_buffer ( proc_maps_fd , proc_maps_output , CMP_BUFFER_SIZE ) ;
2022-05-10 18:52:32 +03:00
if ( ! ASSERT_GE ( err , 0 , " read_prog_maps_fd " ) )
2021-02-12 21:31:07 +03:00
goto out ;
/* strip and compare the first line of the two files */
str_strip_first_line ( task_vma_output ) ;
str_strip_first_line ( proc_maps_output ) ;
2022-05-10 18:52:32 +03:00
ASSERT_STREQ ( task_vma_output , proc_maps_output , " compare_output " ) ;
2021-02-12 21:31:07 +03:00
out :
close ( proc_maps_fd ) ;
close ( iter_fd ) ;
bpf_iter_task_vma__destroy ( skel ) ;
}
2020-05-09 20:59:23 +03:00
/* Entry point: register and run every bpf_iter subtest in order. */
void test_bpf_iter(void)
{
	/* Each subtest runs only when selected via test__start_subtest(). */
#define SUBTEST(name, invocation)			\
	do {						\
		if (test__start_subtest(name))		\
			invocation;			\
	} while (0)

	SUBTEST("btf_id_or_null", test_btf_id_or_null());
	SUBTEST("ipv6_route", test_ipv6_route());
	SUBTEST("netlink", test_netlink());
	SUBTEST("bpf_map", test_bpf_map());
	SUBTEST("task", test_task());
	SUBTEST("task_sleepable", test_task_sleepable());
	SUBTEST("task_stack", test_task_stack());
	SUBTEST("task_file", test_task_file());
	SUBTEST("task_vma", test_task_vma());
	SUBTEST("task_btf", test_task_btf());
	SUBTEST("tcp4", test_tcp4());
	SUBTEST("tcp6", test_tcp6());
	SUBTEST("udp4", test_udp4());
	SUBTEST("udp6", test_udp6());
	SUBTEST("unix", test_unix());
	SUBTEST("anon", test_anon_iter(false));
	SUBTEST("anon-read-one-char", test_anon_iter(true));
	SUBTEST("file", test_file_iter());
	SUBTEST("overflow", test_overflow(false, false));
	SUBTEST("overflow-e2big", test_overflow(true, false));
	SUBTEST("prog-ret-1", test_overflow(false, true));
	SUBTEST("bpf_hash_map", test_bpf_hash_map());
	SUBTEST("bpf_percpu_hash_map", test_bpf_percpu_hash_map());
	SUBTEST("bpf_array_map", test_bpf_array_map());
	SUBTEST("bpf_percpu_array_map", test_bpf_percpu_array_map());
	SUBTEST("bpf_sk_storage_map", test_bpf_sk_storage_map());
	SUBTEST("bpf_sk_storage_delete", test_bpf_sk_storage_delete());
	SUBTEST("bpf_sk_storage_get", test_bpf_sk_storage_get());
	SUBTEST("rdonly-buf-out-of-bound", test_rdonly_buf_out_of_bound());
	SUBTEST("buf-neg-offset", test_buf_neg_offset());
	SUBTEST("link-iter", test_link_iter());
	SUBTEST("ksym", test_ksym_iter());
#undef SUBTEST
}