// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
#include "kfree_skb.skel.h"

struct meta {
	int ifindex;
	__u32 cb32_0;
	__u8 cb8_0;
};
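
/* Control-block bytes seeded into skb->cb before the test run; the union
 * lets the callback compare the same data through __u32 and __u8 views.
 */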
static union {
	__u32 cb32[5];
	__u8 cb8[20];
} cb = {
	.cb32[0] = 0x81828384,
};
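
/* perf buffer callback: validates the metadata and the IPv6/TCP packet
 * that the BPF program emitted for the skb freed on the loopback device.
 */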
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	struct meta *meta = (struct meta *)data;
	struct ipv6_packet *pkt_v6 = data + sizeof(*meta);
	int duration = 0;

	if (CHECK(size != 72 + sizeof(*meta), "check_size", "size %u != %zu\n",
		  size, 72 + sizeof(*meta)))
		return;
	if (CHECK(meta->ifindex != 1, "check_meta_ifindex",
		  "meta->ifindex = %d\n", meta->ifindex))
		/* spurious kfree_skb not on loopback device */
		return;
	if (CHECK(meta->cb8_0 != cb.cb8[0], "check_cb8_0", "cb8_0 %x != %x\n",
		  meta->cb8_0, cb.cb8[0]))
		return;
	if (CHECK(meta->cb32_0 != cb.cb32[0], "check_cb32_0",
		  "cb32_0 %x != %x\n",
		  meta->cb32_0, cb.cb32[0]))
		return;
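	/* 0xdd86 is ETH_P_IPV6 (0x86dd) as stored on a little-endian host */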
	if (CHECK(pkt_v6->eth.h_proto != 0xdd86, "check_eth",
		  "h_proto %x\n", pkt_v6->eth.h_proto))
		return;
	if (CHECK(pkt_v6->iph.nexthdr != 6, "check_ip",
		  "iph.nexthdr %x\n", pkt_v6->iph.nexthdr))
		return;
	if (CHECK(pkt_v6->tcp.doff != 5, "check_tcp",
		  "tcp.doff %x\n", pkt_v6->tcp.doff))
		return;
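
	/* all checks passed; report success back to the test body via ctx */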
	*(bool *)ctx = true;
}

/* TODO: fix kernel panic caused by this test in parallel mode */
void serial_test_kfree_skb(void)
{
	struct __sk_buff skb = {};
	struct bpf_prog_test_run_attr tattr = {
		.data_in = &pkt_v6,
		.data_size_in = sizeof(pkt_v6),
		.ctx_in = &skb,
		.ctx_size_in = sizeof(skb),
	};
	struct kfree_skb *skel = NULL;
	struct bpf_link *link;
	struct bpf_object *obj;
	struct perf_buffer *pb = NULL;
	int err;
	bool passed = false;
	__u32 duration = 0;
	const int zero = 0;
	bool test_ok[2];
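
	/* load the SCHED_CLS program that bpf_prog_test_run exercises below */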
	err = bpf_prog_test_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &tattr.prog_fd);
	if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
		return;
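
	/* open and load the kfree_skb skeleton carrying the tracing programs */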
	skel = kfree_skb__open_and_load();
	if (!ASSERT_OK_PTR(skel, "kfree_skb_skel"))
		goto close_prog;
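
	/* attach the raw tracepoint and the fentry/fexit programs; stashing
	 * each link in the skeleton lets kfree_skb__destroy() detach them
	 */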
	link = bpf_program__attach_raw_tracepoint(skel->progs.trace_kfree_skb, NULL);
	if (!ASSERT_OK_PTR(link, "attach_raw_tp"))
		goto close_prog;
	skel->links.trace_kfree_skb = link;

	link = bpf_program__attach_trace(skel->progs.fentry_eth_type_trans);
	if (!ASSERT_OK_PTR(link, "attach fentry"))
		goto close_prog;
	skel->links.fentry_eth_type_trans = link;

	link = bpf_program__attach_trace(skel->progs.fexit_eth_type_trans);
	if (!ASSERT_OK_PTR(link, "attach fexit"))
		goto close_prog;
	skel->links.fexit_eth_type_trans = link;

	/* set up perf buffer */
	pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1,
			      on_sample, NULL, &passed, NULL);
	if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
		goto close_prog;
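
	/* run the loaded program on pkt_v6 with cb copied into skb->cb; the
	 * test skb is freed afterwards, which should fire the tracepoint
	 */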
	memcpy(skb.cb, &cb, sizeof(cb));
	err = bpf_prog_test_run_xattr(&tattr);
	duration = tattr.duration;
	CHECK(err || tattr.retval, "ipv6",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, tattr.retval, duration);

	/* read perf buffer */
	err = perf_buffer__poll(pb, 100);
	if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err))
		goto close_prog;

	/* make sure kfree_skb program was triggered
	 * and it sent expected skb into ring buffer
	 */
	ASSERT_TRUE(passed, "passed");
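
	/* the fentry/fexit programs record their results in the skeleton's
	 * .bss; both flags must have been set
	 */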
	err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.bss), &zero, test_ok);
	if (CHECK(err, "get_result",
		  "failed to get output data: %d\n", err))
		goto close_prog;
	CHECK_FAIL(!test_ok[0] || !test_ok[1]);
close_prog:
	perf_buffer__free(pb);
	bpf_object__close(obj);
	kfree_skb__destroy(skel);
}