// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2020 Intel Corporation. */

/*
 * Some functions in this program are taken from
 * Linux kernel samples/bpf/xdpsock* and modified
 * for use.
 *
 * See test_xsk.sh for detailed information on test topology
 * and prerequisite network setup.
 *
 * This test program contains two threads, each thread is single socket with
 * a unique UMEM. It validates in-order packet delivery and packet content
 * by sending packets to each other.
 *
 * Tests Information:
 * ------------------
 * These selftests test AF_XDP SKB and Native/DRV modes using veth
 * Virtual Ethernet interfaces.
 *
 * For each mode, the following tests are run:
 *    a. nopoll - soft-irq processing in run-to-completion mode
 *    b. poll - using poll() syscall
 *    c. Socket Teardown
 *       Create a Tx and a Rx socket, Tx from one socket, Rx on another. Destroy
 *       both sockets, then repeat multiple times. Only nopoll mode is used
 *    d. Bi-directional sockets
 *       Configure sockets as bi-directional tx/rx sockets, sets up fill and
 *       completion rings on each socket, tx/rx in both directions. Only nopoll
 *       mode is used
 *    e. Statistics
 *       Trigger some error conditions and ensure that the appropriate statistics
 *       are incremented. Within this test, the following statistics are tested:
 *       i.   rx dropped
 *            Increase the UMEM frame headroom to a value which results in
 *            insufficient space in the rx buffer for both the packet and the headroom.
 *       ii.  tx invalid
 *            Set the 'len' field of tx descriptors to an invalid value (umem frame
 *            size + 1).
 *       iii. rx ring full
 *            Reduce the size of the RX ring to a fraction of the fill ring size.
 *       iv.  fill queue empty
 *            Do not populate the fill queue and then try to receive pkts.
 *    f. bpf_link resource persistence
 *       Configure sockets at indexes 0 and 1, run a traffic on queue ids 0,
 *       then remove xsk sockets from queue 0 on both veth interfaces and
 *       finally run a traffic on queues ids 1
 *    g. unaligned mode
 *    h. tests for invalid and corner case Tx descriptors so that the correct ones
 *       are discarded and let through, respectively.
 *    i. 2K frame size tests
 *
 * Total tests: 12
 *
 * Flow:
 * -----
 * - Single process spawns two threads: Tx and Rx
 * - Each of these two threads attach to a veth interface within their assigned
 *   namespaces
 * - Each thread Creates one AF_XDP socket connected to a unique umem for each
 *   veth interface
 * - Tx thread Transmits 10k packets from veth<xxxx> to veth<yyyy>
 * - Rx thread verifies if all 10k packets were received and delivered in-order,
 *   and have the right content
 *
 * Enable/disable packet dump mode:
 * --------------------------------
 * To enable L2 - L4 headers and payload dump of each packet on STDOUT, add
 * parameter -D to params array in test_xsk.sh, i.e. params=("-S" "-D")
 */
# define _GNU_SOURCE
# include <fcntl.h>
# include <errno.h>
# include <getopt.h>
# include <asm/barrier.h>
# include <linux/if_link.h>
# include <linux/if_ether.h>
# include <linux/ip.h>
# include <linux/udp.h>
# include <arpa/inet.h>
# include <net/if.h>
# include <locale.h>
# include <poll.h>
# include <pthread.h>
# include <signal.h>
# include <stdbool.h>
# include <stdio.h>
# include <stdlib.h>
# include <string.h>
# include <stddef.h>
# include <sys/mman.h>
2022-05-10 14:55:58 +03:00
# include <sys/socket.h>
2022-05-10 14:56:00 +03:00
# include <sys/time.h>
2020-12-08 00:53:30 +03:00
# include <sys/types.h>
# include <sys/queue.h>
# include <time.h>
# include <unistd.h>
# include <stdatomic.h>
2022-06-28 00:15:13 +03:00
# include "xsk.h"
2022-07-07 14:16:12 +03:00
# include "xskxceiver.h"
2020-12-08 00:53:30 +03:00
# include "../kselftest.h"
2021-12-02 02:28:20 +03:00
/* AF_XDP APIs were moved into libxdp and marked as deprecated in libbpf.
 * Until xskxceiver is either moved or re-written into libxdp, suppress
 * deprecation warnings in this file
 */
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
2021-03-30 01:43:01 +03:00
/* MAC and IP addresses plus UDP ports used for the generated test traffic.
 * Note: the MAC strings are raw byte sequences, not printable text; the
 * extraction had injected spaces between the \x escapes, which would have
 * silently changed the byte values (e.g. "\x0A " is two bytes, not one).
 */
static const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62";
static const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61";
static const char *IP1 = "192.168.100.162";
static const char *IP2 = "192.168.100.161";
static const u16 UDP_PORT1 = 2020;
static const u16 UDP_PORT2 = 2121;
2020-12-08 00:53:30 +03:00
/* Print a fatal errno-style failure with its exact origin (file/function/line)
 * and abort the whole test run as an expected failure.
 */
static void __exit_with_error(int error, const char *file, const char *func, int line)
{
	ksft_test_result_fail("[%s:%s:%i]: ERROR: %d/\"%s\"\n", file, func, line, error,
			      strerror(error));
	ksft_exit_xfail();
}

/* Convenience wrapper that captures the call site automatically. */
#define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, __LINE__)
2021-09-07 10:19:20 +03:00
/* Human-readable attach mode of the test's Tx interface.
 * Both expansions are now fully parenthesized: the previous unparenthesized
 * conditional expression could mis-parse when the macro was used next to
 * other operators (classic macro-hygiene bug).
 */
#define mode_string(test) ((test)->ifobj_tx->xdp_flags & XDP_FLAGS_SKB_MODE ? "SKB" : "DRV")
/* Prefix printed when busy-poll is enabled on the sockets. */
#define busy_poll_string(test) ((test)->ifobj_tx->busy_poll ? "BUSY-POLL " : "")
2021-09-07 10:19:20 +03:00
2022-05-10 14:55:59 +03:00
static void report_failure ( struct test_spec * test )
{
if ( test - > fail )
return ;
ksft_test_result_fail ( " FAIL: %s %s%s \n " , mode_string ( test ) , busy_poll_string ( test ) ,
test - > name ) ;
test - > fail = true ;
}
2020-12-08 00:53:30 +03:00
2021-08-25 12:37:19 +03:00
/* Fill dest with the 32-bit pattern htonl(val), one word at a time.
 * Only the leading size & ~0x3 bytes are written; any trailing 1-3 bytes
 * are left untouched.
 */
static void memset32_htonl(void *dest, u32 val, u32 size)
{
	u32 nb_words = size / 4;
	u32 pattern = htonl(val);
	u32 *out = dest;
	u32 w;

	for (w = 0; w < nb_words; w++)
		out[w] = pattern;
}
/*
* Fold a partial checksum
* This function code has been taken from
* Linux kernel include / asm - generic / checksum . h
*/
2021-03-30 01:43:03 +03:00
static __u16 csum_fold ( __u32 csum )
2020-12-08 00:53:30 +03:00
{
u32 sum = ( __force u32 ) csum ;
sum = ( sum & 0xffff ) + ( sum > > 16 ) ;
sum = ( sum & 0xffff ) + ( sum > > 16 ) ;
return ( __force __u16 ) ~ sum ;
}
/*
* This function code has been taken from
* Linux kernel lib / checksum . c
*/
2021-03-30 01:43:03 +03:00
static u32 from64to32 ( u64 x )
2020-12-08 00:53:30 +03:00
{
/* add up 32-bit and 32-bit for 32+c bit */
x = ( x & 0xffffffff ) + ( x > > 32 ) ;
/* add up carry.. */
x = ( x & 0xffffffff ) + ( x > > 32 ) ;
return ( u32 ) x ;
}
/*
* This function code has been taken from
* Linux kernel lib / checksum . c
*/
2021-03-30 01:43:03 +03:00
static __u32 csum_tcpudp_nofold ( __be32 saddr , __be32 daddr , __u32 len , __u8 proto , __u32 sum )
2020-12-08 00:53:30 +03:00
{
unsigned long long s = ( __force u32 ) sum ;
s + = ( __force u32 ) saddr ;
s + = ( __force u32 ) daddr ;
# ifdef __BIG_ENDIAN__
s + = proto + len ;
# else
s + = ( proto + len ) < < 8 ;
# endif
return ( __force __u32 ) from64to32 ( s ) ;
}
/*
* This function has been taken from
* Linux kernel include / asm - generic / checksum . h
*/
2021-03-30 01:43:03 +03:00
static __u16 csum_tcpudp_magic ( __be32 saddr , __be32 daddr , __u32 len , __u8 proto , __u32 sum )
2020-12-08 00:53:30 +03:00
{
return csum_fold ( csum_tcpudp_nofold ( saddr , daddr , len , proto , sum ) ) ;
}
2021-03-30 01:43:03 +03:00
/* Checksum the UDP header + payload together with the IPv4 pseudo-header.
 * NOTE(review): the 16-bit word loop assumes len is even (UDP_PKT_SIZE in
 * this harness) — an odd len would read one byte past the buffer; confirm
 * callers never pass odd lengths.
 */
static u16 udp_csum(u32 saddr, u32 daddr, u32 len, u8 proto, u16 *udp_pkt)
{
	u32 csum = 0;
	u32 cnt;

	/* udp hdr and data */
	for (cnt = 0; cnt < len; cnt += 2)
		csum += udp_pkt[cnt >> 1];

	return csum_tcpudp_magic(saddr, daddr, len, proto, csum);
}
2021-01-22 18:47:14 +03:00
static void gen_eth_hdr ( struct ifobject * ifobject , struct ethhdr * eth_hdr )
2020-12-08 00:53:30 +03:00
{
2021-01-22 18:47:14 +03:00
memcpy ( eth_hdr - > h_dest , ifobject - > dst_mac , ETH_ALEN ) ;
memcpy ( eth_hdr - > h_source , ifobject - > src_mac , ETH_ALEN ) ;
2020-12-08 00:53:30 +03:00
eth_hdr - > h_proto = htons ( ETH_P_IP ) ;
}
2021-01-22 18:47:14 +03:00
static void gen_ip_hdr ( struct ifobject * ifobject , struct iphdr * ip_hdr )
2020-12-08 00:53:30 +03:00
{
ip_hdr - > version = IP_PKT_VER ;
ip_hdr - > ihl = 0x5 ;
ip_hdr - > tos = IP_PKT_TOS ;
ip_hdr - > tot_len = htons ( IP_PKT_SIZE ) ;
ip_hdr - > id = 0 ;
ip_hdr - > frag_off = 0 ;
ip_hdr - > ttl = IPDEFTTL ;
ip_hdr - > protocol = IPPROTO_UDP ;
2021-01-22 18:47:14 +03:00
ip_hdr - > saddr = ifobject - > src_ip ;
ip_hdr - > daddr = ifobject - > dst_ip ;
2020-12-08 00:53:30 +03:00
ip_hdr - > check = 0 ;
}
2021-08-25 12:37:19 +03:00
/* Fill in the UDP header and stamp the payload area with the sequence
 * number so the receiver can verify in-order delivery.
 */
static void gen_udp_hdr(u32 payload, void *pkt, struct ifobject *ifobject,
			struct udphdr *udp_hdr)
{
	udp_hdr->source = htons(ifobject->src_port);
	udp_hdr->dest = htons(ifobject->dst_port);
	udp_hdr->len = htons(UDP_PKT_SIZE);
	memset32_htonl(pkt + PKT_HDR_SIZE, payload, UDP_PKT_DATA_SIZE);
}
static void gen_udp_csum ( struct udphdr * udp_hdr , struct iphdr * ip_hdr )
{
udp_hdr - > check = 0 ;
udp_hdr - > check =
udp_csum ( ip_hdr - > saddr , ip_hdr - > daddr , UDP_PKT_SIZE , IPPROTO_UDP , ( u16 * ) udp_hdr ) ;
}
2021-09-07 10:19:19 +03:00
/* Create the AF_XDP UMEM over the supplied buffer.
 * Returns 0 on success or the negative error from xsk_umem__create().
 * On success umem->buffer records the backing memory.
 */
static int xsk_configure_umem(struct xsk_umem_info *umem, void *buffer, u64 size)
{
	struct xsk_umem_config cfg = {
		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.frame_size = umem->frame_size,
		.frame_headroom = umem->frame_headroom,
		.flags = XSK_UMEM__DEFAULT_FLAGS
	};
	int ret;

	/* Unaligned-chunk mode is a per-test toggle carried on the umem. */
	if (umem->unaligned_mode)
		cfg.flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG;

	ret = xsk_umem__create(&umem->umem, buffer, size, &umem->fq, &umem->cq, &cfg);
	if (ret)
		return ret;

	umem->buffer = buffer;
	return 0;
}
2022-05-10 14:55:58 +03:00
static void enable_busy_poll ( struct xsk_socket_info * xsk )
{
int sock_opt ;
sock_opt = 1 ;
if ( setsockopt ( xsk_socket__fd ( xsk - > xsk ) , SOL_SOCKET , SO_PREFER_BUSY_POLL ,
( void * ) & sock_opt , sizeof ( sock_opt ) ) < 0 )
exit_with_error ( errno ) ;
sock_opt = 20 ;
if ( setsockopt ( xsk_socket__fd ( xsk - > xsk ) , SOL_SOCKET , SO_BUSY_POLL ,
( void * ) & sock_opt , sizeof ( sock_opt ) ) < 0 )
exit_with_error ( errno ) ;
sock_opt = BATCH_SIZE ;
if ( setsockopt ( xsk_socket__fd ( xsk - > xsk ) , SOL_SOCKET , SO_BUSY_POLL_BUDGET ,
( void * ) & sock_opt , sizeof ( sock_opt ) ) < 0 )
exit_with_error ( errno ) ;
}
2021-09-07 10:19:09 +03:00
/* Create an AF_XDP socket on queue 0 of the interface, bound to the given
 * UMEM. Rx/Tx rings are only requested when the interface actually
 * receives/transmits; 'shared' adds XDP_SHARED_UMEM to the bind flags.
 * Returns the result of xsk_socket__create().
 */
static int xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem,
				struct ifobject *ifobject, bool shared)
{
	struct xsk_socket_config cfg = {
		.rx_size = xsk->rxqsize,
		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD,
		.xdp_flags = ifobject->xdp_flags,
		.bind_flags = ifobject->bind_flags,
	};
	struct xsk_ring_prod *txr;
	struct xsk_ring_cons *rxr;

	xsk->umem = umem;
	if (shared)
		cfg.bind_flags |= XDP_SHARED_UMEM;

	txr = ifobject->tx_on ? &xsk->tx : NULL;
	rxr = ifobject->rx_on ? &xsk->rx : NULL;
	return xsk_socket__create(&xsk->xsk, ifobject->ifname, 0, umem->umem, rxr, txr, &cfg);
}
/* Command-line options; short forms are mirrored in the getopt string. */
static struct option long_options[] = {
	{"interface", required_argument, 0, 'i'},
	{"busy-poll", no_argument, 0, 'b'},
	{"dump-pkts", no_argument, 0, 'D'},
	{"verbose", no_argument, 0, 'v'},
	{0, 0, 0, 0}
};
/* Print command-line usage; prog is substituted into the %s placeholder. */
static void usage(const char *prog)
{
	const char *str =
		"  Usage: %s [OPTIONS]\n"
		"  Options:\n"
		"  -i, --interface      Use interface\n"
		"  -D, --dump-pkts      Dump packets L2 - L5\n"
		"  -v, --verbose        Verbose output\n"
		"  -b, --busy-poll      Enable busy poll\n";

	ksft_print_msg(str, prog);
}
2021-03-30 01:43:07 +03:00
/* Move the calling thread into the named network namespace.
 * Returns the open namespace fd (caller keeps it alive), or -1 when no
 * namespace name was given. Open/setns failures are fatal.
 * NOTE(review): fqns is only 26 bytes, so strncat silently truncates
 * namespace names longer than 10 chars — confirm against test_xsk.sh names.
 */
static int switch_namespace(const char *nsname)
{
	char fqns[26] = "/var/run/netns/";
	int nsfd;

	if (!nsname || strlen(nsname) == 0)
		return -1;

	strncat(fqns, nsname, sizeof(fqns) - strlen(fqns) - 1);
	nsfd = open(fqns, O_RDONLY);
	if (nsfd == -1)
		exit_with_error(errno);

	if (setns(nsfd, 0) == -1)
		exit_with_error(errno);

	print_verbose("NS switched: %s\n", nsname);
	return nsfd;
}
2021-09-07 10:19:11 +03:00
static bool validate_interface ( struct ifobject * ifobj )
2020-12-08 00:53:30 +03:00
{
2021-09-07 10:19:11 +03:00
if ( ! strcmp ( ifobj - > ifname , " " ) )
return false ;
return true ;
2020-12-08 00:53:30 +03:00
}
2021-09-07 10:19:20 +03:00
static void parse_command_line ( struct ifobject * ifobj_tx , struct ifobject * ifobj_rx , int argc ,
char * * argv )
2020-12-08 00:53:30 +03:00
{
2021-09-07 10:19:11 +03:00
struct ifobject * ifobj ;
u32 interface_nb = 0 ;
int option_index , c ;
2020-12-08 00:53:30 +03:00
opterr = 0 ;
for ( ; ; ) {
2021-09-07 10:19:11 +03:00
char * sptr , * token ;
2020-12-08 00:53:30 +03:00
2022-05-10 14:55:58 +03:00
c = getopt_long ( argc , argv , " i:Dvb " , long_options , & option_index ) ;
2020-12-08 00:53:30 +03:00
if ( c = = - 1 )
break ;
switch ( c ) {
case ' i ' :
2021-09-07 10:19:11 +03:00
if ( interface_nb = = 0 )
2021-09-07 10:19:20 +03:00
ifobj = ifobj_tx ;
2021-09-07 10:19:11 +03:00
else if ( interface_nb = = 1 )
2021-09-07 10:19:20 +03:00
ifobj = ifobj_rx ;
2021-09-07 10:19:11 +03:00
else
2020-12-08 00:53:30 +03:00
break ;
sptr = strndupa ( optarg , strlen ( optarg ) ) ;
2021-09-07 10:19:11 +03:00
memcpy ( ifobj - > ifname , strsep ( & sptr , " , " ) , MAX_INTERFACE_NAME_CHARS ) ;
2020-12-08 00:53:30 +03:00
token = strsep ( & sptr , " , " ) ;
if ( token )
2021-09-07 10:19:11 +03:00
memcpy ( ifobj - > nsname , token , MAX_INTERFACES_NAMESPACE_CHARS ) ;
interface_nb + + ;
2020-12-08 00:53:30 +03:00
break ;
case ' D ' :
2021-08-25 12:37:22 +03:00
opt_pkt_dump = true ;
2020-12-08 00:53:30 +03:00
break ;
2021-02-23 19:23:01 +03:00
case ' v ' :
2021-08-25 12:37:22 +03:00
opt_verbose = true ;
2021-02-23 19:23:01 +03:00
break ;
2022-05-10 14:55:58 +03:00
case ' b ' :
ifobj_tx - > busy_poll = true ;
ifobj_rx - > busy_poll = true ;
break ;
2020-12-08 00:53:30 +03:00
default :
usage ( basename ( argv [ 0 ] ) ) ;
ksft_exit_xfail ( ) ;
}
}
2021-09-07 10:19:11 +03:00
}
2020-12-08 00:53:30 +03:00
2021-09-07 10:19:11 +03:00
static void __test_spec_init ( struct test_spec * test , struct ifobject * ifobj_tx ,
struct ifobject * ifobj_rx )
{
u32 i , j ;
for ( i = 0 ; i < MAX_INTERFACES ; i + + ) {
struct ifobject * ifobj = i ? ifobj_rx : ifobj_tx ;
ifobj - > xsk = & ifobj - > xsk_arr [ 0 ] ;
2021-09-07 10:19:16 +03:00
ifobj - > use_poll = false ;
2022-05-10 14:56:04 +03:00
ifobj - > use_fill_ring = true ;
ifobj - > release_rx = true ;
2021-09-07 10:19:24 +03:00
ifobj - > pkt_stream = test - > pkt_stream_default ;
2022-05-10 14:56:02 +03:00
ifobj - > validation_func = NULL ;
2021-09-07 10:19:11 +03:00
2021-09-07 10:19:17 +03:00
if ( i = = 0 ) {
ifobj - > rx_on = false ;
ifobj - > tx_on = true ;
} else {
ifobj - > rx_on = true ;
ifobj - > tx_on = false ;
}
2021-09-07 10:19:11 +03:00
2022-01-25 11:29:45 +03:00
memset ( ifobj - > umem , 0 , sizeof ( * ifobj - > umem ) ) ;
ifobj - > umem - > num_frames = DEFAULT_UMEM_BUFFERS ;
ifobj - > umem - > frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE ;
2021-09-07 10:19:11 +03:00
for ( j = 0 ; j < MAX_SOCKETS ; j + + ) {
memset ( & ifobj - > xsk_arr [ j ] , 0 , sizeof ( ifobj - > xsk_arr [ j ] ) ) ;
2021-09-07 10:19:13 +03:00
ifobj - > xsk_arr [ j ] . rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS ;
2021-09-07 10:19:11 +03:00
}
2020-12-08 00:53:30 +03:00
}
2021-09-07 10:19:11 +03:00
test - > ifobj_tx = ifobj_tx ;
test - > ifobj_rx = ifobj_rx ;
2021-09-07 10:19:18 +03:00
test - > current_step = 0 ;
test - > total_steps = 1 ;
2021-09-07 10:19:19 +03:00
test - > nb_sockets = 1 ;
2022-05-10 14:55:59 +03:00
test - > fail = false ;
2021-09-07 10:19:11 +03:00
}
static void test_spec_init ( struct test_spec * test , struct ifobject * ifobj_tx ,
2021-09-07 10:19:20 +03:00
struct ifobject * ifobj_rx , enum test_mode mode )
2021-09-07 10:19:11 +03:00
{
2021-09-07 10:19:24 +03:00
struct pkt_stream * pkt_stream ;
2021-09-07 10:19:20 +03:00
u32 i ;
2021-09-07 10:19:24 +03:00
pkt_stream = test - > pkt_stream_default ;
2021-09-07 10:19:11 +03:00
memset ( test , 0 , sizeof ( * test ) ) ;
2021-09-07 10:19:24 +03:00
test - > pkt_stream_default = pkt_stream ;
2021-09-07 10:19:20 +03:00
for ( i = 0 ; i < MAX_INTERFACES ; i + + ) {
struct ifobject * ifobj = i ? ifobj_rx : ifobj_tx ;
ifobj - > xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST ;
if ( mode = = TEST_MODE_SKB )
ifobj - > xdp_flags | = XDP_FLAGS_SKB_MODE ;
else
ifobj - > xdp_flags | = XDP_FLAGS_DRV_MODE ;
ifobj - > bind_flags = XDP_USE_NEED_WAKEUP | XDP_COPY ;
}
2021-09-07 10:19:11 +03:00
__test_spec_init ( test , ifobj_tx , ifobj_rx ) ;
}
static void test_spec_reset ( struct test_spec * test )
{
__test_spec_init ( test , test - > ifobj_tx , test - > ifobj_rx ) ;
2020-12-08 00:53:30 +03:00
}
2021-09-07 10:19:15 +03:00
static void test_spec_set_name ( struct test_spec * test , const char * name )
{
strncpy ( test - > name , name , MAX_TEST_NAME_SIZE ) ;
}
2021-09-22 10:56:07 +03:00
static void pkt_stream_reset ( struct pkt_stream * pkt_stream )
{
if ( pkt_stream )
pkt_stream - > rx_pkt_nb = 0 ;
}
2021-08-25 12:37:20 +03:00
/* Return packet pkt_nb of the stream, or NULL past the end. */
static struct pkt *pkt_stream_get_pkt(struct pkt_stream *pkt_stream, u32 pkt_nb)
{
	return pkt_nb < pkt_stream->nb_pkts ? &pkt_stream->pkts[pkt_nb] : NULL;
}
2022-05-10 14:56:04 +03:00
/* Advance to the next packet the receiver should actually see.
 * Invalid packets were transmitted (so *pkts_sent is bumped for them too)
 * but are expected to be dropped before reaching the Rx socket.
 * Returns NULL once the stream is exhausted.
 */
static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream, u32 *pkts_sent)
{
	while (pkt_stream->rx_pkt_nb < pkt_stream->nb_pkts) {
		struct pkt *pkt = &pkt_stream->pkts[pkt_stream->rx_pkt_nb++];

		(*pkts_sent)++;
		if (pkt->valid)
			return pkt;
	}
	return NULL;
}
2021-09-07 10:19:24 +03:00
static void pkt_stream_delete ( struct pkt_stream * pkt_stream )
{
free ( pkt_stream - > pkts ) ;
free ( pkt_stream ) ;
}
static void pkt_stream_restore_default ( struct test_spec * test )
{
2022-05-10 14:56:04 +03:00
struct pkt_stream * tx_pkt_stream = test - > ifobj_tx - > pkt_stream ;
if ( tx_pkt_stream ! = test - > pkt_stream_default ) {
2021-09-07 10:19:26 +03:00
pkt_stream_delete ( test - > ifobj_tx - > pkt_stream ) ;
test - > ifobj_tx - > pkt_stream = test - > pkt_stream_default ;
}
2022-05-10 14:56:04 +03:00
if ( test - > ifobj_rx - > pkt_stream ! = test - > pkt_stream_default & &
test - > ifobj_rx - > pkt_stream ! = tx_pkt_stream )
pkt_stream_delete ( test - > ifobj_rx - > pkt_stream ) ;
2021-09-07 10:19:24 +03:00
test - > ifobj_rx - > pkt_stream = test - > pkt_stream_default ;
}
2021-09-07 10:19:27 +03:00
/* Allocate a zeroed stream with room for nb_pkts packets.
 * Returns NULL on allocation failure (nothing is leaked).
 */
static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts)
{
	struct pkt_stream *pkt_stream = calloc(1, sizeof(*pkt_stream));

	if (!pkt_stream)
		return NULL;

	pkt_stream->pkts = calloc(nb_pkts, sizeof(*pkt_stream->pkts));
	if (!pkt_stream->pkts) {
		free(pkt_stream);
		return NULL;
	}

	pkt_stream->nb_pkts = nb_pkts;
	return pkt_stream;
}
2022-05-10 14:56:04 +03:00
/* Set a packet's UMEM address and length; a packet is valid only if it fits
 * in a frame after accounting for XDP headroom, the configured frame
 * headroom and room for two minimum-size packets.
 */
static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr, u32 len)
{
	u32 max_len = umem->frame_size - XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 2 -
		      umem->frame_headroom;

	pkt->addr = addr;
	pkt->len = len;
	pkt->valid = len <= max_len;
}
2021-09-07 10:19:27 +03:00
/* Build a stream of nb_pkts equal-length packets laid out one per UMEM
 * frame (wrapping after num_frames); payload carries the sequence number.
 * Allocation failure is fatal.
 */
static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb_pkts, u32 pkt_len)
{
	struct pkt_stream *pkt_stream = __pkt_stream_alloc(nb_pkts);
	u32 i;

	if (!pkt_stream)
		exit_with_error(ENOMEM);

	pkt_stream->nb_pkts = nb_pkts;
	for (i = 0; i < nb_pkts; i++) {
		u64 addr = (u64)(i % umem->num_frames) * umem->frame_size;

		pkt_set(umem, &pkt_stream->pkts[i], addr, pkt_len);
		pkt_stream->pkts[i].payload = i;
	}

	return pkt_stream;
}
2021-09-07 10:19:25 +03:00
static struct pkt_stream * pkt_stream_clone ( struct xsk_umem_info * umem ,
struct pkt_stream * pkt_stream )
{
return pkt_stream_generate ( umem , pkt_stream - > nb_pkts , pkt_stream - > pkts [ 0 ] . len ) ;
}
2021-09-07 10:19:24 +03:00
/* Install a freshly generated stream on both Tx and Rx (shared pointer). */
static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len)
{
	struct pkt_stream *pkt_stream =
		pkt_stream_generate(test->ifobj_tx->umem, nb_pkts, pkt_len);

	test->ifobj_tx->pkt_stream = pkt_stream;
	test->ifobj_rx->pkt_stream = pkt_stream;
}
2021-09-22 10:56:12 +03:00
/* Clone the default stream, then re-set every odd packet with the given
 * length and an address shifted by offset. Used for invalid/corner-case
 * descriptor tests where half the packets differ.
 */
static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset)
{
	struct xsk_umem_info *umem = test->ifobj_tx->umem;
	struct pkt_stream *pkt_stream = pkt_stream_clone(umem, test->pkt_stream_default);
	u32 i;

	for (i = 1; i < test->pkt_stream_default->nb_pkts; i += 2)
		pkt_set(umem, &pkt_stream->pkts[i],
			(i % umem->num_frames) * umem->frame_size + offset, pkt_len);

	test->ifobj_tx->pkt_stream = pkt_stream;
	test->ifobj_rx->pkt_stream = pkt_stream;
}
2022-05-10 14:56:04 +03:00
static void pkt_stream_receive_half ( struct test_spec * test )
{
struct xsk_umem_info * umem = test - > ifobj_rx - > umem ;
struct pkt_stream * pkt_stream = test - > ifobj_tx - > pkt_stream ;
u32 i ;
test - > ifobj_rx - > pkt_stream = pkt_stream_generate ( umem , pkt_stream - > nb_pkts ,
pkt_stream - > pkts [ 0 ] . len ) ;
pkt_stream = test - > ifobj_rx - > pkt_stream ;
for ( i = 1 ; i < pkt_stream - > nb_pkts ; i + = 2 )
pkt_stream - > pkts [ i ] . valid = false ;
}
2021-08-25 12:37:20 +03:00
/* Write packet pkt_nb's headers and payload into its UMEM frame.
 * Returns the packet descriptor, or NULL past the end of the stream.
 * Invalid or runt packets are returned untouched - they are sent as-is.
 */
static struct pkt *pkt_generate(struct ifobject *ifobject, u32 pkt_nb)
{
	struct pkt *pkt = pkt_stream_get_pkt(ifobject->pkt_stream, pkt_nb);
	struct ethhdr *eth_hdr;
	struct iphdr *ip_hdr;
	struct udphdr *udp_hdr;
	void *data;

	if (!pkt)
		return NULL;
	if (!pkt->valid || pkt->len < MIN_PKT_SIZE)
		return pkt;

	data = xsk_umem__get_data(ifobject->umem->buffer, pkt->addr);
	eth_hdr = data;
	ip_hdr = data + sizeof(struct ethhdr);
	udp_hdr = data + sizeof(struct ethhdr) + sizeof(struct iphdr);

	gen_udp_hdr(pkt_nb, data, ifobject, udp_hdr);
	gen_ip_hdr(ifobject, ip_hdr);
	gen_udp_csum(udp_hdr, ip_hdr);
	gen_eth_hdr(ifobject, eth_hdr);

	return pkt;
}
2021-09-07 10:19:27 +03:00
/* Build a stream from caller-supplied packet templates (addr/len/valid are
 * copied; payload is overwritten with the sequence number) and install it
 * on both interfaces. Allocation failure is fatal.
 */
static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts)
{
	struct pkt_stream *pkt_stream = __pkt_stream_alloc(nb_pkts);
	u32 i;

	if (!pkt_stream)
		exit_with_error(ENOMEM);

	test->ifobj_tx->pkt_stream = pkt_stream;
	test->ifobj_rx->pkt_stream = pkt_stream;

	for (i = 0; i < nb_pkts; i++) {
		struct pkt *dst = &pkt_stream->pkts[i];

		dst->addr = pkts[i].addr;
		dst->len = pkts[i].len;
		dst->payload = i;
		dst->valid = pkts[i].valid;
	}
}
2021-08-25 12:37:15 +03:00
/* Dump the L2-L5 contents of a frame to stdout for -D debugging.
 * NOTE(review): the len parameter is currently unused; kept for interface
 * compatibility with existing callers.
 */
static void pkt_dump(void *pkt, u32 len)
{
	struct ethhdr *ethhdr = pkt;
	struct iphdr *iphdr = pkt + sizeof(*ethhdr);
	struct udphdr *udphdr = pkt + sizeof(*ethhdr) + sizeof(*iphdr);
	char s[INET_ADDRSTRLEN];
	int payload, i;

	/*extract L2 frame */
	fprintf(stdout, "DEBUG>> L2: dst mac: ");
	for (i = 0; i < ETH_ALEN; i++)
		fprintf(stdout, "%02X", ethhdr->h_dest[i]);

	fprintf(stdout, "\nDEBUG>> L2: src mac: ");
	for (i = 0; i < ETH_ALEN; i++)
		fprintf(stdout, "%02X", ethhdr->h_source[i]);

	/*extract L3 frame */
	fprintf(stdout, "\nDEBUG>> L3: ip_hdr->ihl: %02X\n", iphdr->ihl);
	fprintf(stdout, "DEBUG>> L3: ip_hdr->saddr: %s\n",
		inet_ntop(AF_INET, &iphdr->saddr, s, sizeof(s)));
	fprintf(stdout, "DEBUG>> L3: ip_hdr->daddr: %s\n",
		inet_ntop(AF_INET, &iphdr->daddr, s, sizeof(s)));
	/*extract L4 frame */
	fprintf(stdout, "DEBUG>> L4: udp_hdr->src: %d\n", ntohs(udphdr->source));
	fprintf(stdout, "DEBUG>> L4: udp_hdr->dst: %d\n", ntohs(udphdr->dest));
	/*extract L5 frame */
	payload = *((uint32_t *)(pkt + PKT_HDR_SIZE));
	fprintf(stdout, "DEBUG>> L5: payload: %d\n", payload);
	fprintf(stdout, "---------------------------------------\n");
}
2021-09-22 10:56:13 +03:00
/* Verify that a received buffer landed at the expected offset within its
 * UMEM frame (frame headroom + XDP headroom, plus the fill address's own
 * in-frame offset when the stream drives the fill ring). In unaligned mode
 * the configured frame headroom does not apply.
 */
static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream, u64 addr,
			      u64 pkt_stream_addr)
{
	u32 headroom = umem->unaligned_mode ? 0 : umem->frame_headroom;
	u32 offset = addr % umem->frame_size;
	u32 expected_offset;

	if (!pkt_stream->use_addr_for_fill)
		pkt_stream_addr = 0;

	expected_offset = (pkt_stream_addr + headroom + XDP_PACKET_HEADROOM) % umem->frame_size;
	if (offset == expected_offset)
		return true;

	ksft_print_msg("[%s] expected [%u], got [%u]\n", __func__, expected_offset, offset);
	return false;
}
2021-09-07 10:19:25 +03:00
/* Validate a received frame against its expected packet descriptor:
 * length must match, and for full-size frames the IP version/TOS markers
 * and the payload sequence number must be as generated. Runt packets
 * (below MIN_PKT_SIZE on either side) are accepted without inspection.
 */
static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
{
	void *data = xsk_umem__get_data(buffer, addr);
	struct iphdr *iphdr = (struct iphdr *)(data + sizeof(struct ethhdr));

	if (!pkt) {
		ksft_print_msg("[%s] too many packets received\n", __func__);
		return false;
	}

	/* Do not try to verify packets that are smaller than minimum size. */
	if (len < MIN_PKT_SIZE || pkt->len < MIN_PKT_SIZE)
		return true;

	if (pkt->len != len) {
		ksft_print_msg("[%s] expected length [%d], got length [%d]\n",
			       __func__, pkt->len, len);
		return false;
	}

	if (iphdr->version != IP_PKT_VER || iphdr->tos != IP_PKT_TOS) {
		ksft_print_msg("Invalid frame received: ");
		ksft_print_msg("[IP_PKT_VER: %02X], [IP_PKT_TOS: %02X]\n", iphdr->version,
			       iphdr->tos);
		return false;
	}

	{
		u32 seqnum = ntohl(*((u32 *)(data + PKT_HDR_SIZE)));

		if (opt_pkt_dump)
			pkt_dump(data, PKT_SIZE);

		if (pkt->payload != seqnum) {
			ksft_print_msg("[%s] expected seqnum [%d], got seqnum [%d]\n",
				       __func__, pkt->payload, seqnum);
			return false;
		}
	}

	return true;
}
2020-12-08 00:53:30 +03:00
static void kick_tx ( struct xsk_socket_info * xsk )
{
int ret ;
ret = sendto ( xsk_socket__fd ( xsk - > xsk ) , NULL , 0 , MSG_DONTWAIT , NULL , 0 ) ;
2022-05-10 14:55:58 +03:00
if ( ret > = 0 )
return ;
if ( errno = = ENOBUFS | | errno = = EAGAIN | | errno = = EBUSY | | errno = = ENETDOWN ) {
usleep ( 100 ) ;
2020-12-08 00:53:30 +03:00
return ;
2022-05-10 14:55:58 +03:00
}
2020-12-08 00:53:30 +03:00
exit_with_error ( errno ) ;
}
2022-05-10 14:55:58 +03:00
static void kick_rx ( struct xsk_socket_info * xsk )
{
int ret ;
ret = recvfrom ( xsk_socket__fd ( xsk - > xsk ) , NULL , 0 , MSG_DONTWAIT , NULL , NULL ) ;
if ( ret < 0 )
exit_with_error ( errno ) ;
}
2022-05-10 14:55:59 +03:00
static int complete_pkts ( struct xsk_socket_info * xsk , int batch_size )
2020-12-08 00:53:30 +03:00
{
unsigned int rcvd ;
u32 idx ;
2021-03-30 01:43:16 +03:00
if ( xsk_ring_prod__needs_wakeup ( & xsk - > tx ) )
2020-12-08 00:53:30 +03:00
kick_tx ( xsk ) ;
rcvd = xsk_ring_cons__peek ( & xsk - > umem - > cq , batch_size , & idx ) ;
if ( rcvd ) {
2021-09-07 10:19:27 +03:00
if ( rcvd > xsk - > outstanding_tx ) {
u64 addr = * xsk_ring_cons__comp_addr ( & xsk - > umem - > cq , idx + rcvd - 1 ) ;
2022-05-10 14:55:59 +03:00
ksft_print_msg ( " [%s] Too many packets completed \n " , __func__ ) ;
2021-09-07 10:19:27 +03:00
ksft_print_msg ( " Last completion address: %llx \n " , addr ) ;
2022-05-10 14:55:59 +03:00
return TEST_FAILURE ;
2021-09-07 10:19:27 +03:00
}
2020-12-08 00:53:30 +03:00
xsk_ring_cons__release ( & xsk - > umem - > cq , rcvd ) ;
xsk - > outstanding_tx - = rcvd ;
}
2022-05-10 14:55:59 +03:00
return TEST_PASS ;
2020-12-08 00:53:30 +03:00
}
2022-05-10 14:55:59 +03:00
static int receive_pkts ( struct ifobject * ifobj , struct pollfd * fds )
2020-12-08 00:53:30 +03:00
{
2022-05-10 14:56:00 +03:00
struct timeval tv_end , tv_now , tv_timeout = { RECV_TMOUT , 0 } ;
2022-05-10 14:56:04 +03:00
u32 idx_rx = 0 , idx_fq = 0 , rcvd , i , pkts_sent = 0 ;
2022-05-10 14:55:58 +03:00
struct pkt_stream * pkt_stream = ifobj - > pkt_stream ;
struct xsk_socket_info * xsk = ifobj - > xsk ;
2021-09-22 10:56:13 +03:00
struct xsk_umem_info * umem = xsk - > umem ;
2022-05-10 14:56:04 +03:00
struct pkt * pkt ;
2020-12-08 00:53:30 +03:00
int ret ;
2022-05-10 14:56:00 +03:00
ret = gettimeofday ( & tv_now , NULL ) ;
if ( ret )
exit_with_error ( errno ) ;
timeradd ( & tv_now , & tv_timeout , & tv_end ) ;
2022-05-10 14:56:04 +03:00
pkt = pkt_stream_get_next_rx_pkt ( pkt_stream , & pkts_sent ) ;
2021-08-25 12:37:20 +03:00
while ( pkt ) {
2022-05-10 14:56:00 +03:00
ret = gettimeofday ( & tv_now , NULL ) ;
if ( ret )
exit_with_error ( errno ) ;
if ( timercmp ( & tv_now , & tv_end , > ) ) {
ksft_print_msg ( " ERROR: [%s] Receive loop timed out \n " , __func__ ) ;
return TEST_FAILURE ;
}
2022-05-10 14:55:58 +03:00
kick_rx ( xsk ) ;
2021-08-25 12:37:20 +03:00
rcvd = xsk_ring_cons__peek ( & xsk - > rx , BATCH_SIZE , & idx_rx ) ;
if ( ! rcvd ) {
2021-09-22 10:56:13 +03:00
if ( xsk_ring_prod__needs_wakeup ( & umem - > fq ) ) {
2021-08-25 12:37:20 +03:00
ret = poll ( fds , 1 , POLL_TMOUT ) ;
if ( ret < 0 )
exit_with_error ( - ret ) ;
}
continue ;
2020-12-08 00:53:30 +03:00
}
2022-05-10 14:56:04 +03:00
if ( ifobj - > use_fill_ring ) {
ret = xsk_ring_prod__reserve ( & umem - > fq , rcvd , & idx_fq ) ;
while ( ret ! = rcvd ) {
2021-08-25 12:37:20 +03:00
if ( ret < 0 )
exit_with_error ( - ret ) ;
2022-05-10 14:56:04 +03:00
if ( xsk_ring_prod__needs_wakeup ( & umem - > fq ) ) {
ret = poll ( fds , 1 , POLL_TMOUT ) ;
if ( ret < 0 )
exit_with_error ( - ret ) ;
}
ret = xsk_ring_prod__reserve ( & umem - > fq , rcvd , & idx_fq ) ;
2021-08-25 12:37:20 +03:00
}
2020-12-08 00:53:30 +03:00
}
2021-08-25 12:37:20 +03:00
for ( i = 0 ; i < rcvd ; i + + ) {
const struct xdp_desc * desc = xsk_ring_cons__rx_desc ( & xsk - > rx , idx_rx + + ) ;
u64 addr = desc - > addr , orig ;
2021-01-22 18:47:22 +03:00
2021-08-25 12:37:20 +03:00
orig = xsk_umem__extract_addr ( addr ) ;
addr = xsk_umem__add_offset_to_addr ( addr ) ;
2021-09-22 10:56:13 +03:00
2022-05-10 14:55:59 +03:00
if ( ! is_pkt_valid ( pkt , umem - > buffer , addr , desc - > len ) | |
! is_offset_correct ( umem , pkt_stream , addr , pkt - > addr ) )
return TEST_FAILURE ;
2020-12-08 00:53:30 +03:00
2022-05-10 14:56:04 +03:00
if ( ifobj - > use_fill_ring )
* xsk_ring_prod__fill_addr ( & umem - > fq , idx_fq + + ) = orig ;
pkt = pkt_stream_get_next_rx_pkt ( pkt_stream , & pkts_sent ) ;
2021-08-25 12:37:20 +03:00
}
2020-12-08 00:53:30 +03:00
2022-05-10 14:56:04 +03:00
if ( ifobj - > use_fill_ring )
xsk_ring_prod__submit ( & umem - > fq , rcvd ) ;
if ( ifobj - > release_rx )
xsk_ring_cons__release ( & xsk - > rx , rcvd ) ;
2021-09-22 10:56:10 +03:00
pthread_mutex_lock ( & pacing_mutex ) ;
2022-05-10 14:56:04 +03:00
pkts_in_flight - = pkts_sent ;
2021-09-22 10:56:10 +03:00
if ( pkts_in_flight < umem - > num_frames )
pthread_cond_signal ( & pacing_cond ) ;
pthread_mutex_unlock ( & pacing_mutex ) ;
2022-05-10 14:56:04 +03:00
pkts_sent = 0 ;
2020-12-08 00:53:30 +03:00
}
2022-05-10 14:55:59 +03:00
return TEST_PASS ;
2020-12-08 00:53:30 +03:00
}
2022-05-10 14:55:59 +03:00
/* Transmit one batch (up to BATCH_SIZE) of packets from the stream,
 * advancing *pkt_nb past the packets queued. Blocks on the pacing
 * condition when too many valid packets are already in flight so the
 * receiver's umem does not run out of frames.
 *
 * Returns TEST_PASS, or TEST_FAILURE if completion reaping fails.
 */
static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb)
{
	struct xsk_socket_info *xsk = ifobject->xsk;
	u32 i, idx, valid_pkts = 0;

	/* Keep reaping completions until a full batch of tx slots is free. */
	while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE)
		complete_pkts(xsk, BATCH_SIZE);

	for (i = 0; i < BATCH_SIZE; i++) {
		struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i);
		struct pkt *pkt = pkt_generate(ifobject, *pkt_nb);

		if (!pkt)
			break;

		tx_desc->addr = pkt->addr;
		tx_desc->len = pkt->len;
		(*pkt_nb)++;
		if (pkt->valid)
			valid_pkts++;
	}

	pthread_mutex_lock(&pacing_mutex);
	pkts_in_flight += valid_pkts;
	/* pkts_in_flight might be negative if many invalid packets are sent */
	if (pkts_in_flight >= (int)(ifobject->umem->num_frames - BATCH_SIZE)) {
		kick_tx(xsk);
		pthread_cond_wait(&pacing_cond, &pacing_mutex);
	}
	pthread_mutex_unlock(&pacing_mutex);

	xsk_ring_prod__submit(&xsk->tx, i);
	xsk->outstanding_tx += valid_pkts;
	if (complete_pkts(xsk, i))
		return TEST_FAILURE;

	usleep(10);
	return TEST_PASS;
}
static void wait_for_tx_completion ( struct xsk_socket_info * xsk )
2020-12-08 00:53:30 +03:00
{
2021-08-25 12:37:20 +03:00
while ( xsk - > outstanding_tx )
complete_pkts ( xsk , BATCH_SIZE ) ;
2020-12-08 00:53:30 +03:00
}
2022-05-10 14:55:59 +03:00
static int send_pkts ( struct test_spec * test , struct ifobject * ifobject )
2020-12-08 00:53:30 +03:00
{
2021-09-07 10:19:22 +03:00
struct pollfd fds = { } ;
2021-08-25 12:37:20 +03:00
u32 pkt_cnt = 0 ;
2020-12-08 00:53:30 +03:00
2021-09-07 10:19:22 +03:00
fds . fd = xsk_socket__fd ( ifobject - > xsk - > xsk ) ;
fds . events = POLLOUT ;
2020-12-08 00:53:30 +03:00
2021-08-25 12:37:20 +03:00
while ( pkt_cnt < ifobject - > pkt_stream - > nb_pkts ) {
2022-05-10 14:55:59 +03:00
int err ;
2021-09-07 10:19:16 +03:00
if ( ifobject - > use_poll ) {
2021-08-25 12:37:20 +03:00
int ret ;
2021-09-07 10:19:22 +03:00
ret = poll ( & fds , 1 , POLL_TMOUT ) ;
2020-12-08 00:53:30 +03:00
if ( ret < = 0 )
continue ;
2021-09-07 10:19:22 +03:00
if ( ! ( fds . revents & POLLOUT ) )
2020-12-08 00:53:30 +03:00
continue ;
}
2022-05-10 14:55:59 +03:00
err = __send_pkts ( ifobject , & pkt_cnt ) ;
if ( err | | test - > fail )
return TEST_FAILURE ;
2020-12-08 00:53:30 +03:00
}
2021-08-25 12:37:20 +03:00
wait_for_tx_completion ( ifobject - > xsk ) ;
2022-05-10 14:55:59 +03:00
return TEST_PASS ;
2020-12-08 00:53:30 +03:00
}
2022-05-10 14:56:02 +03:00
static int get_xsk_stats ( struct xsk_socket * xsk , struct xdp_statistics * stats )
{
int fd = xsk_socket__fd ( xsk ) , err ;
socklen_t optlen , expected_len ;
optlen = sizeof ( * stats ) ;
err = getsockopt ( fd , SOL_XDP , XDP_STATISTICS , stats , & optlen ) ;
if ( err ) {
ksft_print_msg ( " [%s] getsockopt(XDP_STATISTICS) error %u %s \n " ,
__func__ , - err , strerror ( - err ) ) ;
return TEST_FAILURE ;
}
expected_len = sizeof ( struct xdp_statistics ) ;
if ( optlen ! = expected_len ) {
ksft_print_msg ( " [%s] getsockopt optlen error. Expected: %u got: %u \n " ,
__func__ , expected_len , optlen ) ;
return TEST_FAILURE ;
}
return TEST_PASS ;
}
static int validate_rx_dropped ( struct ifobject * ifobject )
2021-02-23 19:23:04 +03:00
{
2021-08-25 12:37:16 +03:00
struct xsk_socket * xsk = ifobject - > xsk - > xsk ;
2021-02-23 19:23:04 +03:00
struct xdp_statistics stats ;
int err ;
2022-05-10 14:55:58 +03:00
kick_rx ( ifobject - > xsk ) ;
2022-05-10 14:56:02 +03:00
err = get_xsk_stats ( xsk , & stats ) ;
if ( err )
2022-05-10 14:55:59 +03:00
return TEST_FAILURE ;
2021-02-23 19:23:04 +03:00
2022-05-10 14:56:04 +03:00
if ( stats . rx_dropped = = ifobject - > pkt_stream - > nb_pkts / 2 )
2022-05-10 14:56:02 +03:00
return TEST_PASS ;
2021-02-23 19:23:04 +03:00
2022-05-10 14:56:04 +03:00
return TEST_FAILURE ;
2022-05-10 14:56:02 +03:00
}
static int validate_rx_full ( struct ifobject * ifobject )
{
struct xsk_socket * xsk = ifobject - > xsk - > xsk ;
struct xdp_statistics stats ;
int err ;
2022-05-10 14:56:04 +03:00
usleep ( 1000 ) ;
2022-05-10 14:56:02 +03:00
kick_rx ( ifobject - > xsk ) ;
err = get_xsk_stats ( xsk , & stats ) ;
if ( err )
return TEST_FAILURE ;
2022-05-10 14:56:04 +03:00
if ( stats . rx_ring_full )
2022-05-10 14:56:02 +03:00
return TEST_PASS ;
2022-05-10 14:56:04 +03:00
return TEST_FAILURE ;
2022-05-10 14:56:02 +03:00
}
static int validate_fill_empty ( struct ifobject * ifobject )
{
struct xsk_socket * xsk = ifobject - > xsk - > xsk ;
struct xdp_statistics stats ;
int err ;
2022-05-10 14:56:04 +03:00
usleep ( 1000 ) ;
2022-05-10 14:56:02 +03:00
kick_rx ( ifobject - > xsk ) ;
err = get_xsk_stats ( xsk , & stats ) ;
if ( err )
return TEST_FAILURE ;
2022-05-10 14:56:04 +03:00
if ( stats . rx_fill_ring_empty_descs )
2022-05-10 14:56:02 +03:00
return TEST_PASS ;
2021-08-25 12:37:16 +03:00
2022-05-10 14:56:04 +03:00
return TEST_FAILURE ;
2021-08-25 12:37:16 +03:00
}
2022-05-10 14:56:02 +03:00
static int validate_tx_invalid_descs ( struct ifobject * ifobject )
2021-08-25 12:37:16 +03:00
{
struct xsk_socket * xsk = ifobject - > xsk - > xsk ;
int fd = xsk_socket__fd ( xsk ) ;
struct xdp_statistics stats ;
socklen_t optlen ;
int err ;
optlen = sizeof ( stats ) ;
err = getsockopt ( fd , SOL_XDP , XDP_STATISTICS , & stats , & optlen ) ;
if ( err ) {
2022-05-10 14:55:59 +03:00
ksft_print_msg ( " [%s] getsockopt(XDP_STATISTICS) error %u %s \n " ,
__func__ , - err , strerror ( - err ) ) ;
return TEST_FAILURE ;
2021-08-25 12:37:16 +03:00
}
2022-05-10 14:56:04 +03:00
if ( stats . tx_invalid_descs ! = ifobject - > pkt_stream - > nb_pkts / 2 ) {
2022-05-10 14:55:59 +03:00
ksft_print_msg ( " [%s] tx_invalid_descs incorrect. Got [%u] expected [%u] \n " ,
__func__ , stats . tx_invalid_descs , ifobject - > pkt_stream - > nb_pkts ) ;
return TEST_FAILURE ;
}
2021-08-25 12:37:16 +03:00
2022-05-10 14:55:59 +03:00
return TEST_PASS ;
2021-02-23 19:23:04 +03:00
}
2021-09-07 10:19:18 +03:00
static void thread_common_ops ( struct test_spec * test , struct ifobject * ifobject )
2020-12-08 00:53:30 +03:00
{
2022-01-25 11:29:45 +03:00
u64 umem_sz = ifobject - > umem - > num_frames * ifobject - > umem - > frame_size ;
2021-09-07 10:19:25 +03:00
int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE ;
2022-06-29 17:34:57 +03:00
LIBBPF_OPTS ( bpf_xdp_query_opts , opts ) ;
2022-01-25 11:29:45 +03:00
int ret , ifindex ;
void * bufs ;
2021-09-07 10:19:19 +03:00
u32 i ;
2020-12-08 00:53:30 +03:00
2021-03-30 01:43:13 +03:00
ifobject - > ns_fd = switch_namespace ( ifobject - > nsname ) ;
2021-09-07 10:19:25 +03:00
if ( ifobject - > umem - > unaligned_mode )
mmap_flags | = MAP_HUGETLB ;
2022-01-25 11:29:45 +03:00
bufs = mmap ( NULL , umem_sz , PROT_READ | PROT_WRITE , mmap_flags , - 1 , 0 ) ;
if ( bufs = = MAP_FAILED )
exit_with_error ( errno ) ;
2021-03-30 01:43:08 +03:00
2022-01-25 11:29:45 +03:00
ret = xsk_configure_umem ( ifobject - > umem , bufs , umem_sz ) ;
if ( ret )
exit_with_error ( - ret ) ;
2021-09-07 10:19:09 +03:00
2022-01-25 11:29:45 +03:00
for ( i = 0 ; i < test - > nb_sockets ; i + + ) {
u32 ctr = 0 ;
2020-12-08 00:53:30 +03:00
2021-09-22 10:56:09 +03:00
while ( ctr + + < SOCK_RECONF_CTR ) {
2022-01-25 11:29:45 +03:00
ret = xsk_configure_socket ( & ifobject - > xsk_arr [ i ] , ifobject - > umem ,
ifobject , ! ! i ) ;
2021-09-07 10:19:19 +03:00
if ( ! ret )
break ;
2021-09-07 10:19:09 +03:00
2021-09-07 10:19:19 +03:00
/* Retry if it fails as xsk_socket__create() is asynchronous */
if ( ctr > = SOCK_RECONF_CTR )
exit_with_error ( - ret ) ;
usleep ( USLEEP_MAX ) ;
}
2022-05-10 14:55:58 +03:00
if ( ifobject - > busy_poll )
enable_busy_poll ( & ifobject - > xsk_arr [ i ] ) ;
2021-03-30 01:43:13 +03:00
}
2021-09-07 10:19:09 +03:00
ifobject - > xsk = & ifobject - > xsk_arr [ 0 ] ;
2022-01-25 11:29:45 +03:00
if ( ! ifobject - > rx_on )
return ;
ifindex = if_nametoindex ( ifobject - > ifname ) ;
if ( ! ifindex )
exit_with_error ( errno ) ;
2022-06-29 17:34:56 +03:00
ret = xsk_setup_xdp_prog_xsk ( ifobject - > xsk - > xsk , & ifobject - > xsk_map_fd ) ;
2022-01-25 11:29:45 +03:00
if ( ret )
exit_with_error ( - ret ) ;
2022-06-29 17:34:57 +03:00
ret = bpf_xdp_query ( ifindex , ifobject - > xdp_flags , & opts ) ;
if ( ret )
exit_with_error ( - ret ) ;
if ( ifobject - > xdp_flags & XDP_FLAGS_SKB_MODE ) {
if ( opts . attach_mode ! = XDP_ATTACHED_SKB ) {
ksft_print_msg ( " ERROR: [%s] XDP prog not in SKB mode \n " ) ;
exit_with_error ( - EINVAL ) ;
}
} else if ( ifobject - > xdp_flags & XDP_FLAGS_DRV_MODE ) {
if ( opts . attach_mode ! = XDP_ATTACHED_DRV ) {
ksft_print_msg ( " ERROR: [%s] XDP prog not in DRV mode \n " ) ;
exit_with_error ( - EINVAL ) ;
}
}
2022-01-25 11:29:45 +03:00
ret = xsk_socket__update_xskmap ( ifobject - > xsk - > xsk , ifobject - > xsk_map_fd ) ;
if ( ret )
exit_with_error ( - ret ) ;
2020-12-08 00:53:30 +03:00
}
2021-03-30 01:43:13 +03:00
static void testapp_cleanup_xsk_res ( struct ifobject * ifobj )
{
2021-09-07 10:19:24 +03:00
print_verbose ( " Destroying socket \n " ) ;
2021-09-07 10:19:18 +03:00
xsk_socket__delete ( ifobj - > xsk - > xsk ) ;
2021-09-07 10:19:27 +03:00
munmap ( ifobj - > umem - > buffer , ifobj - > umem - > num_frames * ifobj - > umem - > frame_size ) ;
2021-09-07 10:19:18 +03:00
xsk_umem__delete ( ifobj - > umem - > umem ) ;
2021-03-30 01:43:13 +03:00
}
2021-03-30 01:43:08 +03:00
static void * worker_testapp_validate_tx ( void * arg )
2020-12-08 00:53:30 +03:00
{
2021-09-07 10:19:18 +03:00
struct test_spec * test = ( struct test_spec * ) arg ;
struct ifobject * ifobject = test - > ifobj_tx ;
2022-05-10 14:55:59 +03:00
int err ;
2020-12-08 00:53:30 +03:00
2021-09-07 10:19:18 +03:00
if ( test - > current_step = = 1 )
thread_common_ops ( test , ifobject ) ;
2020-12-08 00:53:30 +03:00
2021-08-25 12:37:20 +03:00
print_verbose ( " Sending %d packets on interface %s \n " , ifobject - > pkt_stream - > nb_pkts ,
ifobject - > ifname ) ;
2022-05-10 14:55:59 +03:00
err = send_pkts ( test , ifobject ) ;
2020-12-08 00:53:30 +03:00
2022-05-10 14:56:04 +03:00
if ( ! err & & ifobject - > validation_func )
2022-05-10 14:56:02 +03:00
err = ifobject - > validation_func ( ifobject ) ;
2022-05-10 14:56:04 +03:00
if ( err )
2022-05-10 14:55:59 +03:00
report_failure ( test ) ;
2021-08-25 12:37:16 +03:00
2022-05-10 14:55:59 +03:00
if ( test - > total_steps = = test - > current_step | | err )
2021-09-07 10:19:18 +03:00
testapp_cleanup_xsk_res ( ifobject ) ;
2021-03-30 01:43:08 +03:00
pthread_exit ( NULL ) ;
}
2020-12-08 00:53:30 +03:00
2021-09-07 10:19:25 +03:00
static void xsk_populate_fill_ring ( struct xsk_umem_info * umem , struct pkt_stream * pkt_stream )
{
2021-09-22 10:56:08 +03:00
u32 idx = 0 , i , buffers_to_fill ;
2021-09-07 10:19:25 +03:00
int ret ;
2021-09-22 10:56:08 +03:00
if ( umem - > num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS )
buffers_to_fill = umem - > num_frames ;
else
buffers_to_fill = XSK_RING_PROD__DEFAULT_NUM_DESCS ;
ret = xsk_ring_prod__reserve ( & umem - > fq , buffers_to_fill , & idx ) ;
if ( ret ! = buffers_to_fill )
2021-09-07 10:19:25 +03:00
exit_with_error ( ENOSPC ) ;
2021-09-22 10:56:08 +03:00
for ( i = 0 ; i < buffers_to_fill ; i + + ) {
2021-09-07 10:19:25 +03:00
u64 addr ;
if ( pkt_stream - > use_addr_for_fill ) {
struct pkt * pkt = pkt_stream_get_pkt ( pkt_stream , i ) ;
if ( ! pkt )
break ;
addr = pkt - > addr ;
} else {
2021-09-22 10:56:13 +03:00
addr = i * umem - > frame_size ;
2021-09-07 10:19:25 +03:00
}
* xsk_ring_prod__fill_addr ( & umem - > fq , idx + + ) = addr ;
}
2021-09-22 10:56:08 +03:00
xsk_ring_prod__submit ( & umem - > fq , buffers_to_fill ) ;
2021-09-07 10:19:25 +03:00
}
2021-03-30 01:43:08 +03:00
static void * worker_testapp_validate_rx ( void * arg )
{
2021-09-07 10:19:18 +03:00
struct test_spec * test = ( struct test_spec * ) arg ;
struct ifobject * ifobject = test - > ifobj_rx ;
2021-09-07 10:19:22 +03:00
struct pollfd fds = { } ;
2022-05-10 14:55:59 +03:00
int err ;
2020-12-08 00:53:30 +03:00
2021-09-07 10:19:18 +03:00
if ( test - > current_step = = 1 )
thread_common_ops ( test , ifobject ) ;
2020-12-08 00:53:30 +03:00
2021-09-07 10:19:26 +03:00
xsk_populate_fill_ring ( ifobject - > umem , ifobject - > pkt_stream ) ;
2020-12-08 00:53:30 +03:00
2021-09-07 10:19:22 +03:00
fds . fd = xsk_socket__fd ( ifobject - > xsk - > xsk ) ;
fds . events = POLLIN ;
2021-02-23 19:23:04 +03:00
2021-03-30 01:43:15 +03:00
pthread_barrier_wait ( & barr ) ;
2020-12-08 00:53:30 +03:00
2022-05-10 14:56:04 +03:00
err = receive_pkts ( ifobject , & fds ) ;
2022-05-10 14:55:59 +03:00
2022-05-10 14:56:04 +03:00
if ( ! err & & ifobject - > validation_func )
err = ifobject - > validation_func ( ifobject ) ;
2022-05-10 14:55:59 +03:00
if ( err ) {
report_failure ( test ) ;
pthread_mutex_lock ( & pacing_mutex ) ;
pthread_cond_signal ( & pacing_cond ) ;
pthread_mutex_unlock ( & pacing_mutex ) ;
}
2020-12-08 00:53:32 +03:00
2022-05-10 14:55:59 +03:00
if ( test - > total_steps = = test - > current_step | | err )
2021-09-07 10:19:18 +03:00
testapp_cleanup_xsk_res ( ifobject ) ;
2020-12-08 00:53:30 +03:00
pthread_exit ( NULL ) ;
}
2022-05-10 14:55:59 +03:00
static int testapp_validate_traffic ( struct test_spec * test )
2020-12-08 00:53:30 +03:00
{
2021-09-07 10:19:11 +03:00
struct ifobject * ifobj_tx = test - > ifobj_tx ;
struct ifobject * ifobj_rx = test - > ifobj_rx ;
2021-09-07 10:19:21 +03:00
pthread_t t0 , t1 ;
2021-01-22 18:47:22 +03:00
2021-03-30 01:43:15 +03:00
if ( pthread_barrier_init ( & barr , NULL , 2 ) )
exit_with_error ( errno ) ;
2020-12-08 00:53:30 +03:00
2021-09-07 10:19:18 +03:00
test - > current_step + + ;
2021-09-22 10:56:07 +03:00
pkt_stream_reset ( ifobj_rx - > pkt_stream ) ;
2021-09-22 10:56:10 +03:00
pkts_in_flight = 0 ;
2021-08-25 12:37:20 +03:00
2020-12-08 00:53:30 +03:00
/*Spawn RX thread */
2021-09-07 10:19:18 +03:00
pthread_create ( & t0 , NULL , ifobj_rx - > func_ptr , test ) ;
2020-12-08 00:53:30 +03:00
2021-03-30 01:43:15 +03:00
pthread_barrier_wait ( & barr ) ;
if ( pthread_barrier_destroy ( & barr ) )
2020-12-08 00:53:30 +03:00
exit_with_error ( errno ) ;
/*Spawn TX thread */
2021-09-07 10:19:18 +03:00
pthread_create ( & t1 , NULL , ifobj_tx - > func_ptr , test ) ;
2020-12-08 00:53:30 +03:00
pthread_join ( t1 , NULL ) ;
pthread_join ( t0 , NULL ) ;
2022-05-10 14:55:59 +03:00
return ! ! test - > fail ;
2020-12-08 00:53:32 +03:00
}
2021-09-07 10:19:11 +03:00
static void testapp_teardown ( struct test_spec * test )
2021-03-30 01:43:10 +03:00
{
int i ;
2021-09-07 10:19:15 +03:00
test_spec_set_name ( test , " TEARDOWN " ) ;
2021-03-30 01:43:10 +03:00
for ( i = 0 ; i < MAX_TEARDOWN_ITER ; i + + ) {
2022-05-10 14:55:59 +03:00
if ( testapp_validate_traffic ( test ) )
return ;
2021-09-07 10:19:11 +03:00
test_spec_reset ( test ) ;
2021-03-30 01:43:10 +03:00
}
}
2021-09-07 10:19:11 +03:00
static void swap_directions ( struct ifobject * * ifobj1 , struct ifobject * * ifobj2 )
2021-03-30 01:43:10 +03:00
{
2021-09-07 10:19:11 +03:00
thread_func_t tmp_func_ptr = ( * ifobj1 ) - > func_ptr ;
struct ifobject * tmp_ifobj = ( * ifobj1 ) ;
2021-03-30 01:43:10 +03:00
2021-09-07 10:19:11 +03:00
( * ifobj1 ) - > func_ptr = ( * ifobj2 ) - > func_ptr ;
( * ifobj2 ) - > func_ptr = tmp_func_ptr ;
2021-03-30 01:43:10 +03:00
2021-09-07 10:19:11 +03:00
* ifobj1 = * ifobj2 ;
* ifobj2 = tmp_ifobj ;
2021-03-30 01:43:10 +03:00
}
2021-09-07 10:19:11 +03:00
static void testapp_bidi ( struct test_spec * test )
2020-12-08 00:53:32 +03:00
{
2021-09-07 10:19:15 +03:00
test_spec_set_name ( test , " BIDIRECTIONAL " ) ;
2021-09-07 10:19:17 +03:00
test - > ifobj_tx - > rx_on = true ;
test - > ifobj_rx - > tx_on = true ;
2021-09-07 10:19:18 +03:00
test - > total_steps = 2 ;
2022-05-10 14:55:59 +03:00
if ( testapp_validate_traffic ( test ) )
return ;
2021-09-07 10:19:18 +03:00
print_verbose ( " Switching Tx/Rx vectors \n " ) ;
swap_directions ( & test - > ifobj_rx , & test - > ifobj_tx ) ;
testapp_validate_traffic ( test ) ;
2020-12-08 00:53:32 +03:00
2021-09-07 10:19:11 +03:00
swap_directions ( & test - > ifobj_rx , & test - > ifobj_tx ) ;
2020-12-08 00:53:30 +03:00
}
2021-09-07 10:19:11 +03:00
static void swap_xsk_resources ( struct ifobject * ifobj_tx , struct ifobject * ifobj_rx )
2021-03-30 01:43:13 +03:00
{
2022-01-25 11:29:45 +03:00
int ret ;
2021-09-07 10:19:11 +03:00
xsk_socket__delete ( ifobj_tx - > xsk - > xsk ) ;
xsk_socket__delete ( ifobj_rx - > xsk - > xsk ) ;
ifobj_tx - > xsk = & ifobj_tx - > xsk_arr [ 1 ] ;
ifobj_rx - > xsk = & ifobj_rx - > xsk_arr [ 1 ] ;
2022-01-25 11:29:45 +03:00
ret = xsk_socket__update_xskmap ( ifobj_rx - > xsk - > xsk , ifobj_rx - > xsk_map_fd ) ;
if ( ret )
exit_with_error ( - ret ) ;
2021-03-30 01:43:13 +03:00
}
2021-09-07 10:19:11 +03:00
static void testapp_bpf_res ( struct test_spec * test )
2021-03-30 01:43:13 +03:00
{
2021-09-07 10:19:15 +03:00
test_spec_set_name ( test , " BPF_RES " ) ;
2021-09-07 10:19:18 +03:00
test - > total_steps = 2 ;
2021-09-07 10:19:19 +03:00
test - > nb_sockets = 2 ;
2022-05-10 14:55:59 +03:00
if ( testapp_validate_traffic ( test ) )
return ;
2021-09-07 10:19:18 +03:00
swap_xsk_resources ( test - > ifobj_tx , test - > ifobj_rx ) ;
testapp_validate_traffic ( test ) ;
2021-03-30 01:43:13 +03:00
}
2021-09-22 10:56:13 +03:00
static void testapp_headroom ( struct test_spec * test )
{
test_spec_set_name ( test , " UMEM_HEADROOM " ) ;
test - > ifobj_rx - > umem - > frame_headroom = UMEM_HEADROOM_TEST_SIZE ;
testapp_validate_traffic ( test ) ;
}
2022-05-10 14:56:03 +03:00
static void testapp_stats_rx_dropped ( struct test_spec * test )
2021-02-23 19:23:04 +03:00
{
2022-05-10 14:56:03 +03:00
test_spec_set_name ( test , " STAT_RX_DROPPED " ) ;
test - > ifobj_rx - > umem - > frame_headroom = test - > ifobj_rx - > umem - > frame_size -
2022-05-10 14:56:04 +03:00
XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3 ;
pkt_stream_replace_half ( test , MIN_PKT_SIZE * 4 , 0 ) ;
pkt_stream_receive_half ( test ) ;
2022-05-10 14:56:03 +03:00
test - > ifobj_rx - > validation_func = validate_rx_dropped ;
testapp_validate_traffic ( test ) ;
}
2021-09-07 10:19:24 +03:00
2022-05-10 14:56:03 +03:00
static void testapp_stats_tx_invalid_descs ( struct test_spec * test )
{
test_spec_set_name ( test , " STAT_TX_INVALID " ) ;
2022-05-10 14:56:04 +03:00
pkt_stream_replace_half ( test , XSK_UMEM__INVALID_FRAME_SIZE , 0 ) ;
2022-05-10 14:56:03 +03:00
test - > ifobj_tx - > validation_func = validate_tx_invalid_descs ;
testapp_validate_traffic ( test ) ;
2021-09-07 10:19:24 +03:00
2022-05-10 14:56:03 +03:00
pkt_stream_restore_default ( test ) ;
}
2022-05-10 14:55:59 +03:00
2022-05-10 14:56:03 +03:00
static void testapp_stats_rx_full ( struct test_spec * test )
{
test_spec_set_name ( test , " STAT_RX_FULL " ) ;
2022-05-10 14:56:04 +03:00
pkt_stream_replace ( test , DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2 , PKT_SIZE ) ;
test - > ifobj_rx - > pkt_stream = pkt_stream_generate ( test - > ifobj_rx - > umem ,
DEFAULT_UMEM_BUFFERS , PKT_SIZE ) ;
if ( ! test - > ifobj_rx - > pkt_stream )
exit_with_error ( ENOMEM ) ;
test - > ifobj_rx - > xsk - > rxqsize = DEFAULT_UMEM_BUFFERS ;
test - > ifobj_rx - > release_rx = false ;
2022-05-10 14:56:03 +03:00
test - > ifobj_rx - > validation_func = validate_rx_full ;
testapp_validate_traffic ( test ) ;
2022-05-10 14:56:04 +03:00
pkt_stream_restore_default ( test ) ;
2022-05-10 14:56:03 +03:00
}
2021-02-23 19:23:04 +03:00
2022-05-10 14:56:03 +03:00
static void testapp_stats_fill_empty ( struct test_spec * test )
{
test_spec_set_name ( test , " STAT_RX_FILL_EMPTY " ) ;
2022-05-10 14:56:04 +03:00
pkt_stream_replace ( test , DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2 , PKT_SIZE ) ;
test - > ifobj_rx - > pkt_stream = pkt_stream_generate ( test - > ifobj_rx - > umem ,
DEFAULT_UMEM_BUFFERS , PKT_SIZE ) ;
2022-05-10 14:56:03 +03:00
if ( ! test - > ifobj_rx - > pkt_stream )
exit_with_error ( ENOMEM ) ;
2022-05-10 14:56:04 +03:00
test - > ifobj_rx - > use_fill_ring = false ;
2022-05-10 14:56:03 +03:00
test - > ifobj_rx - > validation_func = validate_fill_empty ;
testapp_validate_traffic ( test ) ;
pkt_stream_restore_default ( test ) ;
2021-02-23 19:23:04 +03:00
}
2021-09-07 10:19:25 +03:00
/* Simple test */
static bool hugepages_present ( struct ifobject * ifobject )
{
const size_t mmap_sz = 2 * ifobject - > umem - > num_frames * ifobject - > umem - > frame_size ;
void * bufs ;
bufs = mmap ( NULL , mmap_sz , PROT_READ | PROT_WRITE ,
2021-11-17 15:36:13 +03:00
MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB , - 1 , 0 ) ;
2021-09-07 10:19:25 +03:00
if ( bufs = = MAP_FAILED )
return false ;
munmap ( bufs , mmap_sz ) ;
return true ;
}
static bool testapp_unaligned ( struct test_spec * test )
{
if ( ! hugepages_present ( test - > ifobj_tx ) ) {
ksft_test_result_skip ( " No 2M huge pages present. \n " ) ;
return false ;
}
test_spec_set_name ( test , " UNALIGNED_MODE " ) ;
test - > ifobj_tx - > umem - > unaligned_mode = true ;
test - > ifobj_rx - > umem - > unaligned_mode = true ;
/* Let half of the packets straddle a buffer boundrary */
2021-09-22 10:56:12 +03:00
pkt_stream_replace_half ( test , PKT_SIZE , - PKT_SIZE / 2 ) ;
2021-09-07 10:19:25 +03:00
test - > ifobj_rx - > pkt_stream - > use_addr_for_fill = true ;
testapp_validate_traffic ( test ) ;
pkt_stream_restore_default ( test ) ;
return true ;
}
2021-09-22 10:56:11 +03:00
/* Run traffic with a custom stream consisting of exactly one valid packet. */
static void testapp_single_pkt(struct test_spec *test)
{
	struct pkt pkts[] = {{0x1000, PKT_SIZE, 0, true}};

	pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
	testapp_validate_traffic(test);
	pkt_stream_restore_default(test);
}
static void testapp_invalid_desc ( struct test_spec * test )
{
struct pkt pkts [ ] = {
2022-05-10 14:55:57 +03:00
/* Zero packet address allowed */
{ 0 , PKT_SIZE , 0 , true } ,
/* Allowed packet */
{ 0x1000 , PKT_SIZE , 0 , true } ,
2021-09-07 10:19:27 +03:00
/* Straddling the start of umem */
{ - 2 , PKT_SIZE , 0 , false } ,
/* Packet too large */
{ 0x2000 , XSK_UMEM__INVALID_FRAME_SIZE , 0 , false } ,
/* After umem ends */
{ UMEM_SIZE , PKT_SIZE , 0 , false } ,
/* Straddle the end of umem */
{ UMEM_SIZE - PKT_SIZE / 2 , PKT_SIZE , 0 , false } ,
/* Straddle a page boundrary */
{ 0x3000 - PKT_SIZE / 2 , PKT_SIZE , 0 , false } ,
2021-09-07 10:19:28 +03:00
/* Straddle a 2K boundrary */
{ 0x3800 - PKT_SIZE / 2 , PKT_SIZE , 0 , true } ,
2021-09-07 10:19:27 +03:00
/* Valid packet for synch so that something is received */
{ 0x4000 , PKT_SIZE , 0 , true } } ;
if ( test - > ifobj_tx - > umem - > unaligned_mode ) {
/* Crossing a page boundrary allowed */
pkts [ 6 ] . valid = true ;
}
2021-09-07 10:19:28 +03:00
if ( test - > ifobj_tx - > umem - > frame_size = = XSK_UMEM__DEFAULT_FRAME_SIZE / 2 ) {
/* Crossing a 2K frame size boundrary not allowed */
pkts [ 7 ] . valid = false ;
}
2021-09-07 10:19:27 +03:00
pkt_stream_generate_custom ( test , pkts , ARRAY_SIZE ( pkts ) ) ;
testapp_validate_traffic ( test ) ;
pkt_stream_restore_default ( test ) ;
}
2021-09-07 10:19:10 +03:00
/* Fill in an interface object's addressing (MACs, IPv4 addresses in
 * network byte order via inet_aton(), UDP ports) and its worker thread
 * entry point.
 */
static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *src_mac,
		       const char *dst_ip, const char *src_ip, const u16 dst_port,
		       const u16 src_port, thread_func_t func_ptr)
{
	struct in_addr ip;

	memcpy(ifobj->dst_mac, dst_mac, ETH_ALEN);
	memcpy(ifobj->src_mac, src_mac, ETH_ALEN);

	inet_aton(dst_ip, &ip);
	ifobj->dst_ip = ip.s_addr;
	inet_aton(src_ip, &ip);
	ifobj->src_ip = ip.s_addr;

	ifobj->dst_port = dst_port;
	ifobj->src_port = src_port;

	ifobj->func_ptr = func_ptr;
}
static void run_pkt_test ( struct test_spec * test , enum test_mode mode , enum test_type type )
2021-02-23 19:23:03 +03:00
{
2022-05-10 14:56:03 +03:00
switch ( type ) {
case TEST_TYPE_STATS_RX_DROPPED :
testapp_stats_rx_dropped ( test ) ;
break ;
case TEST_TYPE_STATS_TX_INVALID_DESCS :
testapp_stats_tx_invalid_descs ( test ) ;
break ;
case TEST_TYPE_STATS_RX_FULL :
testapp_stats_rx_full ( test ) ;
break ;
case TEST_TYPE_STATS_FILL_EMPTY :
testapp_stats_fill_empty ( test ) ;
2021-03-30 01:43:10 +03:00
break ;
case TEST_TYPE_TEARDOWN :
2021-09-07 10:19:11 +03:00
testapp_teardown ( test ) ;
2021-03-30 01:43:10 +03:00
break ;
case TEST_TYPE_BIDI :
2021-09-07 10:19:11 +03:00
testapp_bidi ( test ) ;
2021-03-30 01:43:10 +03:00
break ;
2021-03-30 01:43:13 +03:00
case TEST_TYPE_BPF_RES :
2021-09-07 10:19:11 +03:00
testapp_bpf_res ( test ) ;
2021-03-30 01:43:13 +03:00
break ;
2021-09-07 10:19:27 +03:00
case TEST_TYPE_RUN_TO_COMPLETION :
2021-09-07 10:19:15 +03:00
test_spec_set_name ( test , " RUN_TO_COMPLETION " ) ;
testapp_validate_traffic ( test ) ;
break ;
2021-09-22 10:56:11 +03:00
case TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT :
test_spec_set_name ( test , " RUN_TO_COMPLETION_SINGLE_PKT " ) ;
testapp_single_pkt ( test ) ;
break ;
2021-09-07 10:19:28 +03:00
case TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME :
test_spec_set_name ( test , " RUN_TO_COMPLETION_2K_FRAME_SIZE " ) ;
test - > ifobj_tx - > umem - > frame_size = 2048 ;
test - > ifobj_rx - > umem - > frame_size = 2048 ;
2022-05-10 14:56:04 +03:00
pkt_stream_replace ( test , DEFAULT_PKT_CNT , PKT_SIZE ) ;
2021-09-07 10:19:28 +03:00
testapp_validate_traffic ( test ) ;
pkt_stream_restore_default ( test ) ;
break ;
2021-09-07 10:19:15 +03:00
case TEST_TYPE_POLL :
2021-09-07 10:19:16 +03:00
test - > ifobj_tx - > use_poll = true ;
test - > ifobj_rx - > use_poll = true ;
2021-09-07 10:19:15 +03:00
test_spec_set_name ( test , " POLL " ) ;
2021-09-07 10:19:11 +03:00
testapp_validate_traffic ( test ) ;
2021-03-30 01:43:10 +03:00
break ;
2021-09-07 10:19:27 +03:00
case TEST_TYPE_ALIGNED_INV_DESC :
test_spec_set_name ( test , " ALIGNED_INV_DESC " ) ;
testapp_invalid_desc ( test ) ;
break ;
2021-09-07 10:19:28 +03:00
case TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME :
test_spec_set_name ( test , " ALIGNED_INV_DESC_2K_FRAME_SIZE " ) ;
test - > ifobj_tx - > umem - > frame_size = 2048 ;
test - > ifobj_rx - > umem - > frame_size = 2048 ;
testapp_invalid_desc ( test ) ;
break ;
2021-09-07 10:19:27 +03:00
case TEST_TYPE_UNALIGNED_INV_DESC :
2021-11-17 15:36:13 +03:00
if ( ! hugepages_present ( test - > ifobj_tx ) ) {
ksft_test_result_skip ( " No 2M huge pages present. \n " ) ;
return ;
}
2021-09-07 10:19:27 +03:00
test_spec_set_name ( test , " UNALIGNED_INV_DESC " ) ;
test - > ifobj_tx - > umem - > unaligned_mode = true ;
test - > ifobj_rx - > umem - > unaligned_mode = true ;
testapp_invalid_desc ( test ) ;
break ;
2021-09-07 10:19:25 +03:00
case TEST_TYPE_UNALIGNED :
if ( ! testapp_unaligned ( test ) )
return ;
break ;
2021-09-22 10:56:13 +03:00
case TEST_TYPE_HEADROOM :
testapp_headroom ( test ) ;
break ;
2021-09-07 10:19:15 +03:00
default :
break ;
2021-03-30 01:43:10 +03:00
}
2021-09-07 10:19:15 +03:00
2022-05-10 14:55:59 +03:00
if ( ! test - > fail )
ksft_test_result_pass ( " PASS: %s %s%s \n " , mode_string ( test ) , busy_poll_string ( test ) ,
test - > name ) ;
2021-02-23 19:23:03 +03:00
}
2021-08-25 12:37:18 +03:00
static struct ifobject * ifobject_create ( void )
{
struct ifobject * ifobj ;
ifobj = calloc ( 1 , sizeof ( struct ifobject ) ) ;
if ( ! ifobj )
return NULL ;
2021-09-07 10:19:09 +03:00
ifobj - > xsk_arr = calloc ( MAX_SOCKETS , sizeof ( * ifobj - > xsk_arr ) ) ;
2021-08-25 12:37:18 +03:00
if ( ! ifobj - > xsk_arr )
goto out_xsk_arr ;
2022-01-25 11:29:45 +03:00
ifobj - > umem = calloc ( 1 , sizeof ( * ifobj - > umem ) ) ;
if ( ! ifobj - > umem )
goto out_umem ;
2021-08-25 12:37:18 +03:00
return ifobj ;
2022-01-25 11:29:45 +03:00
out_umem :
2021-08-25 12:37:18 +03:00
free ( ifobj - > xsk_arr ) ;
out_xsk_arr :
free ( ifobj ) ;
return NULL ;
}
static void ifobject_delete ( struct ifobject * ifobj )
{
2022-01-25 11:29:45 +03:00
free ( ifobj - > umem ) ;
2021-08-25 12:37:18 +03:00
free ( ifobj - > xsk_arr ) ;
free ( ifobj ) ;
}
2020-12-08 00:53:30 +03:00
int main ( int argc , char * * argv )
{
2021-09-07 10:19:24 +03:00
struct pkt_stream * pkt_stream_default ;
2021-09-07 10:19:11 +03:00
struct ifobject * ifobj_tx , * ifobj_rx ;
2022-05-10 14:55:59 +03:00
u32 i , j , failed_tests = 0 ;
2021-09-07 10:19:11 +03:00
struct test_spec test ;
2020-12-08 00:53:30 +03:00
2022-04-09 15:59:56 +03:00
/* Use libbpf 1.0 API mode */
libbpf_set_strict_mode ( LIBBPF_STRICT_ALL ) ;
2020-12-08 00:53:30 +03:00
2021-09-07 10:19:11 +03:00
ifobj_tx = ifobject_create ( ) ;
if ( ! ifobj_tx )
exit_with_error ( ENOMEM ) ;
ifobj_rx = ifobject_create ( ) ;
if ( ! ifobj_rx )
exit_with_error ( ENOMEM ) ;
2020-12-08 00:53:30 +03:00
setlocale ( LC_ALL , " " ) ;
2021-09-07 10:19:20 +03:00
parse_command_line ( ifobj_tx , ifobj_rx , argc , argv ) ;
2021-09-07 10:19:11 +03:00
if ( ! validate_interface ( ifobj_tx ) | | ! validate_interface ( ifobj_rx ) ) {
usage ( basename ( argv [ 0 ] ) ) ;
ksft_exit_xfail ( ) ;
}
2020-12-08 00:53:30 +03:00
2021-09-07 10:19:17 +03:00
init_iface ( ifobj_tx , MAC1 , MAC2 , IP1 , IP2 , UDP_PORT1 , UDP_PORT2 ,
2021-09-07 10:19:10 +03:00
worker_testapp_validate_tx ) ;
2021-09-07 10:19:17 +03:00
init_iface ( ifobj_rx , MAC2 , MAC1 , IP2 , IP1 , UDP_PORT2 , UDP_PORT1 ,
2021-09-07 10:19:10 +03:00
worker_testapp_validate_rx ) ;
2020-12-08 00:53:30 +03:00
2021-09-07 10:19:24 +03:00
test_spec_init ( & test , ifobj_tx , ifobj_rx , 0 ) ;
pkt_stream_default = pkt_stream_generate ( ifobj_tx - > umem , DEFAULT_PKT_CNT , PKT_SIZE ) ;
if ( ! pkt_stream_default )
exit_with_error ( ENOMEM ) ;
test . pkt_stream_default = pkt_stream_default ;
2021-02-23 19:23:03 +03:00
ksft_set_plan ( TEST_MODE_MAX * TEST_TYPE_MAX ) ;
2020-12-08 00:53:30 +03:00
2021-08-25 12:37:18 +03:00
for ( i = 0 ; i < TEST_MODE_MAX ; i + + )
2021-08-25 12:37:11 +03:00
for ( j = 0 ; j < TEST_TYPE_MAX ; j + + ) {
2021-09-07 10:19:20 +03:00
test_spec_init ( & test , ifobj_tx , ifobj_rx , i ) ;
2021-09-07 10:19:11 +03:00
run_pkt_test ( & test , i , j ) ;
2021-08-25 12:37:11 +03:00
usleep ( USLEEP_MAX ) ;
2022-05-10 14:55:59 +03:00
if ( test . fail )
failed_tests + + ;
2021-08-25 12:37:11 +03:00
}
2020-12-08 00:53:30 +03:00
2021-09-07 10:19:24 +03:00
pkt_stream_delete ( pkt_stream_default ) ;
2021-09-07 10:19:11 +03:00
ifobject_delete ( ifobj_tx ) ;
ifobject_delete ( ifobj_rx ) ;
2021-03-30 01:43:10 +03:00
2022-05-10 14:55:59 +03:00
if ( failed_tests )
ksft_exit_fail ( ) ;
else
ksft_exit_pass ( ) ;
2020-12-08 00:53:30 +03:00
}