// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2020 Intel Corporation. */

/*
 * Some functions in this program are taken from
 * Linux kernel samples/bpf/xdpsock* and modified
 * for use.
 *
 * See test_xsk.sh for detailed information on test topology
 * and prerequisite network setup.
 *
 * This test program contains two threads, each thread is single socket with
 * a unique UMEM. It validates in-order packet delivery and packet content
 * by sending packets to each other.
 *
 * Tests Information:
 * ------------------
 * These selftests test AF_XDP SKB and Native/DRV modes using veth
 * Virtual Ethernet interfaces.
 *
 * For each mode, the following tests are run:
 *    a. nopoll - soft-irq processing in run-to-completion mode
 *    b. poll - using poll() syscall
 *    c. Socket Teardown
 *       Create a Tx and a Rx socket, Tx from one socket, Rx on another. Destroy
 *       both sockets, then repeat multiple times. Only nopoll mode is used
 *    d. Bi-directional sockets
 *       Configure sockets as bi-directional tx/rx sockets, sets up fill and
 *       completion rings on each socket, tx/rx in both directions. Only nopoll
 *       mode is used
 *    e. Statistics
 *       Trigger some error conditions and ensure that the appropriate statistics
 *       are incremented. Within this test, the following statistics are tested:
 *       i.   rx dropped
 *            Increase the UMEM frame headroom to a value which results in
 *            insufficient space in the rx buffer for both the packet and the headroom.
 *       ii.  tx invalid
 *            Set the 'len' field of tx descriptors to an invalid value (umem frame
 *            size + 1).
 *       iii. rx ring full
 *            Reduce the size of the RX ring to a fraction of the fill ring size.
 *       iv.  fill queue empty
 *            Do not populate the fill queue and then try to receive pkts.
 *    f. bpf_link resource persistence
 *       Configure sockets at indexes 0 and 1, run a traffic on queue ids 0,
 *       then remove xsk sockets from queue 0 on both veth interfaces and
 *       finally run a traffic on queues ids 1
 *    g. unaligned mode
 *    h. tests for invalid and corner case Tx descriptors so that the correct ones
 *       are discarded and let through, respectively.
 *    i. 2K frame size tests
 *
 * Total tests: 12
 *
 * Flow:
 * -----
 * - Single process spawns two threads: Tx and Rx
 * - Each of these two threads attach to a veth interface within their assigned
 *   namespaces
 * - Each thread Creates one AF_XDP socket connected to a unique umem for each
 *   veth interface
 * - Tx thread Transmits 10k packets from veth<xxxx> to veth<yyyy>
 * - Rx thread verifies if all 10k packets were received and delivered in-order,
 *   and have the right content
 *
 * Enable/disable packet dump mode:
 * --------------------------------
 * To enable L2 - L4 headers and payload dump of each packet on STDOUT, add
 * parameter -D to params array in test_xsk.sh, i.e. params=("-S" "-D")
 */
# define _GNU_SOURCE
# include <fcntl.h>
# include <errno.h>
# include <getopt.h>
# include <asm/barrier.h>
# include <linux/if_link.h>
# include <linux/if_ether.h>
# include <linux/ip.h>
# include <linux/udp.h>
# include <arpa/inet.h>
# include <net/if.h>
# include <locale.h>
# include <poll.h>
# include <pthread.h>
# include <signal.h>
# include <stdbool.h>
# include <stdio.h>
# include <stdlib.h>
# include <string.h>
# include <stddef.h>
# include <sys/mman.h>
# include <sys/resource.h>
# include <sys/types.h>
# include <sys/queue.h>
# include <time.h>
# include <unistd.h>
# include <stdatomic.h>
# include <bpf/xsk.h>
# include "xdpxceiver.h"
# include "../kselftest.h"
/* Endpoint identities for the two veth peers; the Tx and Rx threads each use
 * one MAC/IP/port triple as source and the other as destination.
 * Note: the hex escapes must be contiguous - any whitespace inside the
 * literal would change the MAC bytes.
 */
static const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62";
static const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61";
static const char *IP1 = "192.168.100.162";
static const char *IP2 = "192.168.100.161";
static const u16 UDP_PORT1 = 2020;
static const u16 UDP_PORT2 = 2121;
/* Report a fatal error (errno-style code) with its origin file/function/line,
 * then terminate the whole test run as an expected failure.
 */
static void __exit_with_error(int error, const char *file, const char *func, int line)
{
	ksft_test_result_fail("[%s:%s:%i]: ERROR: %d/\"%s\"\n", file, func, line, error,
			      strerror(error));
	ksft_exit_xfail();
}

#define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, __LINE__)

/* Fully parenthesized so the ternary cannot bind to operators at the
 * expansion site (e.g. when the macro is used inside a larger expression).
 */
#define mode_string(test) ((test)->ifobj_tx->xdp_flags & XDP_FLAGS_SKB_MODE ? "SKB" : "DRV")
#define print_ksft_result(test)						\
	(ksft_test_result_pass("PASS: %s %s\n", mode_string(test), (test)->name))
static void memset32_htonl ( void * dest , u32 val , u32 size )
2020-12-08 00:53:30 +03:00
{
u32 * ptr = ( u32 * ) dest ;
int i ;
val = htonl ( val ) ;
for ( i = 0 ; i < ( size & ( ~ 0x3 ) ) ; i + = 4 )
ptr [ i > > 2 ] = val ;
}
/*
* Fold a partial checksum
* This function code has been taken from
* Linux kernel include / asm - generic / checksum . h
*/
2021-03-30 01:43:03 +03:00
static __u16 csum_fold ( __u32 csum )
2020-12-08 00:53:30 +03:00
{
u32 sum = ( __force u32 ) csum ;
sum = ( sum & 0xffff ) + ( sum > > 16 ) ;
sum = ( sum & 0xffff ) + ( sum > > 16 ) ;
return ( __force __u16 ) ~ sum ;
}
/*
* This function code has been taken from
* Linux kernel lib / checksum . c
*/
2021-03-30 01:43:03 +03:00
static u32 from64to32 ( u64 x )
2020-12-08 00:53:30 +03:00
{
/* add up 32-bit and 32-bit for 32+c bit */
x = ( x & 0xffffffff ) + ( x > > 32 ) ;
/* add up carry.. */
x = ( x & 0xffffffff ) + ( x > > 32 ) ;
return ( u32 ) x ;
}
/*
* This function code has been taken from
* Linux kernel lib / checksum . c
*/
2021-03-30 01:43:03 +03:00
static __u32 csum_tcpudp_nofold ( __be32 saddr , __be32 daddr , __u32 len , __u8 proto , __u32 sum )
2020-12-08 00:53:30 +03:00
{
unsigned long long s = ( __force u32 ) sum ;
s + = ( __force u32 ) saddr ;
s + = ( __force u32 ) daddr ;
# ifdef __BIG_ENDIAN__
s + = proto + len ;
# else
s + = ( proto + len ) < < 8 ;
# endif
return ( __force __u32 ) from64to32 ( s ) ;
}
/*
* This function has been taken from
* Linux kernel include / asm - generic / checksum . h
*/
2021-03-30 01:43:03 +03:00
static __u16 csum_tcpudp_magic ( __be32 saddr , __be32 daddr , __u32 len , __u8 proto , __u32 sum )
2020-12-08 00:53:30 +03:00
{
return csum_fold ( csum_tcpudp_nofold ( saddr , daddr , len , proto , sum ) ) ;
}
2021-03-30 01:43:03 +03:00
static u16 udp_csum ( u32 saddr , u32 daddr , u32 len , u8 proto , u16 * udp_pkt )
2020-12-08 00:53:30 +03:00
{
u32 csum = 0 ;
u32 cnt = 0 ;
/* udp hdr and data */
for ( ; cnt < len ; cnt + = 2 )
csum + = udp_pkt [ cnt > > 1 ] ;
return csum_tcpudp_magic ( saddr , daddr , len , proto , csum ) ;
}
2021-01-22 18:47:14 +03:00
static void gen_eth_hdr ( struct ifobject * ifobject , struct ethhdr * eth_hdr )
2020-12-08 00:53:30 +03:00
{
2021-01-22 18:47:14 +03:00
memcpy ( eth_hdr - > h_dest , ifobject - > dst_mac , ETH_ALEN ) ;
memcpy ( eth_hdr - > h_source , ifobject - > src_mac , ETH_ALEN ) ;
2020-12-08 00:53:30 +03:00
eth_hdr - > h_proto = htons ( ETH_P_IP ) ;
}
2021-01-22 18:47:14 +03:00
static void gen_ip_hdr ( struct ifobject * ifobject , struct iphdr * ip_hdr )
2020-12-08 00:53:30 +03:00
{
ip_hdr - > version = IP_PKT_VER ;
ip_hdr - > ihl = 0x5 ;
ip_hdr - > tos = IP_PKT_TOS ;
ip_hdr - > tot_len = htons ( IP_PKT_SIZE ) ;
ip_hdr - > id = 0 ;
ip_hdr - > frag_off = 0 ;
ip_hdr - > ttl = IPDEFTTL ;
ip_hdr - > protocol = IPPROTO_UDP ;
2021-01-22 18:47:14 +03:00
ip_hdr - > saddr = ifobject - > src_ip ;
ip_hdr - > daddr = ifobject - > dst_ip ;
2020-12-08 00:53:30 +03:00
ip_hdr - > check = 0 ;
}
2021-08-25 12:37:19 +03:00
static void gen_udp_hdr ( u32 payload , void * pkt , struct ifobject * ifobject ,
2021-01-22 18:47:21 +03:00
struct udphdr * udp_hdr )
2020-12-08 00:53:30 +03:00
{
2021-01-22 18:47:14 +03:00
udp_hdr - > source = htons ( ifobject - > src_port ) ;
udp_hdr - > dest = htons ( ifobject - > dst_port ) ;
2020-12-08 00:53:30 +03:00
udp_hdr - > len = htons ( UDP_PKT_SIZE ) ;
2021-08-25 12:37:19 +03:00
memset32_htonl ( pkt + PKT_HDR_SIZE , payload , UDP_PKT_DATA_SIZE ) ;
2020-12-08 00:53:30 +03:00
}
static void gen_udp_csum ( struct udphdr * udp_hdr , struct iphdr * ip_hdr )
{
udp_hdr - > check = 0 ;
udp_hdr - > check =
udp_csum ( ip_hdr - > saddr , ip_hdr - > daddr , UDP_PKT_SIZE , IPPROTO_UDP , ( u16 * ) udp_hdr ) ;
}
2021-09-07 10:19:19 +03:00
static int xsk_configure_umem ( struct xsk_umem_info * umem , void * buffer , u64 size )
2020-12-08 00:53:30 +03:00
{
2021-02-23 19:23:04 +03:00
struct xsk_umem_config cfg = {
. fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS ,
. comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS ,
2021-09-07 10:19:14 +03:00
. frame_size = umem - > frame_size ,
2021-09-07 10:19:12 +03:00
. frame_headroom = umem - > frame_headroom ,
2021-02-23 19:23:04 +03:00
. flags = XSK_UMEM__DEFAULT_FLAGS
} ;
2021-03-30 01:43:13 +03:00
int ret ;
2020-12-08 00:53:30 +03:00
2021-09-07 10:19:25 +03:00
if ( umem - > unaligned_mode )
cfg . flags | = XDP_UMEM_UNALIGNED_CHUNK_FLAG ;
2021-03-30 01:43:13 +03:00
ret = xsk_umem__create ( & umem - > umem , buffer , size ,
& umem - > fq , & umem - > cq , & cfg ) ;
2020-12-08 00:53:30 +03:00
if ( ret )
2021-09-07 10:19:09 +03:00
return ret ;
2020-12-08 00:53:30 +03:00
2021-03-30 01:43:13 +03:00
umem - > buffer = buffer ;
2021-09-07 10:19:09 +03:00
return 0 ;
2020-12-08 00:53:30 +03:00
}
2021-09-07 10:19:09 +03:00
static int xsk_configure_socket ( struct xsk_socket_info * xsk , struct xsk_umem_info * umem ,
struct ifobject * ifobject , u32 qid )
2020-12-08 00:53:30 +03:00
{
struct xsk_socket_config cfg ;
struct xsk_ring_cons * rxr ;
struct xsk_ring_prod * txr ;
2021-09-07 10:19:09 +03:00
xsk - > umem = umem ;
2021-09-07 10:19:13 +03:00
cfg . rx_size = xsk - > rxqsize ;
2020-12-08 00:53:30 +03:00
cfg . tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS ;
cfg . libbpf_flags = 0 ;
2021-09-07 10:19:20 +03:00
cfg . xdp_flags = ifobject - > xdp_flags ;
cfg . bind_flags = ifobject - > bind_flags ;
2020-12-08 00:53:30 +03:00
2021-09-07 10:19:17 +03:00
txr = ifobject - > tx_on ? & xsk - > tx : NULL ;
rxr = ifobject - > rx_on ? & xsk - > rx : NULL ;
2021-09-07 10:19:09 +03:00
return xsk_socket__create ( & xsk - > xsk , ifobject - > ifname , qid , umem - > umem , rxr , txr , & cfg ) ;
2020-12-08 00:53:30 +03:00
}
/* Command-line options accepted by this test (see usage()). */
static struct option long_options[] = {
	{"interface", required_argument, 0, 'i'},
	{"queue", optional_argument, 0, 'q'},
	{"dump-pkts", optional_argument, 0, 'D'},
	{"verbose", no_argument, 0, 'v'},
	{0, 0, 0, 0}
};
/* Print the command-line help text, substituting the program name. */
static void usage(const char *prog)
{
	const char *str =
		"  Usage: %s [OPTIONS]\n"
		"  Options:\n"
		"  -i, --interface      Use interface\n"
		"  -q, --queue=n        Use queue n (default 0)\n"
		"  -D, --dump-pkts      Dump packets L2 - L5\n"
		"  -v, --verbose        Verbose output\n";

	ksft_print_msg(str, prog);
}
/* Move the calling thread into the network namespace nsname (looked up under
 * /var/run/netns). Returns the namespace fd on success, -1 when no name was
 * supplied; exits the test on open()/setns() failure.
 */
static int switch_namespace(const char *nsname)
{
	char fqns[26] = "/var/run/netns/";
	int nsfd;

	if (!nsname || strlen(nsname) == 0)
		return -1;

	/* Bounded append; fqns has room for a short namespace name only. */
	strncat(fqns, nsname, sizeof(fqns) - strlen(fqns) - 1);

	nsfd = open(fqns, O_RDONLY);
	if (nsfd == -1)
		exit_with_error(errno);

	if (setns(nsfd, 0) == -1)
		exit_with_error(errno);

	print_verbose("NS switched: %s\n", nsname);

	return nsfd;
}
2021-09-07 10:19:11 +03:00
static bool validate_interface ( struct ifobject * ifobj )
2020-12-08 00:53:30 +03:00
{
2021-09-07 10:19:11 +03:00
if ( ! strcmp ( ifobj - > ifname , " " ) )
return false ;
return true ;
2020-12-08 00:53:30 +03:00
}
2021-09-07 10:19:20 +03:00
static void parse_command_line ( struct ifobject * ifobj_tx , struct ifobject * ifobj_rx , int argc ,
char * * argv )
2020-12-08 00:53:30 +03:00
{
2021-09-07 10:19:11 +03:00
struct ifobject * ifobj ;
u32 interface_nb = 0 ;
int option_index , c ;
2020-12-08 00:53:30 +03:00
opterr = 0 ;
for ( ; ; ) {
2021-09-07 10:19:11 +03:00
char * sptr , * token ;
2020-12-08 00:53:30 +03:00
2021-09-07 10:19:11 +03:00
c = getopt_long ( argc , argv , " i:Dv " , long_options , & option_index ) ;
2020-12-08 00:53:30 +03:00
if ( c = = - 1 )
break ;
switch ( c ) {
case ' i ' :
2021-09-07 10:19:11 +03:00
if ( interface_nb = = 0 )
2021-09-07 10:19:20 +03:00
ifobj = ifobj_tx ;
2021-09-07 10:19:11 +03:00
else if ( interface_nb = = 1 )
2021-09-07 10:19:20 +03:00
ifobj = ifobj_rx ;
2021-09-07 10:19:11 +03:00
else
2020-12-08 00:53:30 +03:00
break ;
sptr = strndupa ( optarg , strlen ( optarg ) ) ;
2021-09-07 10:19:11 +03:00
memcpy ( ifobj - > ifname , strsep ( & sptr , " , " ) , MAX_INTERFACE_NAME_CHARS ) ;
2020-12-08 00:53:30 +03:00
token = strsep ( & sptr , " , " ) ;
if ( token )
2021-09-07 10:19:11 +03:00
memcpy ( ifobj - > nsname , token , MAX_INTERFACES_NAMESPACE_CHARS ) ;
interface_nb + + ;
2020-12-08 00:53:30 +03:00
break ;
case ' D ' :
2021-08-25 12:37:22 +03:00
opt_pkt_dump = true ;
2020-12-08 00:53:30 +03:00
break ;
2021-02-23 19:23:01 +03:00
case ' v ' :
2021-08-25 12:37:22 +03:00
opt_verbose = true ;
2021-02-23 19:23:01 +03:00
break ;
2020-12-08 00:53:30 +03:00
default :
usage ( basename ( argv [ 0 ] ) ) ;
ksft_exit_xfail ( ) ;
}
}
2021-09-07 10:19:11 +03:00
}
2020-12-08 00:53:30 +03:00
2021-09-07 10:19:11 +03:00
static void __test_spec_init ( struct test_spec * test , struct ifobject * ifobj_tx ,
struct ifobject * ifobj_rx )
{
u32 i , j ;
for ( i = 0 ; i < MAX_INTERFACES ; i + + ) {
struct ifobject * ifobj = i ? ifobj_rx : ifobj_tx ;
ifobj - > umem = & ifobj - > umem_arr [ 0 ] ;
ifobj - > xsk = & ifobj - > xsk_arr [ 0 ] ;
2021-09-07 10:19:16 +03:00
ifobj - > use_poll = false ;
2021-09-22 10:56:10 +03:00
ifobj - > pacing_on = true ;
2021-09-07 10:19:24 +03:00
ifobj - > pkt_stream = test - > pkt_stream_default ;
2021-09-07 10:19:11 +03:00
2021-09-07 10:19:17 +03:00
if ( i = = 0 ) {
ifobj - > rx_on = false ;
ifobj - > tx_on = true ;
} else {
ifobj - > rx_on = true ;
ifobj - > tx_on = false ;
}
2021-09-07 10:19:11 +03:00
for ( j = 0 ; j < MAX_SOCKETS ; j + + ) {
memset ( & ifobj - > umem_arr [ j ] , 0 , sizeof ( ifobj - > umem_arr [ j ] ) ) ;
memset ( & ifobj - > xsk_arr [ j ] , 0 , sizeof ( ifobj - > xsk_arr [ j ] ) ) ;
2021-09-07 10:19:27 +03:00
ifobj - > umem_arr [ j ] . num_frames = DEFAULT_UMEM_BUFFERS ;
2021-09-07 10:19:14 +03:00
ifobj - > umem_arr [ j ] . frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE ;
2021-09-07 10:19:13 +03:00
ifobj - > xsk_arr [ j ] . rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS ;
2021-09-07 10:19:11 +03:00
}
2020-12-08 00:53:30 +03:00
}
2021-09-07 10:19:11 +03:00
test - > ifobj_tx = ifobj_tx ;
test - > ifobj_rx = ifobj_rx ;
2021-09-07 10:19:18 +03:00
test - > current_step = 0 ;
test - > total_steps = 1 ;
2021-09-07 10:19:19 +03:00
test - > nb_sockets = 1 ;
2021-09-07 10:19:11 +03:00
}
static void test_spec_init ( struct test_spec * test , struct ifobject * ifobj_tx ,
2021-09-07 10:19:20 +03:00
struct ifobject * ifobj_rx , enum test_mode mode )
2021-09-07 10:19:11 +03:00
{
2021-09-07 10:19:24 +03:00
struct pkt_stream * pkt_stream ;
2021-09-07 10:19:20 +03:00
u32 i ;
2021-09-07 10:19:24 +03:00
pkt_stream = test - > pkt_stream_default ;
2021-09-07 10:19:11 +03:00
memset ( test , 0 , sizeof ( * test ) ) ;
2021-09-07 10:19:24 +03:00
test - > pkt_stream_default = pkt_stream ;
2021-09-07 10:19:20 +03:00
for ( i = 0 ; i < MAX_INTERFACES ; i + + ) {
struct ifobject * ifobj = i ? ifobj_rx : ifobj_tx ;
ifobj - > xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST ;
if ( mode = = TEST_MODE_SKB )
ifobj - > xdp_flags | = XDP_FLAGS_SKB_MODE ;
else
ifobj - > xdp_flags | = XDP_FLAGS_DRV_MODE ;
ifobj - > bind_flags = XDP_USE_NEED_WAKEUP | XDP_COPY ;
}
2021-09-07 10:19:11 +03:00
__test_spec_init ( test , ifobj_tx , ifobj_rx ) ;
}
static void test_spec_reset ( struct test_spec * test )
{
__test_spec_init ( test , test - > ifobj_tx , test - > ifobj_rx ) ;
2020-12-08 00:53:30 +03:00
}
2021-09-07 10:19:15 +03:00
static void test_spec_set_name ( struct test_spec * test , const char * name )
{
strncpy ( test - > name , name , MAX_TEST_NAME_SIZE ) ;
}
2021-09-22 10:56:07 +03:00
static void pkt_stream_reset ( struct pkt_stream * pkt_stream )
{
if ( pkt_stream )
pkt_stream - > rx_pkt_nb = 0 ;
}
2021-08-25 12:37:20 +03:00
static struct pkt * pkt_stream_get_pkt ( struct pkt_stream * pkt_stream , u32 pkt_nb )
2021-08-25 12:37:19 +03:00
{
2021-08-25 12:37:20 +03:00
if ( pkt_nb > = pkt_stream - > nb_pkts )
return NULL ;
return & pkt_stream - > pkts [ pkt_nb ] ;
}
2021-09-07 10:19:27 +03:00
static struct pkt * pkt_stream_get_next_rx_pkt ( struct pkt_stream * pkt_stream )
{
while ( pkt_stream - > rx_pkt_nb < pkt_stream - > nb_pkts ) {
if ( pkt_stream - > pkts [ pkt_stream - > rx_pkt_nb ] . valid )
return & pkt_stream - > pkts [ pkt_stream - > rx_pkt_nb + + ] ;
pkt_stream - > rx_pkt_nb + + ;
}
return NULL ;
}
2021-09-07 10:19:24 +03:00
static void pkt_stream_delete ( struct pkt_stream * pkt_stream )
{
free ( pkt_stream - > pkts ) ;
free ( pkt_stream ) ;
}
static void pkt_stream_restore_default ( struct test_spec * test )
{
2021-09-07 10:19:26 +03:00
if ( test - > ifobj_tx - > pkt_stream ! = test - > pkt_stream_default ) {
pkt_stream_delete ( test - > ifobj_tx - > pkt_stream ) ;
test - > ifobj_tx - > pkt_stream = test - > pkt_stream_default ;
}
2021-09-07 10:19:24 +03:00
test - > ifobj_rx - > pkt_stream = test - > pkt_stream_default ;
}
2021-09-07 10:19:27 +03:00
static struct pkt_stream * __pkt_stream_alloc ( u32 nb_pkts )
2021-08-25 12:37:20 +03:00
{
struct pkt_stream * pkt_stream ;
2021-09-07 10:19:25 +03:00
pkt_stream = calloc ( 1 , sizeof ( * pkt_stream ) ) ;
2021-08-25 12:37:20 +03:00
if ( ! pkt_stream )
2021-09-07 10:19:27 +03:00
return NULL ;
2021-08-25 12:37:20 +03:00
pkt_stream - > pkts = calloc ( nb_pkts , sizeof ( * pkt_stream - > pkts ) ) ;
2021-09-07 10:19:27 +03:00
if ( ! pkt_stream - > pkts ) {
free ( pkt_stream ) ;
return NULL ;
}
pkt_stream - > nb_pkts = nb_pkts ;
return pkt_stream ;
}
static struct pkt_stream * pkt_stream_generate ( struct xsk_umem_info * umem , u32 nb_pkts , u32 pkt_len )
{
struct pkt_stream * pkt_stream ;
u32 i ;
pkt_stream = __pkt_stream_alloc ( nb_pkts ) ;
if ( ! pkt_stream )
2021-08-25 12:37:20 +03:00
exit_with_error ( ENOMEM ) ;
pkt_stream - > nb_pkts = nb_pkts ;
for ( i = 0 ; i < nb_pkts ; i + + ) {
2021-09-07 10:19:25 +03:00
pkt_stream - > pkts [ i ] . addr = ( i % umem - > num_frames ) * umem - > frame_size +
DEFAULT_OFFSET ;
2021-08-25 12:37:20 +03:00
pkt_stream - > pkts [ i ] . len = pkt_len ;
pkt_stream - > pkts [ i ] . payload = i ;
2021-09-07 10:19:23 +03:00
if ( pkt_len > umem - > frame_size )
pkt_stream - > pkts [ i ] . valid = false ;
else
pkt_stream - > pkts [ i ] . valid = true ;
2021-08-25 12:37:20 +03:00
}
return pkt_stream ;
}
2021-09-07 10:19:25 +03:00
static struct pkt_stream * pkt_stream_clone ( struct xsk_umem_info * umem ,
struct pkt_stream * pkt_stream )
{
return pkt_stream_generate ( umem , pkt_stream - > nb_pkts , pkt_stream - > pkts [ 0 ] . len ) ;
}
2021-09-07 10:19:24 +03:00
static void pkt_stream_replace ( struct test_spec * test , u32 nb_pkts , u32 pkt_len )
{
struct pkt_stream * pkt_stream ;
pkt_stream = pkt_stream_generate ( test - > ifobj_tx - > umem , nb_pkts , pkt_len ) ;
test - > ifobj_tx - > pkt_stream = pkt_stream ;
test - > ifobj_rx - > pkt_stream = pkt_stream ;
}
2021-09-07 10:19:25 +03:00
static void pkt_stream_replace_half ( struct test_spec * test , u32 pkt_len , u32 offset )
{
struct xsk_umem_info * umem = test - > ifobj_tx - > umem ;
struct pkt_stream * pkt_stream ;
u32 i ;
pkt_stream = pkt_stream_clone ( umem , test - > pkt_stream_default ) ;
for ( i = 0 ; i < test - > pkt_stream_default - > nb_pkts ; i + = 2 ) {
pkt_stream - > pkts [ i ] . addr = ( i % umem - > num_frames ) * umem - > frame_size + offset ;
pkt_stream - > pkts [ i ] . len = pkt_len ;
}
test - > ifobj_tx - > pkt_stream = pkt_stream ;
test - > ifobj_rx - > pkt_stream = pkt_stream ;
}
2021-08-25 12:37:20 +03:00
static struct pkt * pkt_generate ( struct ifobject * ifobject , u32 pkt_nb )
{
struct pkt * pkt = pkt_stream_get_pkt ( ifobject - > pkt_stream , pkt_nb ) ;
struct udphdr * udp_hdr ;
struct ethhdr * eth_hdr ;
struct iphdr * ip_hdr ;
void * data ;
if ( ! pkt )
return NULL ;
2021-09-07 10:19:27 +03:00
if ( ! pkt - > valid | | pkt - > len < PKT_SIZE )
return pkt ;
2021-08-25 12:37:20 +03:00
data = xsk_umem__get_data ( ifobject - > umem - > buffer , pkt - > addr ) ;
udp_hdr = ( struct udphdr * ) ( data + sizeof ( struct ethhdr ) + sizeof ( struct iphdr ) ) ;
ip_hdr = ( struct iphdr * ) ( data + sizeof ( struct ethhdr ) ) ;
eth_hdr = ( struct ethhdr * ) data ;
2021-08-25 12:37:19 +03:00
gen_udp_hdr ( pkt_nb , data , ifobject , udp_hdr ) ;
gen_ip_hdr ( ifobject , ip_hdr ) ;
gen_udp_csum ( udp_hdr , ip_hdr ) ;
gen_eth_hdr ( ifobject , eth_hdr ) ;
2021-08-25 12:37:20 +03:00
return pkt ;
2021-08-25 12:37:19 +03:00
}
2021-09-07 10:19:27 +03:00
static void pkt_stream_generate_custom ( struct test_spec * test , struct pkt * pkts , u32 nb_pkts )
{
struct pkt_stream * pkt_stream ;
u32 i ;
pkt_stream = __pkt_stream_alloc ( nb_pkts ) ;
if ( ! pkt_stream )
exit_with_error ( ENOMEM ) ;
test - > ifobj_tx - > pkt_stream = pkt_stream ;
test - > ifobj_rx - > pkt_stream = pkt_stream ;
for ( i = 0 ; i < nb_pkts ; i + + ) {
pkt_stream - > pkts [ i ] . addr = pkts [ i ] . addr ;
pkt_stream - > pkts [ i ] . len = pkts [ i ] . len ;
pkt_stream - > pkts [ i ] . payload = i ;
pkt_stream - > pkts [ i ] . valid = pkts [ i ] . valid ;
}
}
2021-08-25 12:37:15 +03:00
static void pkt_dump ( void * pkt , u32 len )
{
char s [ INET_ADDRSTRLEN ] ;
struct ethhdr * ethhdr ;
struct udphdr * udphdr ;
struct iphdr * iphdr ;
int payload , i ;
ethhdr = pkt ;
iphdr = pkt + sizeof ( * ethhdr ) ;
udphdr = pkt + sizeof ( * ethhdr ) + sizeof ( * iphdr ) ;
/*extract L2 frame */
fprintf ( stdout , " DEBUG>> L2: dst mac: " ) ;
for ( i = 0 ; i < ETH_ALEN ; i + + )
fprintf ( stdout , " %02X " , ethhdr - > h_dest [ i ] ) ;
fprintf ( stdout , " \n DEBUG>> L2: src mac: " ) ;
for ( i = 0 ; i < ETH_ALEN ; i + + )
fprintf ( stdout , " %02X " , ethhdr - > h_source [ i ] ) ;
/*extract L3 frame */
fprintf ( stdout , " \n DEBUG>> L3: ip_hdr->ihl: %02X \n " , iphdr - > ihl ) ;
fprintf ( stdout , " DEBUG>> L3: ip_hdr->saddr: %s \n " ,
inet_ntop ( AF_INET , & iphdr - > saddr , s , sizeof ( s ) ) ) ;
fprintf ( stdout , " DEBUG>> L3: ip_hdr->daddr: %s \n " ,
inet_ntop ( AF_INET , & iphdr - > daddr , s , sizeof ( s ) ) ) ;
/*extract L4 frame */
fprintf ( stdout , " DEBUG>> L4: udp_hdr->src: %d \n " , ntohs ( udphdr - > source ) ) ;
fprintf ( stdout , " DEBUG>> L4: udp_hdr->dst: %d \n " , ntohs ( udphdr - > dest ) ) ;
/*extract L5 frame */
payload = * ( ( uint32_t * ) ( pkt + PKT_HDR_SIZE ) ) ;
fprintf ( stdout , " DEBUG>> L5: payload: %d \n " , payload ) ;
fprintf ( stdout , " --------------------------------------- \n " ) ;
}
2021-09-07 10:19:25 +03:00
static bool is_pkt_valid ( struct pkt * pkt , void * buffer , u64 addr , u32 len )
2021-08-25 12:37:15 +03:00
{
2021-09-07 10:19:25 +03:00
void * data = xsk_umem__get_data ( buffer , addr ) ;
2021-08-25 12:37:19 +03:00
struct iphdr * iphdr = ( struct iphdr * ) ( data + sizeof ( struct ethhdr ) ) ;
2021-08-25 12:37:15 +03:00
2021-08-25 12:37:20 +03:00
if ( ! pkt ) {
ksft_test_result_fail ( " ERROR: [%s] too many packets received \n " , __func__ ) ;
return false ;
}
2021-09-07 10:19:27 +03:00
if ( len < PKT_SIZE ) {
/*Do not try to verify packets that are smaller than minimum size. */
return true ;
}
if ( pkt - > len ! = len ) {
ksft_test_result_fail
( " ERROR: [%s] expected length [%d], got length [%d] \n " ,
__func__ , pkt - > len , len ) ;
return false ;
}
2021-08-25 12:37:15 +03:00
if ( iphdr - > version = = IP_PKT_VER & & iphdr - > tos = = IP_PKT_TOS ) {
2021-08-25 12:37:19 +03:00
u32 seqnum = ntohl ( * ( ( u32 * ) ( data + PKT_HDR_SIZE ) ) ) ;
2021-08-25 12:37:15 +03:00
2021-09-07 10:19:24 +03:00
if ( opt_pkt_dump )
2021-08-25 12:37:19 +03:00
pkt_dump ( data , PKT_SIZE ) ;
2021-08-25 12:37:15 +03:00
2021-08-25 12:37:20 +03:00
if ( pkt - > payload ! = seqnum ) {
ksft_test_result_fail
( " ERROR: [%s] expected seqnum [%d], got seqnum [%d] \n " ,
__func__ , pkt - > payload , seqnum ) ;
return false ;
}
2021-08-25 12:37:15 +03:00
} else {
ksft_print_msg ( " Invalid frame received: " ) ;
ksft_print_msg ( " [IP_PKT_VER: %02X], [IP_PKT_TOS: %02X] \n " , iphdr - > version ,
iphdr - > tos ) ;
2021-08-25 12:37:20 +03:00
return false ;
2021-08-25 12:37:15 +03:00
}
2021-08-25 12:37:20 +03:00
return true ;
2021-08-25 12:37:15 +03:00
}
2020-12-08 00:53:30 +03:00
static void kick_tx ( struct xsk_socket_info * xsk )
{
int ret ;
ret = sendto ( xsk_socket__fd ( xsk - > xsk ) , NULL , 0 , MSG_DONTWAIT , NULL , 0 ) ;
if ( ret > = 0 | | errno = = ENOBUFS | | errno = = EAGAIN | | errno = = EBUSY | | errno = = ENETDOWN )
return ;
exit_with_error ( errno ) ;
}
2021-08-25 12:37:20 +03:00
static void complete_pkts ( struct xsk_socket_info * xsk , int batch_size )
2020-12-08 00:53:30 +03:00
{
unsigned int rcvd ;
u32 idx ;
2021-03-30 01:43:16 +03:00
if ( xsk_ring_prod__needs_wakeup ( & xsk - > tx ) )
2020-12-08 00:53:30 +03:00
kick_tx ( xsk ) ;
rcvd = xsk_ring_cons__peek ( & xsk - > umem - > cq , batch_size , & idx ) ;
if ( rcvd ) {
2021-09-07 10:19:27 +03:00
if ( rcvd > xsk - > outstanding_tx ) {
u64 addr = * xsk_ring_cons__comp_addr ( & xsk - > umem - > cq , idx + rcvd - 1 ) ;
ksft_test_result_fail ( " ERROR: [%s] Too many packets completed \n " ,
__func__ ) ;
ksft_print_msg ( " Last completion address: %llx \n " , addr ) ;
return ;
}
2020-12-08 00:53:30 +03:00
xsk_ring_cons__release ( & xsk - > umem - > cq , rcvd ) ;
xsk - > outstanding_tx - = rcvd ;
}
}
2021-08-25 12:37:20 +03:00
static void receive_pkts ( struct pkt_stream * pkt_stream , struct xsk_socket_info * xsk ,
struct pollfd * fds )
2020-12-08 00:53:30 +03:00
{
2021-09-07 10:19:27 +03:00
struct pkt * pkt = pkt_stream_get_next_rx_pkt ( pkt_stream ) ;
u32 idx_rx = 0 , idx_fq = 0 , rcvd , i ;
2021-09-22 10:56:10 +03:00
u32 total = 0 ;
2020-12-08 00:53:30 +03:00
int ret ;
2021-08-25 12:37:20 +03:00
while ( pkt ) {
rcvd = xsk_ring_cons__peek ( & xsk - > rx , BATCH_SIZE , & idx_rx ) ;
if ( ! rcvd ) {
if ( xsk_ring_prod__needs_wakeup ( & xsk - > umem - > fq ) ) {
ret = poll ( fds , 1 , POLL_TMOUT ) ;
if ( ret < 0 )
exit_with_error ( - ret ) ;
}
continue ;
2020-12-08 00:53:30 +03:00
}
2021-08-25 12:37:20 +03:00
ret = xsk_ring_prod__reserve ( & xsk - > umem - > fq , rcvd , & idx_fq ) ;
while ( ret ! = rcvd ) {
2020-12-08 00:53:30 +03:00
if ( ret < 0 )
2021-08-25 12:37:10 +03:00
exit_with_error ( - ret ) ;
2021-08-25 12:37:20 +03:00
if ( xsk_ring_prod__needs_wakeup ( & xsk - > umem - > fq ) ) {
ret = poll ( fds , 1 , POLL_TMOUT ) ;
if ( ret < 0 )
exit_with_error ( - ret ) ;
}
ret = xsk_ring_prod__reserve ( & xsk - > umem - > fq , rcvd , & idx_fq ) ;
2020-12-08 00:53:30 +03:00
}
2021-08-25 12:37:20 +03:00
for ( i = 0 ; i < rcvd ; i + + ) {
const struct xdp_desc * desc = xsk_ring_cons__rx_desc ( & xsk - > rx , idx_rx + + ) ;
u64 addr = desc - > addr , orig ;
2021-01-22 18:47:22 +03:00
2021-09-07 10:19:27 +03:00
if ( ! pkt ) {
ksft_test_result_fail ( " ERROR: [%s] Received too many packets. \n " ,
__func__ ) ;
ksft_print_msg ( " Last packet has addr: %llx len: %u \n " ,
addr , desc - > len ) ;
return ;
}
2021-08-25 12:37:20 +03:00
orig = xsk_umem__extract_addr ( addr ) ;
addr = xsk_umem__add_offset_to_addr ( addr ) ;
2021-09-07 10:19:25 +03:00
if ( ! is_pkt_valid ( pkt , xsk - > umem - > buffer , addr , desc - > len ) )
2021-08-25 12:37:20 +03:00
return ;
2020-12-08 00:53:30 +03:00
2021-08-25 12:37:20 +03:00
* xsk_ring_prod__fill_addr ( & xsk - > umem - > fq , idx_fq + + ) = orig ;
2021-09-07 10:19:27 +03:00
pkt = pkt_stream_get_next_rx_pkt ( pkt_stream ) ;
2021-08-25 12:37:20 +03:00
}
2020-12-08 00:53:30 +03:00
2021-08-25 12:37:20 +03:00
xsk_ring_prod__submit ( & xsk - > umem - > fq , rcvd ) ;
xsk_ring_cons__release ( & xsk - > rx , rcvd ) ;
2021-09-22 10:56:10 +03:00
pthread_mutex_lock ( & pacing_mutex ) ;
pkts_in_flight - = rcvd ;
total + = rcvd ;
if ( pkts_in_flight < umem - > num_frames )
pthread_cond_signal ( & pacing_cond ) ;
pthread_mutex_unlock ( & pacing_mutex ) ;
2020-12-08 00:53:30 +03:00
}
}
2021-08-25 12:37:20 +03:00
static u32 __send_pkts ( struct ifobject * ifobject , u32 pkt_nb )
2020-12-08 00:53:30 +03:00
{
2021-08-25 12:37:19 +03:00
struct xsk_socket_info * xsk = ifobject - > xsk ;
2021-09-07 10:19:23 +03:00
u32 i , idx , valid_pkts = 0 ;
2020-12-08 00:53:30 +03:00
2021-08-25 12:37:20 +03:00
while ( xsk_ring_prod__reserve ( & xsk - > tx , BATCH_SIZE , & idx ) < BATCH_SIZE )
complete_pkts ( xsk , BATCH_SIZE ) ;
2020-12-08 00:53:30 +03:00
2021-08-25 12:37:20 +03:00
for ( i = 0 ; i < BATCH_SIZE ; i + + ) {
2020-12-08 00:53:30 +03:00
struct xdp_desc * tx_desc = xsk_ring_prod__tx_desc ( & xsk - > tx , idx + i ) ;
2021-08-25 12:37:20 +03:00
struct pkt * pkt = pkt_generate ( ifobject , pkt_nb ) ;
2020-12-08 00:53:30 +03:00
2021-08-25 12:37:20 +03:00
if ( ! pkt )
break ;
2020-12-08 00:53:30 +03:00
2021-08-25 12:37:20 +03:00
tx_desc - > addr = pkt - > addr ;
tx_desc - > len = pkt - > len ;
pkt_nb + + ;
2021-09-07 10:19:23 +03:00
if ( pkt - > valid )
valid_pkts + + ;
2021-02-23 19:23:04 +03:00
}
2020-12-08 00:53:30 +03:00
2021-09-22 10:56:10 +03:00
pthread_mutex_lock ( & pacing_mutex ) ;
pkts_in_flight + = valid_pkts ;
if ( ifobject - > pacing_on & & pkts_in_flight > = ifobject - > umem - > num_frames - BATCH_SIZE ) {
kick_tx ( xsk ) ;
pthread_cond_wait ( & pacing_cond , & pacing_mutex ) ;
}
pthread_mutex_unlock ( & pacing_mutex ) ;
2021-08-25 12:37:20 +03:00
xsk_ring_prod__submit ( & xsk - > tx , i ) ;
2021-09-07 10:19:23 +03:00
xsk - > outstanding_tx + = valid_pkts ;
2021-09-22 10:56:10 +03:00
complete_pkts ( xsk , i ) ;
2020-12-08 00:53:30 +03:00
2021-09-22 10:56:10 +03:00
usleep ( 10 ) ;
2021-08-25 12:37:20 +03:00
return i ;
2020-12-08 00:53:30 +03:00
}
2021-08-25 12:37:20 +03:00
static void wait_for_tx_completion ( struct xsk_socket_info * xsk )
2020-12-08 00:53:30 +03:00
{
2021-08-25 12:37:20 +03:00
while ( xsk - > outstanding_tx )
complete_pkts ( xsk , BATCH_SIZE ) ;
2020-12-08 00:53:30 +03:00
}
2021-08-25 12:37:20 +03:00
static void send_pkts ( struct ifobject * ifobject )
2020-12-08 00:53:30 +03:00
{
2021-09-07 10:19:22 +03:00
struct pollfd fds = { } ;
2021-08-25 12:37:20 +03:00
u32 pkt_cnt = 0 ;
2020-12-08 00:53:30 +03:00
2021-09-07 10:19:22 +03:00
fds . fd = xsk_socket__fd ( ifobject - > xsk - > xsk ) ;
fds . events = POLLOUT ;
2020-12-08 00:53:30 +03:00
2021-08-25 12:37:20 +03:00
while ( pkt_cnt < ifobject - > pkt_stream - > nb_pkts ) {
2021-09-07 10:19:16 +03:00
if ( ifobject - > use_poll ) {
2021-08-25 12:37:20 +03:00
int ret ;
2021-09-07 10:19:22 +03:00
ret = poll ( & fds , 1 , POLL_TMOUT ) ;
2020-12-08 00:53:30 +03:00
if ( ret < = 0 )
continue ;
2021-09-07 10:19:22 +03:00
if ( ! ( fds . revents & POLLOUT ) )
2020-12-08 00:53:30 +03:00
continue ;
}
2021-09-22 10:56:10 +03:00
pkt_cnt + = __send_pkts ( ifobject , pkt_cnt ) ;
2020-12-08 00:53:30 +03:00
}
2021-08-25 12:37:20 +03:00
wait_for_tx_completion ( ifobject - > xsk ) ;
2020-12-08 00:53:30 +03:00
}
2021-08-25 12:37:16 +03:00
static bool rx_stats_are_valid ( struct ifobject * ifobject )
2021-02-23 19:23:04 +03:00
{
2021-08-25 12:37:20 +03:00
u32 xsk_stat = 0 , expected_stat = ifobject - > pkt_stream - > nb_pkts ;
2021-08-25 12:37:16 +03:00
struct xsk_socket * xsk = ifobject - > xsk - > xsk ;
int fd = xsk_socket__fd ( xsk ) ;
2021-02-23 19:23:04 +03:00
struct xdp_statistics stats ;
socklen_t optlen ;
int err ;
optlen = sizeof ( stats ) ;
err = getsockopt ( fd , SOL_XDP , XDP_STATISTICS , & stats , & optlen ) ;
2021-08-25 12:37:16 +03:00
if ( err ) {
2021-09-07 10:19:18 +03:00
ksft_test_result_fail ( " ERROR Rx: [%s] getsockopt(XDP_STATISTICS) error %u %s \n " ,
2021-08-25 12:37:16 +03:00
__func__ , - err , strerror ( - err ) ) ;
return true ;
}
2021-02-23 19:23:04 +03:00
if ( optlen = = sizeof ( struct xdp_statistics ) ) {
switch ( stat_test_type ) {
case STAT_TEST_RX_DROPPED :
xsk_stat = stats . rx_dropped ;
break ;
case STAT_TEST_TX_INVALID :
2021-08-25 12:37:16 +03:00
return true ;
2021-02-23 19:23:04 +03:00
case STAT_TEST_RX_FULL :
xsk_stat = stats . rx_ring_full ;
expected_stat - = RX_FULL_RXQSIZE ;
break ;
case STAT_TEST_RX_FILL_EMPTY :
xsk_stat = stats . rx_fill_ring_empty_descs ;
break ;
default :
break ;
}
if ( xsk_stat = = expected_stat )
2021-08-25 12:37:16 +03:00
return true ;
2021-02-23 19:23:04 +03:00
}
2021-08-25 12:37:16 +03:00
return false ;
}
static void tx_stats_validate ( struct ifobject * ifobject )
{
struct xsk_socket * xsk = ifobject - > xsk - > xsk ;
int fd = xsk_socket__fd ( xsk ) ;
struct xdp_statistics stats ;
socklen_t optlen ;
int err ;
optlen = sizeof ( stats ) ;
err = getsockopt ( fd , SOL_XDP , XDP_STATISTICS , & stats , & optlen ) ;
if ( err ) {
2021-09-07 10:19:18 +03:00
ksft_test_result_fail ( " ERROR Tx: [%s] getsockopt(XDP_STATISTICS) error %u %s \n " ,
2021-08-25 12:37:16 +03:00
__func__ , - err , strerror ( - err ) ) ;
return ;
}
2021-08-25 12:37:20 +03:00
if ( stats . tx_invalid_descs = = ifobject - > pkt_stream - > nb_pkts )
2021-08-25 12:37:16 +03:00
return ;
ksft_test_result_fail ( " ERROR: [%s] tx_invalid_descs incorrect. Got [%u] expected [%u] \n " ,
2021-08-25 12:37:20 +03:00
__func__ , stats . tx_invalid_descs , ifobject - > pkt_stream - > nb_pkts ) ;
2021-02-23 19:23:04 +03:00
}
2021-09-07 10:19:18 +03:00
/*
 * Per-thread socket setup shared by the Tx and Rx workers: enter the
 * interface's network namespace, mmap one UMEM area per socket, and
 * create the sockets.  Socket creation is retried because
 * xsk_socket__create() can fail transiently (see comment below).
 */
static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
{
	int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
	u32 i;

	ifobject->ns_fd = switch_namespace(ifobject->nsname);

	/* Unaligned mode maps the UMEM with huge pages so buffers may
	 * straddle regular page boundaries within one mapping.
	 */
	if (ifobject->umem->unaligned_mode)
		mmap_flags |= MAP_HUGETLB;

	for (i = 0; i < test->nb_sockets; i++) {
		u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size;
		u32 ctr = 0;
		void *bufs;
		int ret;

		bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
		if (bufs == MAP_FAILED)
			exit_with_error(errno);

		ret = xsk_configure_umem(&ifobject->umem_arr[i], bufs, umem_sz);
		if (ret)
			exit_with_error(-ret);

		/* Up to SOCK_RECONF_CTR attempts; on the final failed
		 * attempt ctr has already been incremented past the limit,
		 * so the inner check aborts instead of sleeping again.
		 */
		while (ctr++ < SOCK_RECONF_CTR) {
			ret = xsk_configure_socket(&ifobject->xsk_arr[i], &ifobject->umem_arr[i],
						   ifobject, i);
			if (!ret)
				break;

			/* Retry if it fails as xsk_socket__create() is asynchronous */
			if (ctr >= SOCK_RECONF_CTR)
				exit_with_error(-ret);
			usleep(USLEEP_MAX);
		}
	}

	/* Default to the first UMEM/socket pair; multi-socket tests switch
	 * to the second pair explicitly (see swap_xsk_resources()).
	 */
	ifobject->umem = &ifobject->umem_arr[0];
	ifobject->xsk = &ifobject->xsk_arr[0];
}
2021-03-30 01:43:13 +03:00
static void testapp_cleanup_xsk_res ( struct ifobject * ifobj )
{
2021-09-07 10:19:24 +03:00
print_verbose ( " Destroying socket \n " ) ;
2021-09-07 10:19:18 +03:00
xsk_socket__delete ( ifobj - > xsk - > xsk ) ;
2021-09-07 10:19:27 +03:00
munmap ( ifobj - > umem - > buffer , ifobj - > umem - > num_frames * ifobj - > umem - > frame_size ) ;
2021-09-07 10:19:18 +03:00
xsk_umem__delete ( ifobj - > umem - > umem ) ;
2021-03-30 01:43:13 +03:00
}
2021-03-30 01:43:08 +03:00
static void * worker_testapp_validate_tx ( void * arg )
2020-12-08 00:53:30 +03:00
{
2021-09-07 10:19:18 +03:00
struct test_spec * test = ( struct test_spec * ) arg ;
struct ifobject * ifobject = test - > ifobj_tx ;
2020-12-08 00:53:30 +03:00
2021-09-07 10:19:18 +03:00
if ( test - > current_step = = 1 )
thread_common_ops ( test , ifobject ) ;
2020-12-08 00:53:30 +03:00
2021-08-25 12:37:20 +03:00
print_verbose ( " Sending %d packets on interface %s \n " , ifobject - > pkt_stream - > nb_pkts ,
ifobject - > ifname ) ;
send_pkts ( ifobject ) ;
2020-12-08 00:53:30 +03:00
2021-08-25 12:37:16 +03:00
if ( stat_test_type = = STAT_TEST_TX_INVALID )
tx_stats_validate ( ifobject ) ;
2021-09-07 10:19:18 +03:00
if ( test - > total_steps = = test - > current_step )
testapp_cleanup_xsk_res ( ifobject ) ;
2021-03-30 01:43:08 +03:00
pthread_exit ( NULL ) ;
}
2020-12-08 00:53:30 +03:00
2021-09-07 10:19:25 +03:00
static void xsk_populate_fill_ring ( struct xsk_umem_info * umem , struct pkt_stream * pkt_stream )
{
2021-09-22 10:56:08 +03:00
u32 idx = 0 , i , buffers_to_fill ;
2021-09-07 10:19:25 +03:00
int ret ;
2021-09-22 10:56:08 +03:00
if ( umem - > num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS )
buffers_to_fill = umem - > num_frames ;
else
buffers_to_fill = XSK_RING_PROD__DEFAULT_NUM_DESCS ;
ret = xsk_ring_prod__reserve ( & umem - > fq , buffers_to_fill , & idx ) ;
if ( ret ! = buffers_to_fill )
2021-09-07 10:19:25 +03:00
exit_with_error ( ENOSPC ) ;
2021-09-22 10:56:08 +03:00
for ( i = 0 ; i < buffers_to_fill ; i + + ) {
2021-09-07 10:19:25 +03:00
u64 addr ;
if ( pkt_stream - > use_addr_for_fill ) {
struct pkt * pkt = pkt_stream_get_pkt ( pkt_stream , i ) ;
if ( ! pkt )
break ;
addr = pkt - > addr ;
} else {
2021-09-22 10:56:08 +03:00
addr = i * umem - > frame_size + DEFAULT_OFFSET ;
2021-09-07 10:19:25 +03:00
}
* xsk_ring_prod__fill_addr ( & umem - > fq , idx + + ) = addr ;
}
2021-09-22 10:56:08 +03:00
xsk_ring_prod__submit ( & umem - > fq , buffers_to_fill ) ;
2021-09-07 10:19:25 +03:00
}
2021-03-30 01:43:08 +03:00
static void * worker_testapp_validate_rx ( void * arg )
{
2021-09-07 10:19:18 +03:00
struct test_spec * test = ( struct test_spec * ) arg ;
struct ifobject * ifobject = test - > ifobj_rx ;
2021-09-07 10:19:22 +03:00
struct pollfd fds = { } ;
2020-12-08 00:53:30 +03:00
2021-09-07 10:19:18 +03:00
if ( test - > current_step = = 1 )
thread_common_ops ( test , ifobject ) ;
2020-12-08 00:53:30 +03:00
2021-09-07 10:19:26 +03:00
xsk_populate_fill_ring ( ifobject - > umem , ifobject - > pkt_stream ) ;
2020-12-08 00:53:30 +03:00
2021-09-07 10:19:22 +03:00
fds . fd = xsk_socket__fd ( ifobject - > xsk - > xsk ) ;
fds . events = POLLIN ;
2021-02-23 19:23:04 +03:00
2021-03-30 01:43:15 +03:00
pthread_barrier_wait ( & barr ) ;
2020-12-08 00:53:30 +03:00
2021-08-25 12:37:20 +03:00
if ( test_type = = TEST_TYPE_STATS )
while ( ! rx_stats_are_valid ( ifobject ) )
continue ;
else
2021-09-07 10:19:22 +03:00
receive_pkts ( ifobject - > pkt_stream , ifobject - > xsk , & fds ) ;
2020-12-08 00:53:32 +03:00
2021-09-07 10:19:18 +03:00
if ( test - > total_steps = = test - > current_step )
testapp_cleanup_xsk_res ( ifobject ) ;
2020-12-08 00:53:30 +03:00
pthread_exit ( NULL ) ;
}
2021-09-07 10:19:11 +03:00
static void testapp_validate_traffic ( struct test_spec * test )
2020-12-08 00:53:30 +03:00
{
2021-09-07 10:19:11 +03:00
struct ifobject * ifobj_tx = test - > ifobj_tx ;
struct ifobject * ifobj_rx = test - > ifobj_rx ;
2021-09-07 10:19:21 +03:00
pthread_t t0 , t1 ;
2021-01-22 18:47:22 +03:00
2021-03-30 01:43:15 +03:00
if ( pthread_barrier_init ( & barr , NULL , 2 ) )
exit_with_error ( errno ) ;
2020-12-08 00:53:30 +03:00
2021-09-07 10:19:18 +03:00
test - > current_step + + ;
2021-09-22 10:56:07 +03:00
pkt_stream_reset ( ifobj_rx - > pkt_stream ) ;
2021-09-22 10:56:10 +03:00
pkts_in_flight = 0 ;
2021-08-25 12:37:20 +03:00
2020-12-08 00:53:30 +03:00
/*Spawn RX thread */
2021-09-07 10:19:18 +03:00
pthread_create ( & t0 , NULL , ifobj_rx - > func_ptr , test ) ;
2020-12-08 00:53:30 +03:00
2021-03-30 01:43:15 +03:00
pthread_barrier_wait ( & barr ) ;
if ( pthread_barrier_destroy ( & barr ) )
2020-12-08 00:53:30 +03:00
exit_with_error ( errno ) ;
/*Spawn TX thread */
2021-09-07 10:19:18 +03:00
pthread_create ( & t1 , NULL , ifobj_tx - > func_ptr , test ) ;
2020-12-08 00:53:30 +03:00
pthread_join ( t1 , NULL ) ;
pthread_join ( t0 , NULL ) ;
2020-12-08 00:53:32 +03:00
}
2021-09-07 10:19:11 +03:00
static void testapp_teardown ( struct test_spec * test )
2021-03-30 01:43:10 +03:00
{
int i ;
2021-09-07 10:19:15 +03:00
test_spec_set_name ( test , " TEARDOWN " ) ;
2021-03-30 01:43:10 +03:00
for ( i = 0 ; i < MAX_TEARDOWN_ITER ; i + + ) {
2021-09-07 10:19:11 +03:00
testapp_validate_traffic ( test ) ;
test_spec_reset ( test ) ;
2021-03-30 01:43:10 +03:00
}
}
2021-09-07 10:19:11 +03:00
static void swap_directions ( struct ifobject * * ifobj1 , struct ifobject * * ifobj2 )
2021-03-30 01:43:10 +03:00
{
2021-09-07 10:19:11 +03:00
thread_func_t tmp_func_ptr = ( * ifobj1 ) - > func_ptr ;
struct ifobject * tmp_ifobj = ( * ifobj1 ) ;
2021-03-30 01:43:10 +03:00
2021-09-07 10:19:11 +03:00
( * ifobj1 ) - > func_ptr = ( * ifobj2 ) - > func_ptr ;
( * ifobj2 ) - > func_ptr = tmp_func_ptr ;
2021-03-30 01:43:10 +03:00
2021-09-07 10:19:11 +03:00
* ifobj1 = * ifobj2 ;
* ifobj2 = tmp_ifobj ;
2021-03-30 01:43:10 +03:00
}
2021-09-07 10:19:11 +03:00
static void testapp_bidi ( struct test_spec * test )
2020-12-08 00:53:32 +03:00
{
2021-09-07 10:19:15 +03:00
test_spec_set_name ( test , " BIDIRECTIONAL " ) ;
2021-09-07 10:19:17 +03:00
test - > ifobj_tx - > rx_on = true ;
test - > ifobj_rx - > tx_on = true ;
2021-09-07 10:19:18 +03:00
test - > total_steps = 2 ;
testapp_validate_traffic ( test ) ;
print_verbose ( " Switching Tx/Rx vectors \n " ) ;
swap_directions ( & test - > ifobj_rx , & test - > ifobj_tx ) ;
testapp_validate_traffic ( test ) ;
2020-12-08 00:53:32 +03:00
2021-09-07 10:19:11 +03:00
swap_directions ( & test - > ifobj_rx , & test - > ifobj_tx ) ;
2020-12-08 00:53:30 +03:00
}
2021-09-07 10:19:11 +03:00
static void swap_xsk_resources ( struct ifobject * ifobj_tx , struct ifobject * ifobj_rx )
2021-03-30 01:43:13 +03:00
{
2021-09-07 10:19:11 +03:00
xsk_socket__delete ( ifobj_tx - > xsk - > xsk ) ;
xsk_umem__delete ( ifobj_tx - > umem - > umem ) ;
xsk_socket__delete ( ifobj_rx - > xsk - > xsk ) ;
xsk_umem__delete ( ifobj_rx - > umem - > umem ) ;
ifobj_tx - > umem = & ifobj_tx - > umem_arr [ 1 ] ;
ifobj_tx - > xsk = & ifobj_tx - > xsk_arr [ 1 ] ;
ifobj_rx - > umem = & ifobj_rx - > umem_arr [ 1 ] ;
ifobj_rx - > xsk = & ifobj_rx - > xsk_arr [ 1 ] ;
2021-03-30 01:43:13 +03:00
}
2021-09-07 10:19:11 +03:00
static void testapp_bpf_res ( struct test_spec * test )
2021-03-30 01:43:13 +03:00
{
2021-09-07 10:19:15 +03:00
test_spec_set_name ( test , " BPF_RES " ) ;
2021-09-07 10:19:18 +03:00
test - > total_steps = 2 ;
2021-09-07 10:19:19 +03:00
test - > nb_sockets = 2 ;
2021-09-07 10:19:18 +03:00
testapp_validate_traffic ( test ) ;
swap_xsk_resources ( test - > ifobj_tx , test - > ifobj_rx ) ;
testapp_validate_traffic ( test ) ;
2021-03-30 01:43:13 +03:00
}
2021-09-07 10:19:11 +03:00
/*
 * Run every statistics sub-test (stat_test_type) in turn.  Each case
 * provokes one specific error condition and rx_stats_are_valid() /
 * tx_stats_validate() verify the matching XDP_STATISTICS counter.
 */
static void testapp_stats(struct test_spec *test)
{
	int i;

	for (i = 0; i < STAT_TEST_TYPE_MAX; i++) {
		test_spec_reset(test);
		stat_test_type = i;
		/* No or few packets will be received so cannot pace packets */
		test->ifobj_tx->pacing_on = false;
		/* NOTE(review): pacing_on is not re-enabled here — presumably
		 * test_spec_reset()/test_spec_init() restores it; confirm.
		 */

		switch (stat_test_type) {
		case STAT_TEST_RX_DROPPED:
			test_spec_set_name(test, "STAT_RX_DROPPED");
			/* Headroom so large that no packet fits in the frame,
			 * forcing every received packet to be dropped.
			 */
			test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size -
				XDP_PACKET_HEADROOM - 1;
			testapp_validate_traffic(test);
			break;
		case STAT_TEST_RX_FULL:
			test_spec_set_name(test, "STAT_RX_FULL");
			/* Shrink the Rx ring so it overflows under load. */
			test->ifobj_rx->xsk->rxqsize = RX_FULL_RXQSIZE;
			testapp_validate_traffic(test);
			break;
		case STAT_TEST_TX_INVALID:
			test_spec_set_name(test, "STAT_TX_INVALID");
			/* Give every Tx descriptor an invalid (too large) length. */
			pkt_stream_replace(test, DEFAULT_PKT_CNT, XSK_UMEM__INVALID_FRAME_SIZE);
			testapp_validate_traffic(test);
			pkt_stream_restore_default(test);
			break;
		case STAT_TEST_RX_FILL_EMPTY:
			test_spec_set_name(test, "STAT_RX_FILL_EMPTY");
			/* A zero-packet stream leaves the fill queue empty. */
			test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, 0,
									 MIN_PKT_SIZE);
			if (!test->ifobj_rx->pkt_stream)
				exit_with_error(ENOMEM);
			test->ifobj_rx->pkt_stream->use_addr_for_fill = true;
			testapp_validate_traffic(test);
			pkt_stream_restore_default(test);
			break;
		default:
			break;
		}
	}

	/* To only see the whole stat set being completed unless an individual test fails. */
	test_spec_set_name(test, "STATS");
}
2021-09-07 10:19:25 +03:00
/* Simple test */
static bool hugepages_present ( struct ifobject * ifobject )
{
const size_t mmap_sz = 2 * ifobject - > umem - > num_frames * ifobject - > umem - > frame_size ;
void * bufs ;
bufs = mmap ( NULL , mmap_sz , PROT_READ | PROT_WRITE ,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_HUGETLB , - 1 , 0 ) ;
if ( bufs = = MAP_FAILED )
return false ;
munmap ( bufs , mmap_sz ) ;
return true ;
}
static bool testapp_unaligned ( struct test_spec * test )
{
if ( ! hugepages_present ( test - > ifobj_tx ) ) {
ksft_test_result_skip ( " No 2M huge pages present. \n " ) ;
return false ;
}
test_spec_set_name ( test , " UNALIGNED_MODE " ) ;
test - > ifobj_tx - > umem - > unaligned_mode = true ;
test - > ifobj_rx - > umem - > unaligned_mode = true ;
/* Let half of the packets straddle a buffer boundrary */
pkt_stream_replace_half ( test , PKT_SIZE , test - > ifobj_tx - > umem - > frame_size - 32 ) ;
test - > ifobj_rx - > pkt_stream - > use_addr_for_fill = true ;
testapp_validate_traffic ( test ) ;
pkt_stream_restore_default ( test ) ;
return true ;
}
2021-09-07 10:19:27 +03:00
/*
 * Feed a hand-crafted set of Tx descriptors, most of them invalid, and
 * verify that only the descriptors marked valid make it through.  Each
 * entry is { addr, len, payload, valid }.  Entries 6 and 7 are patched
 * below depending on the UMEM configuration.
 */
static void testapp_invalid_desc(struct test_spec *test)
{
	struct pkt pkts[] = {
		/* Zero packet length at address zero allowed */
		{0, 0, 0, true},
		/* Zero packet length allowed */
		{0x1000, 0, 0, true},
		/* Straddling the start of umem */
		{-2, PKT_SIZE, 0, false},
		/* Packet too large */
		{0x2000, XSK_UMEM__INVALID_FRAME_SIZE, 0, false},
		/* After umem ends */
		{UMEM_SIZE, PKT_SIZE, 0, false},
		/* Straddle the end of umem */
		{UMEM_SIZE - PKT_SIZE / 2, PKT_SIZE, 0, false},
		/* Straddle a page boundrary */
		{0x3000 - PKT_SIZE / 2, PKT_SIZE, 0, false},
		/* Straddle a 2K boundrary */
		{0x3800 - PKT_SIZE / 2, PKT_SIZE, 0, true},
		/* Valid packet for synch so that something is received */
		{0x4000, PKT_SIZE, 0, true}};

	if (test->ifobj_tx->umem->unaligned_mode) {
		/* Crossing a page boundrary allowed */
		pkts[6].valid = true;
	}
	if (test->ifobj_tx->umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) {
		/* Crossing a 2K frame size boundrary not allowed */
		pkts[7].valid = false;
	}

	pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
	testapp_validate_traffic(test);
	pkt_stream_restore_default(test);
}
2021-09-07 10:19:10 +03:00
/*
 * Populate an interface object's MAC/IP/UDP addressing and install its
 * worker-thread entry point.
 */
static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *src_mac,
		       const char *dst_ip, const char *src_ip, const u16 dst_port,
		       const u16 src_port, thread_func_t func_ptr)
{
	struct in_addr addr;

	memcpy(ifobj->dst_mac, dst_mac, ETH_ALEN);
	memcpy(ifobj->src_mac, src_mac, ETH_ALEN);

	inet_aton(dst_ip, &addr);
	ifobj->dst_ip = addr.s_addr;

	inet_aton(src_ip, &addr);
	ifobj->src_ip = addr.s_addr;

	ifobj->dst_port = dst_port;
	ifobj->src_port = src_port;
	ifobj->func_ptr = func_ptr;
}
2021-09-07 10:19:20 +03:00
/*
 * Dispatch one (mode, type) combination to the matching test routine,
 * set the reported test name, and print the ksft result.  The mode is
 * applied by the caller via test_spec_init(); it is unused here.
 */
static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_type type)
{
	test_type = type;

	/* reset defaults after potential previous test */
	stat_test_type = -1;

	switch (test_type) {
	case TEST_TYPE_STATS:
		testapp_stats(test);
		break;
	case TEST_TYPE_TEARDOWN:
		testapp_teardown(test);
		break;
	case TEST_TYPE_BIDI:
		testapp_bidi(test);
		break;
	case TEST_TYPE_BPF_RES:
		testapp_bpf_res(test);
		break;
	case TEST_TYPE_RUN_TO_COMPLETION:
		test_spec_set_name(test, "RUN_TO_COMPLETION");
		testapp_validate_traffic(test);
		break;
	case TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME:
		test_spec_set_name(test, "RUN_TO_COMPLETION_2K_FRAME_SIZE");
		/* Halve the frame size on both ends for this run. */
		test->ifobj_tx->umem->frame_size = 2048;
		test->ifobj_rx->umem->frame_size = 2048;
		pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
		testapp_validate_traffic(test);
		pkt_stream_restore_default(test);
		break;
	case TEST_TYPE_POLL:
		test->ifobj_tx->use_poll = true;
		test->ifobj_rx->use_poll = true;
		test_spec_set_name(test, "POLL");
		testapp_validate_traffic(test);
		break;
	case TEST_TYPE_ALIGNED_INV_DESC:
		test_spec_set_name(test, "ALIGNED_INV_DESC");
		testapp_invalid_desc(test);
		break;
	case TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME:
		test_spec_set_name(test, "ALIGNED_INV_DESC_2K_FRAME_SIZE");
		test->ifobj_tx->umem->frame_size = 2048;
		test->ifobj_rx->umem->frame_size = 2048;
		testapp_invalid_desc(test);
		break;
	case TEST_TYPE_UNALIGNED_INV_DESC:
		test_spec_set_name(test, "UNALIGNED_INV_DESC");
		test->ifobj_tx->umem->unaligned_mode = true;
		test->ifobj_rx->umem->unaligned_mode = true;
		testapp_invalid_desc(test);
		break;
	case TEST_TYPE_UNALIGNED:
		/* Skipped when huge pages are unavailable; in that case the
		 * skip has already been reported, so no result is printed.
		 */
		if (!testapp_unaligned(test))
			return;
		break;
	default:
		break;
	}

	print_ksft_result(test);
}
2021-08-25 12:37:18 +03:00
static struct ifobject * ifobject_create ( void )
{
struct ifobject * ifobj ;
ifobj = calloc ( 1 , sizeof ( struct ifobject ) ) ;
if ( ! ifobj )
return NULL ;
2021-09-07 10:19:09 +03:00
ifobj - > xsk_arr = calloc ( MAX_SOCKETS , sizeof ( * ifobj - > xsk_arr ) ) ;
2021-08-25 12:37:18 +03:00
if ( ! ifobj - > xsk_arr )
goto out_xsk_arr ;
2021-09-07 10:19:09 +03:00
ifobj - > umem_arr = calloc ( MAX_SOCKETS , sizeof ( * ifobj - > umem_arr ) ) ;
2021-08-25 12:37:18 +03:00
if ( ! ifobj - > umem_arr )
goto out_umem_arr ;
return ifobj ;
out_umem_arr :
free ( ifobj - > xsk_arr ) ;
out_xsk_arr :
free ( ifobj ) ;
return NULL ;
}
static void ifobject_delete ( struct ifobject * ifobj )
{
free ( ifobj - > umem_arr ) ;
free ( ifobj - > xsk_arr ) ;
free ( ifobj ) ;
}
2020-12-08 00:53:30 +03:00
int main(int argc, char **argv)
{
	/* Unlimited locked memory for the UMEM mmap()s. */
	struct rlimit _rlim = { RLIM_INFINITY, RLIM_INFINITY };
	struct pkt_stream *pkt_stream_default;
	struct ifobject *ifobj_tx, *ifobj_rx;
	struct test_spec test;
	u32 i, j;

	if (setrlimit(RLIMIT_MEMLOCK, &_rlim))
		exit_with_error(errno);

	ifobj_tx = ifobject_create();
	if (!ifobj_tx)
		exit_with_error(ENOMEM);
	ifobj_rx = ifobject_create();
	if (!ifobj_rx)
		exit_with_error(ENOMEM);

	setlocale(LC_ALL, "");

	parse_command_line(ifobj_tx, ifobj_rx, argc, argv);

	if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx)) {
		usage(basename(argv[0]));
		ksft_exit_xfail();
	}

	/* The two interfaces mirror each other's MAC/IP/port addressing. */
	init_iface(ifobj_tx, MAC1, MAC2, IP1, IP2, UDP_PORT1, UDP_PORT2,
		   worker_testapp_validate_tx);
	init_iface(ifobj_rx, MAC2, MAC1, IP2, IP1, UDP_PORT2, UDP_PORT1,
		   worker_testapp_validate_rx);

	/* Build the default packet stream shared by all tests. */
	test_spec_init(&test, ifobj_tx, ifobj_rx, 0);
	pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, PKT_SIZE);
	if (!pkt_stream_default)
		exit_with_error(ENOMEM);
	test.pkt_stream_default = pkt_stream_default;

	ksft_set_plan(TEST_MODE_MAX * TEST_TYPE_MAX);

	/* Run every test type in every mode (SKB, native/DRV, ...). */
	for (i = 0; i < TEST_MODE_MAX; i++)
		for (j = 0; j < TEST_TYPE_MAX; j++) {
			/* Re-init per run so each test starts from defaults. */
			test_spec_init(&test, ifobj_tx, ifobj_rx, i);
			run_pkt_test(&test, i, j);
			usleep(USLEEP_MAX);
		}

	pkt_stream_delete(pkt_stream_default);
	ifobject_delete(ifobj_tx);
	ifobject_delete(ifobj_rx);

	ksft_exit_pass();
	return 0;
}