From d2e541494935a659b67e51aa3d1945bb3b799c4e Mon Sep 17 00:00:00 2001 From: Magnus Karlsson Date: Tue, 16 May 2023 12:31:00 +0200 Subject: [PATCH 01/10] selftests/xsk: do not change XDP program when not necessary Do not change the XDP program for the Tx thread when not needed. It was erroneously compared to the XDP program for the Rx thread, which is always going to be different, which meant that the code made unnecessary switches to the same program it had before. This did not affect functionality, just performance. Signed-off-by: Magnus Karlsson Link: https://lore.kernel.org/r/20230516103109.3066-2-magnus.karlsson@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/xskxceiver.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index f144d0604ddf..f7950af576e1 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -1402,11 +1402,20 @@ static void handler(int signum) pthread_exit(NULL); } -static bool xdp_prog_changed(struct test_spec *test, struct ifobject *ifobj) +static bool xdp_prog_changed_rx(struct test_spec *test) { + struct ifobject *ifobj = test->ifobj_rx; + return ifobj->xdp_prog != test->xdp_prog_rx || ifobj->mode != test->mode; } +static bool xdp_prog_changed_tx(struct test_spec *test) +{ + struct ifobject *ifobj = test->ifobj_tx; + + return ifobj->xdp_prog != test->xdp_prog_tx || ifobj->mode != test->mode; +} + static void xsk_reattach_xdp(struct ifobject *ifobj, struct bpf_program *xdp_prog, struct bpf_map *xskmap, enum test_mode mode) { @@ -1433,13 +1442,13 @@ static void xsk_reattach_xdp(struct ifobject *ifobj, struct bpf_program *xdp_pro static void xsk_attach_xdp_progs(struct test_spec *test, struct ifobject *ifobj_rx, struct ifobject *ifobj_tx) { - if (xdp_prog_changed(test, ifobj_rx)) + if (xdp_prog_changed_rx(test)) xsk_reattach_xdp(ifobj_rx, test->xdp_prog_rx, test->xskmap_rx, test->mode); if (!ifobj_tx || ifobj_tx->shared_umem) return; - if (xdp_prog_changed(test, ifobj_tx)) + if (xdp_prog_changed_tx(test)) xsk_reattach_xdp(ifobj_tx, test->xdp_prog_tx, test->xskmap_tx, test->mode); } From df82d2e89c41d1dc6f02a881f0cddac8252bb441 Mon Sep 17 00:00:00 2001 From: Magnus Karlsson Date: Tue, 16 May 2023 12:31:01 +0200 Subject: [PATCH 02/10] selftests/xsk: generate simpler packets with variable length Implement support for generating pkts with variable length. Before this patch, they were all 64 bytes, except for some packets of zero length and some that were too large. This feature will be used to test multi-buffer support for which large packets are needed. The packets are also made simpler, just a valid Ethernet header followed by a sequence number. This is so that it will become easier to implement packet generation when each packet consists of multiple fragments. There is also a maintenance burden associated with carrying all this code for generating proper UDP/IP packets, especially since they are not needed.
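For orientation, a rough standalone sketch (not code from the patch; the names, headers and helper below are assumptions, with the two-byte pad mirroring the PKT_HDR_SIZE definition in the diff) of the simplified frame the test now generates: an Ethernet header followed by 32-bit words that all carry the packet's sequence number.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>

/* Sketch: build a minimal test frame of pkt_len bytes in buf. */
static void build_simple_frame(void *buf, uint32_t pkt_len, uint32_t pkt_nb,
			       const uint8_t *dst_mac, const uint8_t *src_mac)
{
	struct ethhdr *eth = buf;
	/* +2 bytes of padding keeps the payload words 4-byte aligned,
	 * the same trick PKT_HDR_SIZE uses in the patch.
	 */
	uint32_t *word = (uint32_t *)((uint8_t *)buf + sizeof(*eth) + 2);
	uint32_t i, nb_words = (pkt_len - sizeof(*eth) - 2) / sizeof(*word);

	memcpy(eth->h_dest, dst_mac, ETH_ALEN);
	memcpy(eth->h_source, src_mac, ETH_ALEN);
	eth->h_proto = htons(ETH_P_LOOPBACK);

	for (i = 0; i < nb_words; i++)
		word[i] = htonl(pkt_nb);	/* every word holds the packet number */
}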
Signed-off-by: Magnus Karlsson Link: https://lore.kernel.org/r/20230516103109.3066-3-magnus.karlsson@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/xskxceiver.c | 221 ++++------------------- tools/testing/selftests/bpf/xskxceiver.h | 17 +- 2 files changed, 38 insertions(+), 200 deletions(-) diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index f7950af576e1..c13478875fb1 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -76,16 +76,13 @@ #include #include #include -#include #include -#include #include #include #include #include #include #include -#include #include #include #include @@ -94,10 +91,8 @@ #include #include #include -#include #include #include -#include #include "xsk_xdp_progs.skel.h" #include "xsk.h" @@ -109,10 +104,6 @@ static const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62"; static const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61"; -static const char *IP1 = "192.168.100.162"; -static const char *IP2 = "192.168.100.161"; -static const u16 UDP_PORT1 = 2020; -static const u16 UDP_PORT2 = 2121; static void __exit_with_error(int error, const char *file, const char *func, int line) { @@ -158,101 +149,11 @@ static void memset32_htonl(void *dest, u32 val, u32 size) ptr[i >> 2] = val; } -/* - * Fold a partial checksum - * This function code has been taken from - * Linux kernel include/asm-generic/checksum.h - */ -static __u16 csum_fold(__u32 csum) -{ - u32 sum = (__force u32)csum; - - sum = (sum & 0xffff) + (sum >> 16); - sum = (sum & 0xffff) + (sum >> 16); - return (__force __u16)~sum; -} - -/* - * This function code has been taken from - * Linux kernel lib/checksum.c - */ -static u32 from64to32(u64 x) -{ - /* add up 32-bit and 32-bit for 32+c bit */ - x = (x & 0xffffffff) + (x >> 32); - /* add up carry.. 
*/ - x = (x & 0xffffffff) + (x >> 32); - return (u32)x; -} - -/* - * This function code has been taken from - * Linux kernel lib/checksum.c - */ -static __u32 csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __u32 sum) -{ - unsigned long long s = (__force u32)sum; - - s += (__force u32)saddr; - s += (__force u32)daddr; -#ifdef __BIG_ENDIAN__ - s += proto + len; -#else - s += (proto + len) << 8; -#endif - return (__force __u32)from64to32(s); -} - -/* - * This function has been taken from - * Linux kernel include/asm-generic/checksum.h - */ -static __u16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __u32 sum) -{ - return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); -} - -static u16 udp_csum(u32 saddr, u32 daddr, u32 len, u8 proto, u16 *udp_pkt) -{ - u32 csum = 0; - u32 cnt = 0; - - /* udp hdr and data */ - for (; cnt < len; cnt += 2) - csum += udp_pkt[cnt >> 1]; - - return csum_tcpudp_magic(saddr, daddr, len, proto, csum); -} - static void gen_eth_hdr(struct ifobject *ifobject, struct ethhdr *eth_hdr) { memcpy(eth_hdr->h_dest, ifobject->dst_mac, ETH_ALEN); memcpy(eth_hdr->h_source, ifobject->src_mac, ETH_ALEN); - eth_hdr->h_proto = htons(ETH_P_IP); -} - -static void gen_ip_hdr(struct ifobject *ifobject, struct iphdr *ip_hdr) -{ - ip_hdr->version = IP_PKT_VER; - ip_hdr->ihl = 0x5; - ip_hdr->tos = IP_PKT_TOS; - ip_hdr->tot_len = htons(IP_PKT_SIZE); - ip_hdr->id = 0; - ip_hdr->frag_off = 0; - ip_hdr->ttl = IPDEFTTL; - ip_hdr->protocol = IPPROTO_UDP; - ip_hdr->saddr = ifobject->src_ip; - ip_hdr->daddr = ifobject->dst_ip; - ip_hdr->check = 0; -} - -static void gen_udp_hdr(u32 payload, void *pkt, struct ifobject *ifobject, - struct udphdr *udp_hdr) -{ - udp_hdr->source = htons(ifobject->src_port); - udp_hdr->dest = htons(ifobject->dst_port); - udp_hdr->len = htons(UDP_PKT_SIZE); - memset32_htonl(pkt + PKT_HDR_SIZE, payload, UDP_PKT_DATA_SIZE); + eth_hdr->h_proto = htons(ETH_P_LOOPBACK); } static bool is_umem_valid(struct ifobject *ifobj) @@ -260,13 +161,6 @@ static bool is_umem_valid(struct ifobject *ifobj) return !!ifobj->umem->umem; } -static void gen_udp_csum(struct udphdr *udp_hdr, struct iphdr *ip_hdr) -{ - udp_hdr->check = 0; - udp_hdr->check = - udp_csum(ip_hdr->saddr, ip_hdr->daddr, UDP_PKT_SIZE, IPPROTO_UDP, (u16 *)udp_hdr); -} - static u32 mode_to_xdp_flags(enum test_mode mode) { return (mode == TEST_MODE_SKB) ? 
XDP_FLAGS_SKB_MODE : XDP_FLAGS_DRV_MODE; @@ -697,9 +591,7 @@ static void pkt_stream_receive_half(struct test_spec *test) static struct pkt *pkt_generate(struct ifobject *ifobject, u32 pkt_nb) { struct pkt *pkt = pkt_stream_get_pkt(ifobject->pkt_stream, pkt_nb); - struct udphdr *udp_hdr; struct ethhdr *eth_hdr; - struct iphdr *ip_hdr; void *data; if (!pkt) @@ -708,14 +600,10 @@ static struct pkt *pkt_generate(struct ifobject *ifobject, u32 pkt_nb) return pkt; data = xsk_umem__get_data(ifobject->umem->buffer, pkt->addr); - udp_hdr = (struct udphdr *)(data + sizeof(struct ethhdr) + sizeof(struct iphdr)); - ip_hdr = (struct iphdr *)(data + sizeof(struct ethhdr)); - eth_hdr = (struct ethhdr *)data; + eth_hdr = data; - gen_udp_hdr(pkt_nb, data, ifobject, udp_hdr); - gen_ip_hdr(ifobject, ip_hdr); - gen_udp_csum(udp_hdr, ip_hdr); gen_eth_hdr(ifobject, eth_hdr); + memset32_htonl(data + PKT_HDR_SIZE, pkt_nb, pkt->len - PKT_HDR_SIZE); return pkt; } @@ -746,18 +634,11 @@ static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, __pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts); } -static void pkt_dump(void *pkt, u32 len) +static void pkt_dump(void *pkt) { - char s[INET_ADDRSTRLEN]; - struct ethhdr *ethhdr; - struct udphdr *udphdr; - struct iphdr *iphdr; + struct ethhdr *ethhdr = pkt; u32 payload, i; - ethhdr = pkt; - iphdr = pkt + sizeof(*ethhdr); - udphdr = pkt + sizeof(*ethhdr) + sizeof(*iphdr); - /*extract L2 frame */ fprintf(stdout, "DEBUG>> L2: dst mac: "); for (i = 0; i < ETH_ALEN; i++) @@ -767,19 +648,10 @@ static void pkt_dump(void *pkt, u32 len) for (i = 0; i < ETH_ALEN; i++) fprintf(stdout, "%02X", ethhdr->h_source[i]); - /*extract L3 frame */ - fprintf(stdout, "\nDEBUG>> L3: ip_hdr->ihl: %02X\n", iphdr->ihl); - fprintf(stdout, "DEBUG>> L3: ip_hdr->saddr: %s\n", - inet_ntop(AF_INET, &iphdr->saddr, s, sizeof(s))); - fprintf(stdout, "DEBUG>> L3: ip_hdr->daddr: %s\n", - inet_ntop(AF_INET, &iphdr->daddr, s, sizeof(s))); - /*extract L4 frame */ - fprintf(stdout, "DEBUG>> L4: udp_hdr->src: %d\n", ntohs(udphdr->source)); - fprintf(stdout, "DEBUG>> L4: udp_hdr->dst: %d\n", ntohs(udphdr->dest)); /*extract L5 frame */ payload = ntohl(*((u32 *)(pkt + PKT_HDR_SIZE))); - fprintf(stdout, "DEBUG>> L5: payload: %d\n", payload); + fprintf(stdout, "\nDEBUG>> L5: payload: %d\n", payload); fprintf(stdout, "---------------------------------------\n"); } @@ -818,7 +690,7 @@ static bool is_metadata_correct(struct pkt *pkt, void *buffer, u64 addr) static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len) { void *data = xsk_umem__get_data(buffer, addr); - struct iphdr *iphdr = (struct iphdr *)(data + sizeof(struct ethhdr)); + u32 seqnum; if (!pkt) { ksft_print_msg("[%s] too many packets received\n", __func__); @@ -836,21 +708,13 @@ static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len) return false; } - if (iphdr->version == IP_PKT_VER && iphdr->tos == IP_PKT_TOS) { - u32 seqnum = ntohl(*((u32 *)(data + PKT_HDR_SIZE))); + seqnum = ntohl(*((u32 *)(data + PKT_HDR_SIZE))); + if (opt_pkt_dump) + pkt_dump(data); - if (opt_pkt_dump) - pkt_dump(data, PKT_SIZE); - - if (pkt->payload != seqnum) { - ksft_print_msg("[%s] expected seqnum [%d], got seqnum [%d]\n", - __func__, pkt->payload, seqnum); - return false; - } - } else { - ksft_print_msg("Invalid frame received: "); - ksft_print_msg("[IP_PKT_VER: %02X], [IP_PKT_TOS: %02X]\n", iphdr->version, - iphdr->tos); + if (pkt->payload != seqnum) { + ksft_print_msg("[%s] expected seqnum [%d], got seqnum [%d]\n", 
+ __func__, pkt->payload, seqnum); return false; } @@ -1606,9 +1470,9 @@ static void testapp_stats_tx_invalid_descs(struct test_spec *test) static void testapp_stats_rx_full(struct test_spec *test) { test_spec_set_name(test, "STAT_RX_FULL"); - pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, PKT_SIZE); + pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE); test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, - DEFAULT_UMEM_BUFFERS, PKT_SIZE); + DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE); if (!test->ifobj_rx->pkt_stream) exit_with_error(ENOMEM); @@ -1621,9 +1485,9 @@ static void testapp_stats_rx_full(struct test_spec *test) static void testapp_stats_fill_empty(struct test_spec *test) { test_spec_set_name(test, "STAT_RX_FILL_EMPTY"); - pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, PKT_SIZE); + pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE); test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, - DEFAULT_UMEM_BUFFERS, PKT_SIZE); + DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE); if (!test->ifobj_rx->pkt_stream) exit_with_error(ENOMEM); @@ -1659,7 +1523,7 @@ static bool testapp_unaligned(struct test_spec *test) test->ifobj_tx->umem->unaligned_mode = true; test->ifobj_rx->umem->unaligned_mode = true; /* Let half of the packets straddle a buffer boundrary */ - pkt_stream_replace_half(test, PKT_SIZE, -PKT_SIZE / 2); + pkt_stream_replace_half(test, MIN_PKT_SIZE, -MIN_PKT_SIZE / 2); test->ifobj_rx->pkt_stream->use_addr_for_fill = true; testapp_validate_traffic(test); @@ -1668,7 +1532,7 @@ static bool testapp_unaligned(struct test_spec *test) static void testapp_single_pkt(struct test_spec *test) { - struct pkt pkts[] = {{0x1000, PKT_SIZE, 0, true}}; + struct pkt pkts[] = {{0x1000, MIN_PKT_SIZE, 0, true}}; pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)); testapp_validate_traffic(test); @@ -1679,25 +1543,25 @@ static void testapp_invalid_desc(struct test_spec *test) u64 umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size; struct pkt pkts[] = { /* Zero packet address allowed */ - {0, PKT_SIZE, 0, true}, + {0, MIN_PKT_SIZE, 0, true}, /* Allowed packet */ - {0x1000, PKT_SIZE, 0, true}, + {0x1000, MIN_PKT_SIZE, 0, true}, /* Straddling the start of umem */ - {-2, PKT_SIZE, 0, false}, + {-2, MIN_PKT_SIZE, 0, false}, /* Packet too large */ {0x2000, XSK_UMEM__INVALID_FRAME_SIZE, 0, false}, /* Up to end of umem allowed */ - {umem_size - PKT_SIZE, PKT_SIZE, 0, true}, + {umem_size - MIN_PKT_SIZE, MIN_PKT_SIZE, 0, true}, /* After umem ends */ - {umem_size, PKT_SIZE, 0, false}, + {umem_size, MIN_PKT_SIZE, 0, false}, /* Straddle the end of umem */ - {umem_size - PKT_SIZE / 2, PKT_SIZE, 0, false}, + {umem_size - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false}, /* Straddle a page boundrary */ - {0x3000 - PKT_SIZE / 2, PKT_SIZE, 0, false}, + {0x3000 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false}, /* Straddle a 2K boundrary */ - {0x3800 - PKT_SIZE / 2, PKT_SIZE, 0, true}, + {0x3800 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, true}, /* Valid packet for synch so that something is received */ - {0x4000, PKT_SIZE, 0, true}}; + {0x4000, MIN_PKT_SIZE, 0, true}}; if (test->ifobj_tx->umem->unaligned_mode) { /* Crossing a page boundrary allowed */ @@ -1788,24 +1652,13 @@ static void xsk_unload_xdp_programs(struct ifobject *ifobj) } static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *src_mac, - const char *dst_ip, const char *src_ip, 
const u16 dst_port, - const u16 src_port, thread_func_t func_ptr) + thread_func_t func_ptr) { - struct in_addr ip; int err; memcpy(ifobj->dst_mac, dst_mac, ETH_ALEN); memcpy(ifobj->src_mac, src_mac, ETH_ALEN); - inet_aton(dst_ip, &ip); - ifobj->dst_ip = ip.s_addr; - - inet_aton(src_ip, &ip); - ifobj->src_ip = ip.s_addr; - - ifobj->dst_port = dst_port; - ifobj->src_port = src_port; - ifobj->func_ptr = func_ptr; err = xsk_load_xdp_programs(ifobj); @@ -1855,7 +1708,7 @@ static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_ test_spec_set_name(test, "RUN_TO_COMPLETION_2K_FRAME_SIZE"); test->ifobj_tx->umem->frame_size = 2048; test->ifobj_rx->umem->frame_size = 2048; - pkt_stream_replace(test, DEFAULT_PKT_CNT, PKT_SIZE); + pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE); testapp_validate_traffic(test); break; case TEST_TYPE_RX_POLL: @@ -1912,8 +1765,8 @@ static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_ */ page_size = sysconf(_SC_PAGESIZE); umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size; - assert(umem_size % page_size > PKT_SIZE); - assert(umem_size % page_size < page_size - PKT_SIZE); + assert(umem_size % page_size > MIN_PKT_SIZE); + assert(umem_size % page_size < page_size - MIN_PKT_SIZE); testapp_invalid_desc(test); break; } @@ -2039,14 +1892,12 @@ int main(int argc, char **argv) modes++; } - init_iface(ifobj_rx, MAC1, MAC2, IP1, IP2, UDP_PORT1, UDP_PORT2, - worker_testapp_validate_rx); - init_iface(ifobj_tx, MAC2, MAC1, IP2, IP1, UDP_PORT2, UDP_PORT1, - worker_testapp_validate_tx); + init_iface(ifobj_rx, MAC1, MAC2, worker_testapp_validate_rx); + init_iface(ifobj_tx, MAC2, MAC1, worker_testapp_validate_tx); test_spec_init(&test, ifobj_tx, ifobj_rx, 0); - tx_pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, PKT_SIZE); - rx_pkt_stream_default = pkt_stream_generate(ifobj_rx->umem, DEFAULT_PKT_CNT, PKT_SIZE); + tx_pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, MIN_PKT_SIZE); + rx_pkt_stream_default = pkt_stream_generate(ifobj_rx->umem, DEFAULT_PKT_CNT, MIN_PKT_SIZE); if (!tx_pkt_stream_default || !rx_pkt_stream_default) exit_with_error(ENOMEM); test.tx_pkt_stream_default = tx_pkt_stream_default; diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h index c535aeab2ca3..8b094718629d 100644 --- a/tools/testing/selftests/bpf/xskxceiver.h +++ b/tools/testing/selftests/bpf/xskxceiver.h @@ -35,17 +35,8 @@ #define MAX_SOCKETS 2 #define MAX_TEST_NAME_SIZE 32 #define MAX_TEARDOWN_ITER 10 -#define PKT_HDR_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \ - sizeof(struct udphdr)) -#define MIN_ETH_PKT_SIZE 64 -#define ETH_FCS_SIZE 4 -#define MIN_PKT_SIZE (MIN_ETH_PKT_SIZE - ETH_FCS_SIZE) -#define PKT_SIZE (MIN_PKT_SIZE) -#define IP_PKT_SIZE (PKT_SIZE - sizeof(struct ethhdr)) -#define IP_PKT_VER 0x4 -#define IP_PKT_TOS 0x9 -#define UDP_PKT_SIZE (IP_PKT_SIZE - sizeof(struct iphdr)) -#define UDP_PKT_DATA_SIZE (UDP_PKT_SIZE - sizeof(struct udphdr)) +#define PKT_HDR_SIZE (sizeof(struct ethhdr) + 2) /* Just to align the data in the packet */ +#define MIN_PKT_SIZE 64 #define USLEEP_MAX 10000 #define SOCK_RECONF_CTR 10 #define BATCH_SIZE 64 @@ -148,11 +139,7 @@ struct ifobject { struct bpf_program *xdp_prog; enum test_mode mode; int ifindex; - u32 dst_ip; - u32 src_ip; u32 bind_flags; - u16 src_port; - u16 dst_port; bool tx_on; bool rx_on; bool use_poll; From feb973a9094ff92fb8c141be5d2e762f9ab36a7a Mon Sep 17 00:00:00 
2001 From: Magnus Karlsson Date: Tue, 16 May 2023 12:31:02 +0200 Subject: [PATCH 03/10] selftests/xsk: add varying payload pattern within packet Add a varying payload pattern within the packet. Instead of having just a packet number that is the same for all words in a packet, make each word different in the packet. The upper 16-bits are set to the packet number and the lower 16-bits are the sequence number of the words in this packet. So the 3rd packet's 5th 32-bit word of data will contain the number (2<<16) | 4 as they are numbered from 0. This will make it easier to detect fragments that are out of order when starting to test multi-buffer support. The member payload in the packet is renamed pkt_nb to reflect that it is now only a pkt_nb, not the real payload as seen above. Signed-off-by: Magnus Karlsson Link: https://lore.kernel.org/r/20230516103109.3066-4-magnus.karlsson@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/xskxceiver.c | 70 ++++++++++++++++-------- tools/testing/selftests/bpf/xskxceiver.h | 3 +- 2 files changed, 48 insertions(+), 25 deletions(-) diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index c13478875fb1..818b7130f932 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -138,15 +138,16 @@ static void report_failure(struct test_spec *test) test->fail = true; } -static void memset32_htonl(void *dest, u32 val, u32 size) +/* The payload is a word consisting of a packet sequence number in the upper + * 16-bits and a intra packet data sequence number in the lower 16 bits. So the 3rd packet's + * 5th word of data will contain the number (2<<16) | 4 as they are numbered from 0. + */ +static void write_payload(void *dest, u32 val, u32 size) { - u32 *ptr = (u32 *)dest; - int i; + u32 *ptr = (u32 *)dest, i; - val = htonl(val); - - for (i = 0; i < (size & (~0x3)); i += 4) - ptr[i >> 2] = val; + for (i = 0; i < size / sizeof(*ptr); i++) + ptr[i] = htonl(val << 16 | i); } static void gen_eth_hdr(struct ifobject *ifobject, struct ethhdr *eth_hdr) @@ -532,7 +533,7 @@ static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb for (i = 0; i < nb_pkts; i++) { pkt_set(umem, &pkt_stream->pkts[i], (i % umem->num_frames) * umem->frame_size, pkt_len); - pkt_stream->pkts[i].payload = i; + pkt_stream->pkts[i].pkt_nb = i; } return pkt_stream; @@ -603,7 +604,7 @@ static struct pkt *pkt_generate(struct ifobject *ifobject, u32 pkt_nb) eth_hdr = data; gen_eth_hdr(ifobject, eth_hdr); - memset32_htonl(data + PKT_HDR_SIZE, pkt_nb, pkt->len - PKT_HDR_SIZE); + write_payload(data + PKT_HDR_SIZE, pkt_nb, pkt->len - PKT_HDR_SIZE); return pkt; } @@ -621,7 +622,7 @@ static void __pkt_stream_generate_custom(struct ifobject *ifobj, for (i = 0; i < nb_pkts; i++) { pkt_stream->pkts[i].addr = pkts[i].addr + ifobj->umem->base_addr; pkt_stream->pkts[i].len = pkts[i].len; - pkt_stream->pkts[i].payload = i; + pkt_stream->pkts[i].pkt_nb = i; pkt_stream->pkts[i].valid = pkts[i].valid; } @@ -634,10 +635,24 @@ static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, __pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts); } -static void pkt_dump(void *pkt) +static void pkt_print_data(u32 *data, u32 cnt) +{ + u32 i; + + for (i = 0; i < cnt; i++) { + u32 seqnum, pkt_nb; + + seqnum = ntohl(*data) & 0xffff; + pkt_nb = ntohl(*data) >> 16; + fprintf(stdout, "%u:%u ", pkt_nb, seqnum); + data++; + } +} + +static void pkt_dump(void *pkt, u32 len) {
struct ethhdr *ethhdr = pkt; - u32 payload, i; + u32 i; /*extract L2 frame */ fprintf(stdout, "DEBUG>> L2: dst mac: "); @@ -649,10 +664,15 @@ static void pkt_dump(void *pkt) fprintf(stdout, "%02X", ethhdr->h_source[i]); /*extract L5 frame */ - payload = ntohl(*((u32 *)(pkt + PKT_HDR_SIZE))); - - fprintf(stdout, "\nDEBUG>> L5: payload: %d\n", payload); - fprintf(stdout, "---------------------------------------\n"); + fprintf(stdout, "\nDEBUG>> L5: seqnum: "); + pkt_print_data(pkt + PKT_HDR_SIZE, PKT_DUMP_NB_TO_PRINT); + fprintf(stdout, "...."); + if (len > PKT_DUMP_NB_TO_PRINT * sizeof(u32)) { + fprintf(stdout, "\n.... "); + pkt_print_data(pkt + PKT_HDR_SIZE + len - PKT_DUMP_NB_TO_PRINT * sizeof(u32), + PKT_DUMP_NB_TO_PRINT); + } + fprintf(stdout, "\n---------------------------------------\n"); } static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream, u64 addr, @@ -678,9 +698,9 @@ static bool is_metadata_correct(struct pkt *pkt, void *buffer, u64 addr) void *data = xsk_umem__get_data(buffer, addr); struct xdp_info *meta = data - sizeof(struct xdp_info); - if (meta->count != pkt->payload) { + if (meta->count != pkt->pkt_nb) { ksft_print_msg("[%s] expected meta_count [%d], got meta_count [%d]\n", - __func__, pkt->payload, meta->count); + __func__, pkt->pkt_nb, meta->count); return false; } @@ -690,7 +710,7 @@ static bool is_metadata_correct(struct pkt *pkt, void *buffer, u64 addr) static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len) { void *data = xsk_umem__get_data(buffer, addr); - u32 seqnum; + u32 seqnum, pkt_data; if (!pkt) { ksft_print_msg("[%s] too many packets received\n", __func__); @@ -708,13 +728,15 @@ static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len) return false; } - seqnum = ntohl(*((u32 *)(data + PKT_HDR_SIZE))); - if (opt_pkt_dump) - pkt_dump(data); + pkt_data = ntohl(*((u32 *)(data + PKT_HDR_SIZE))); + seqnum = pkt_data >> 16; - if (pkt->payload != seqnum) { + if (opt_pkt_dump) + pkt_dump(data, len); + + if (pkt->pkt_nb != seqnum) { ksft_print_msg("[%s] expected seqnum [%d], got seqnum [%d]\n", - __func__, pkt->payload, seqnum); + __func__, pkt->pkt_nb, seqnum); return false; } diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h index 8b094718629d..91022c4876eb 100644 --- a/tools/testing/selftests/bpf/xskxceiver.h +++ b/tools/testing/selftests/bpf/xskxceiver.h @@ -48,6 +48,7 @@ #define UMEM_HEADROOM_TEST_SIZE 128 #define XSK_UMEM__INVALID_FRAME_SIZE (XSK_UMEM__DEFAULT_FRAME_SIZE + 1) #define HUGEPAGE_SIZE (2 * 1024 * 1024) +#define PKT_DUMP_NB_TO_PRINT 16 #define print_verbose(x...) do { if (opt_verbose) ksft_print_msg(x); } while (0) @@ -111,7 +112,7 @@ struct xsk_socket_info { struct pkt { u64 addr; u32 len; - u32 payload; + u32 pkt_nb; bool valid; }; From 7a8a6762822a1f5249eec356e7ea31f98161fba0 Mon Sep 17 00:00:00 2001 From: Magnus Karlsson Date: Tue, 16 May 2023 12:31:03 +0200 Subject: [PATCH 04/10] selftests/xsk: dump packet at error Dump the content of the packet when a test finds that packets are received out of order, the length is wrong, or some other packet error. Use the already existing pkt_dump function for this and call it when the above errors are detected. Get rid of the command line option for dumping packets as it is not useful to print out thousands of good packets followed by the faulty one you would like to see. 
Signed-off-by: Magnus Karlsson Link: https://lore.kernel.org/r/20230516103109.3066-5-magnus.karlsson@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/test_xsk.sh | 10 +--------- tools/testing/selftests/bpf/xskxceiver.c | 20 ++++++++------------ tools/testing/selftests/bpf/xskxceiver.h | 1 - 3 files changed, 9 insertions(+), 22 deletions(-) diff --git a/tools/testing/selftests/bpf/test_xsk.sh b/tools/testing/selftests/bpf/test_xsk.sh index 377fb157a57c..c2ad50f26b63 100755 --- a/tools/testing/selftests/bpf/test_xsk.sh +++ b/tools/testing/selftests/bpf/test_xsk.sh @@ -68,9 +68,6 @@ # Run with verbose output: # sudo ./test_xsk.sh -v # -# Run and dump packet contents: -# sudo ./test_xsk.sh -D -# # Set up veth interfaces and leave them up so xskxceiver can be launched in a debugger: # sudo ./test_xsk.sh -d # @@ -81,11 +78,10 @@ ETH="" -while getopts "vDi:d" flag +while getopts "vi:d" flag do case "${flag}" in v) verbose=1;; - D) dump_pkts=1;; d) debug=1;; i) ETH=${OPTARG};; esac @@ -157,10 +153,6 @@ if [[ $verbose -eq 1 ]]; then ARGS+="-v " fi -if [[ $dump_pkts -eq 1 ]]; then - ARGS="-D " -fi - retval=$? test_status $retval "${TEST_NAME}" diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index 818b7130f932..0a8231ed6626 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -275,7 +275,6 @@ out: static struct option long_options[] = { {"interface", required_argument, 0, 'i'}, {"busy-poll", no_argument, 0, 'b'}, - {"dump-pkts", no_argument, 0, 'D'}, {"verbose", no_argument, 0, 'v'}, {0, 0, 0, 0} }; @@ -286,7 +285,6 @@ static void usage(const char *prog) " Usage: %s [OPTIONS]\n" " Options:\n" " -i, --interface Use interface\n" - " -D, --dump-pkts Dump packets L2 - L5\n" " -v, --verbose Verbose output\n" " -b, --busy-poll Enable busy poll\n"; @@ -310,7 +308,7 @@ static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj opterr = 0; for (;;) { - c = getopt_long(argc, argv, "i:Dvb", long_options, &option_index); + c = getopt_long(argc, argv, "i:vb", long_options, &option_index); if (c == -1) break; @@ -332,9 +330,6 @@ static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj interface_nb++; break; - case 'D': - opt_pkt_dump = true; - break; case 'v': opt_verbose = true; break; @@ -714,7 +709,7 @@ static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len) if (!pkt) { ksft_print_msg("[%s] too many packets received\n", __func__); - return false; + goto error; } if (len < MIN_PKT_SIZE || pkt->len < MIN_PKT_SIZE) { @@ -725,22 +720,23 @@ static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len) if (pkt->len != len) { ksft_print_msg("[%s] expected length [%d], got length [%d]\n", __func__, pkt->len, len); - return false; + goto error; } pkt_data = ntohl(*((u32 *)(data + PKT_HDR_SIZE))); seqnum = pkt_data >> 16; - if (opt_pkt_dump) - pkt_dump(data, len); - if (pkt->pkt_nb != seqnum) { ksft_print_msg("[%s] expected seqnum [%d], got seqnum [%d]\n", __func__, pkt->pkt_nb, seqnum); - return false; + goto error; } return true; + +error: + pkt_dump(data, len); + return false; } static void kick_tx(struct xsk_socket_info *xsk) diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h index 91022c4876eb..5e0be9685557 100644 --- a/tools/testing/selftests/bpf/xskxceiver.h +++ b/tools/testing/selftests/bpf/xskxceiver.h @@ -85,7 +85,6 @@ enum test_type { TEST_TYPE_MAX }; -static bool 
opt_pkt_dump; static bool opt_verbose; struct xsk_umem_info { From 69fc03d220a318a4df2c6f0612b17225e71a6069 Mon Sep 17 00:00:00 2001 From: Magnus Karlsson Date: Tue, 16 May 2023 12:31:04 +0200 Subject: [PATCH 05/10] selftests/xsk: add packet iterator for tx to packet stream Convert the current variable rx_pkt_nb to an iterator that can be used for both Rx and Tx. This to simplify the code and making Tx more like Rx that already has this feature. Signed-off-by: Magnus Karlsson Link: https://lore.kernel.org/r/20230516103109.3066-6-magnus.karlsson@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/xskxceiver.c | 43 +++++++++++++----------- tools/testing/selftests/bpf/xskxceiver.h | 2 +- 2 files changed, 24 insertions(+), 21 deletions(-) diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index 0a8231ed6626..0823890c0709 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -444,24 +444,24 @@ static void test_spec_set_xdp_prog(struct test_spec *test, struct bpf_program *x static void pkt_stream_reset(struct pkt_stream *pkt_stream) { if (pkt_stream) - pkt_stream->rx_pkt_nb = 0; + pkt_stream->current_pkt_nb = 0; } -static struct pkt *pkt_stream_get_pkt(struct pkt_stream *pkt_stream, u32 pkt_nb) +static struct pkt *pkt_stream_get_next_tx_pkt(struct pkt_stream *pkt_stream) { - if (pkt_nb >= pkt_stream->nb_pkts) + if (pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts) return NULL; - return &pkt_stream->pkts[pkt_nb]; + return &pkt_stream->pkts[pkt_stream->current_pkt_nb++]; } static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream, u32 *pkts_sent) { - while (pkt_stream->rx_pkt_nb < pkt_stream->nb_pkts) { + while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) { (*pkts_sent)++; - if (pkt_stream->pkts[pkt_stream->rx_pkt_nb].valid) - return &pkt_stream->pkts[pkt_stream->rx_pkt_nb++]; - pkt_stream->rx_pkt_nb++; + if (pkt_stream->pkts[pkt_stream->current_pkt_nb].valid) + return &pkt_stream->pkts[pkt_stream->current_pkt_nb++]; + pkt_stream->current_pkt_nb++; } return NULL; } @@ -584,9 +584,9 @@ static void pkt_stream_receive_half(struct test_spec *test) pkt_stream->pkts[i].valid = false; } -static struct pkt *pkt_generate(struct ifobject *ifobject, u32 pkt_nb) +static struct pkt *pkt_generate(struct ifobject *ifobject) { - struct pkt *pkt = pkt_stream_get_pkt(ifobject->pkt_stream, pkt_nb); + struct pkt *pkt = pkt_stream_get_next_tx_pkt(ifobject->pkt_stream); struct ethhdr *eth_hdr; void *data; @@ -599,7 +599,7 @@ static struct pkt *pkt_generate(struct ifobject *ifobject, u32 pkt_nb) eth_hdr = data; gen_eth_hdr(ifobject, eth_hdr); - write_payload(data + PKT_HDR_SIZE, pkt_nb, pkt->len - PKT_HDR_SIZE); + write_payload(data + PKT_HDR_SIZE, pkt->pkt_nb, pkt->len - PKT_HDR_SIZE); return pkt; } @@ -883,8 +883,7 @@ static int receive_pkts(struct test_spec *test, struct pollfd *fds) return TEST_PASS; } -static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb, struct pollfd *fds, - bool timeout) +static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeout) { struct xsk_socket_info *xsk = ifobject->xsk; bool use_poll = ifobject->use_poll; @@ -916,14 +915,13 @@ static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb, struct pollfd *fd for (i = 0; i < BATCH_SIZE; i++) { struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i); - struct pkt *pkt = pkt_generate(ifobject, *pkt_nb); + struct pkt *pkt = pkt_generate(ifobject); if (!pkt) 
break; tx_desc->addr = pkt->addr; tx_desc->len = pkt->len; - (*pkt_nb)++; if (pkt->valid) valid_pkts++; } @@ -970,15 +968,16 @@ static void wait_for_tx_completion(struct xsk_socket_info *xsk) static int send_pkts(struct test_spec *test, struct ifobject *ifobject) { + struct pkt_stream *pkt_stream = ifobject->pkt_stream; bool timeout = !is_umem_valid(test->ifobj_rx); struct pollfd fds = { }; - u32 pkt_cnt = 0, ret; + u32 ret; fds.fd = xsk_socket__fd(ifobject->xsk->xsk); fds.events = POLLOUT; - while (pkt_cnt < ifobject->pkt_stream->nb_pkts) { - ret = __send_pkts(ifobject, &pkt_cnt, &fds, timeout); + while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) { + ret = __send_pkts(ifobject, &fds, timeout); if ((ret || test->fail) && !timeout) return TEST_FAILURE; else if (ret == TEST_PASS && timeout) @@ -1150,7 +1149,7 @@ static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream u64 addr; if (pkt_stream->use_addr_for_fill) { - struct pkt *pkt = pkt_stream_get_pkt(pkt_stream, i); + struct pkt *pkt = pkt_stream_get_next_tx_pkt(pkt_stream); if (!pkt) break; @@ -1162,6 +1161,8 @@ static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr; } xsk_ring_prod__submit(&umem->fq, i); + + pkt_stream_reset(pkt_stream); } static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) @@ -1339,9 +1340,11 @@ static int __testapp_validate_traffic(struct test_spec *test, struct ifobject *i { pthread_t t0, t1; - if (ifobj2) + if (ifobj2) { if (pthread_barrier_init(&barr, NULL, 2)) exit_with_error(errno); + pkt_stream_reset(ifobj2->pkt_stream); + } test->current_step++; pkt_stream_reset(ifobj1->pkt_stream); diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h index 5e0be9685557..7ea28d844007 100644 --- a/tools/testing/selftests/bpf/xskxceiver.h +++ b/tools/testing/selftests/bpf/xskxceiver.h @@ -117,7 +117,7 @@ struct pkt { struct pkt_stream { u32 nb_pkts; - u32 rx_pkt_nb; + u32 current_pkt_nb; struct pkt *pkts; bool use_addr_for_fill; }; From d9f6d9709f87236f9a33de0d42dae8402b812e19 Mon Sep 17 00:00:00 2001 From: Magnus Karlsson Date: Tue, 16 May 2023 12:31:05 +0200 Subject: [PATCH 06/10] selftests/xsk: store offset in pkt instead of addr Store the offset in struct pkt instead of the address. This is important since an address is only meaningful in the context of a packet that is stored in a single umem buffer and thus a single Tx descriptor. If the packet, in contrast, needs to be represented by multiple buffers in the umem, storing the address makes no sense since the packet will consist of multiple buffers in the umem at various addresses. This change is in preparation for the upcoming multi-buffer support in AF_XDP and the corresponding tests. So instead of indicating the address, we indicate the offset of the packet in the first buffer. The actual address of the buffer is allocated from the umem with a new function called umem_alloc_buffer(). This also means we can get rid of the use_addr_for_fill flag as the addresses fed into the fill ring will always be the offset from the pkt specification in the packet stream plus the address of the allocated buffer from the umem. No special casing needed.
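In simplified standalone form, the scheme works as in the sketch below, which mirrors the umem_alloc_buffer()/pkt_get_addr() pair added in the diff that follows (the struct and function names here are trimmed assumptions, not the selftest's): buffers are handed out round-robin from the umem and the packet's stored offset is added on top.

#include <stdint.h>

struct umem_sketch {
	uint64_t base_addr;	/* start of this socket's region in the umem */
	uint64_t next_buffer;	/* next frame to hand out */
	uint64_t size;		/* num_frames * frame_size */
	uint32_t frame_size;
};

/* Hand out umem frames one after the other, wrapping at the end. */
static uint64_t alloc_buffer(struct umem_sketch *umem)
{
	uint64_t addr = umem->next_buffer;

	umem->next_buffer += umem->frame_size;
	if (umem->next_buffer >= umem->base_addr + umem->size)
		umem->next_buffer = umem->base_addr;

	return addr;
}

/* The packet only stores an offset; the descriptor address is that offset
 * plus whatever buffer the allocator hands out.
 */
static uint64_t pkt_addr(int offset, struct umem_sketch *umem)
{
	return offset + alloc_buffer(umem);
}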
Signed-off-by: Magnus Karlsson Link: https://lore.kernel.org/r/20230516103109.3066-7-magnus.karlsson@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/xskxceiver.c | 150 +++++++++++++---------- tools/testing/selftests/bpf/xskxceiver.h | 4 +- 2 files changed, 90 insertions(+), 64 deletions(-) diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index 0823890c0709..d488d859d3a2 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -167,7 +167,13 @@ static u32 mode_to_xdp_flags(enum test_mode mode) return (mode == TEST_MODE_SKB) ? XDP_FLAGS_SKB_MODE : XDP_FLAGS_DRV_MODE; } -static int xsk_configure_umem(struct xsk_umem_info *umem, void *buffer, u64 size) +static u64 umem_size(struct xsk_umem_info *umem) +{ + return umem->num_frames * umem->frame_size; +} + +static int xsk_configure_umem(struct ifobject *ifobj, struct xsk_umem_info *umem, void *buffer, + u64 size) { struct xsk_umem_config cfg = { .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS, @@ -187,9 +193,31 @@ static int xsk_configure_umem(struct xsk_umem_info *umem, void *buffer, u64 size return ret; umem->buffer = buffer; + if (ifobj->shared_umem && ifobj->rx_on) { + umem->base_addr = umem_size(umem); + umem->next_buffer = umem_size(umem); + } + return 0; } +static u64 umem_alloc_buffer(struct xsk_umem_info *umem) +{ + u64 addr; + + addr = umem->next_buffer; + umem->next_buffer += umem->frame_size; + if (umem->next_buffer >= umem->base_addr + umem_size(umem)) + umem->next_buffer = umem->base_addr; + + return addr; +} + +static void umem_reset_alloc(struct xsk_umem_info *umem) +{ + umem->next_buffer = 0; +} + static void enable_busy_poll(struct xsk_socket_info *xsk) { int sock_opt; @@ -249,7 +277,7 @@ static bool ifobj_zc_avail(struct ifobject *ifobject) exit_with_error(ENOMEM); } umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; - ret = xsk_configure_umem(umem, bufs, umem_sz); + ret = xsk_configure_umem(ifobject, umem, bufs, umem_sz); if (ret) exit_with_error(-ret); @@ -372,9 +400,6 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx, memset(ifobj->umem, 0, sizeof(*ifobj->umem)); ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS; ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; - if (ifobj->shared_umem && ifobj->rx_on) - ifobj->umem->base_addr = DEFAULT_UMEM_BUFFERS * - XSK_UMEM__DEFAULT_FRAME_SIZE; for (j = 0; j < MAX_SOCKETS; j++) { memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j])); @@ -506,9 +531,9 @@ static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts) return pkt_stream; } -static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr, u32 len) +static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, int offset, u32 len) { - pkt->addr = addr + umem->base_addr; + pkt->offset = offset; pkt->len = len; if (len > umem->frame_size - XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 2 - umem->frame_headroom) pkt->valid = false; @@ -526,8 +551,7 @@ static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb exit_with_error(ENOMEM); for (i = 0; i < nb_pkts; i++) { - pkt_set(umem, &pkt_stream->pkts[i], (i % umem->num_frames) * umem->frame_size, - pkt_len); + pkt_set(umem, &pkt_stream->pkts[i], 0, pkt_len); pkt_stream->pkts[i].pkt_nb = i; } @@ -559,8 +583,7 @@ static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len, pkt_stream = pkt_stream_clone(umem, ifobj->pkt_stream); for (i = 1; i < ifobj->pkt_stream->nb_pkts; i += 2) - 
pkt_set(umem, &pkt_stream->pkts[i], - (i % umem->num_frames) * umem->frame_size + offset, pkt_len); + pkt_set(umem, &pkt_stream->pkts[i], offset, pkt_len); ifobj->pkt_stream = pkt_stream; } @@ -584,24 +607,26 @@ static void pkt_stream_receive_half(struct test_spec *test) pkt_stream->pkts[i].valid = false; } -static struct pkt *pkt_generate(struct ifobject *ifobject) +static u64 pkt_get_addr(struct pkt *pkt, struct xsk_umem_info *umem) +{ + if (!pkt->valid) + return pkt->offset; + return pkt->offset + umem_alloc_buffer(umem); +} + +static void pkt_generate(struct ifobject *ifobject, struct pkt *pkt, u64 addr) { - struct pkt *pkt = pkt_stream_get_next_tx_pkt(ifobject->pkt_stream); struct ethhdr *eth_hdr; void *data; - if (!pkt) - return NULL; if (!pkt->valid || pkt->len < MIN_PKT_SIZE) - return pkt; + return; - data = xsk_umem__get_data(ifobject->umem->buffer, pkt->addr); + data = xsk_umem__get_data(ifobject->umem->buffer, addr); eth_hdr = data; gen_eth_hdr(ifobject, eth_hdr); write_payload(data + PKT_HDR_SIZE, pkt->pkt_nb, pkt->len - PKT_HDR_SIZE); - - return pkt; } static void __pkt_stream_generate_custom(struct ifobject *ifobj, @@ -615,7 +640,7 @@ static void __pkt_stream_generate_custom(struct ifobject *ifobj, exit_with_error(ENOMEM); for (i = 0; i < nb_pkts; i++) { - pkt_stream->pkts[i].addr = pkts[i].addr + ifobj->umem->base_addr; + pkt_stream->pkts[i].offset = pkts[i].offset; pkt_stream->pkts[i].len = pkts[i].len; pkt_stream->pkts[i].pkt_nb = i; pkt_stream->pkts[i].valid = pkts[i].valid; @@ -670,16 +695,16 @@ static void pkt_dump(void *pkt, u32 len) fprintf(stdout, "\n---------------------------------------\n"); } -static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream, u64 addr, - u64 pkt_stream_addr) +static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr) { u32 headroom = umem->unaligned_mode ? 0 : umem->frame_headroom; - u32 offset = addr % umem->frame_size, expected_offset = 0; + u32 offset = addr % umem->frame_size, expected_offset; + int pkt_offset = pkt->valid ? 
pkt->offset : 0; - if (!pkt_stream->use_addr_for_fill) - pkt_stream_addr = 0; + if (!umem->unaligned_mode) + pkt_offset = 0; - expected_offset += (pkt_stream_addr + headroom + XDP_PACKET_HEADROOM) % umem->frame_size; + expected_offset = (pkt_offset + headroom + XDP_PACKET_HEADROOM) % umem->frame_size; if (offset == expected_offset) return true; @@ -858,7 +883,7 @@ static int receive_pkts(struct test_spec *test, struct pollfd *fds) addr = xsk_umem__add_offset_to_addr(addr); if (!is_pkt_valid(pkt, umem->buffer, addr, desc->len) || - !is_offset_correct(umem, pkt_stream, addr, pkt->addr) || + !is_offset_correct(umem, pkt, addr) || (ifobj->use_metadata && !is_metadata_correct(pkt, umem->buffer, addr))) return TEST_FAILURE; @@ -915,15 +940,16 @@ static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeo for (i = 0; i < BATCH_SIZE; i++) { struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i); - struct pkt *pkt = pkt_generate(ifobject); + struct pkt *pkt = pkt_stream_get_next_tx_pkt(ifobject->pkt_stream); if (!pkt) break; - tx_desc->addr = pkt->addr; + tx_desc->addr = pkt_get_addr(pkt, ifobject->umem); tx_desc->len = pkt->len; if (pkt->valid) valid_pkts++; + pkt_generate(ifobject, pkt, tx_desc->addr); } pthread_mutex_lock(&pacing_mutex); @@ -1130,11 +1156,12 @@ static void thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobje ifobject->xsk = &ifobject->xsk_arr[0]; ifobject->xskmap = test->ifobj_rx->xskmap; memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info)); + ifobject->umem->base_addr = 0; } static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream) { - u32 idx = 0, i, buffers_to_fill; + u32 idx = 0, i, buffers_to_fill, nb_pkts; int ret; if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS) @@ -1145,24 +1172,23 @@ static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx); if (ret != buffers_to_fill) exit_with_error(ENOSPC); + for (i = 0; i < buffers_to_fill; i++) { + struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &nb_pkts); u64 addr; - if (pkt_stream->use_addr_for_fill) { - struct pkt *pkt = pkt_stream_get_next_tx_pkt(pkt_stream); - - if (!pkt) - break; - addr = pkt->addr; - } else { - addr = i * umem->frame_size; - } - + if (!pkt) + addr = i * umem->frame_size + umem->base_addr; + else if (pkt->offset >= 0) + addr = pkt->offset % umem->frame_size + umem_alloc_buffer(umem); + else + addr = pkt->offset + umem_alloc_buffer(umem); *xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr; } xsk_ring_prod__submit(&umem->fq, i); pkt_stream_reset(pkt_stream); + umem_reset_alloc(umem); } static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) @@ -1183,12 +1209,10 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) if (bufs == MAP_FAILED) exit_with_error(errno); - ret = xsk_configure_umem(ifobject->umem, bufs, umem_sz); + ret = xsk_configure_umem(ifobject, ifobject->umem, bufs, umem_sz); if (ret) exit_with_error(-ret); - xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream); - xsk_configure_socket(test, ifobject, ifobject->umem, false); ifobject->xsk = &ifobject->xsk_arr[0]; @@ -1196,6 +1220,8 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) if (!ifobject->rx_on) return; + xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream); + ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk); if (ret) 
exit_with_error(errno); @@ -1543,9 +1569,8 @@ static bool testapp_unaligned(struct test_spec *test) test_spec_set_name(test, "UNALIGNED_MODE"); test->ifobj_tx->umem->unaligned_mode = true; test->ifobj_rx->umem->unaligned_mode = true; - /* Let half of the packets straddle a buffer boundrary */ + /* Let half of the packets straddle a 4K buffer boundary */ pkt_stream_replace_half(test, MIN_PKT_SIZE, -MIN_PKT_SIZE / 2); - test->ifobj_rx->pkt_stream->use_addr_for_fill = true; testapp_validate_traffic(test); return true; @@ -1553,7 +1578,7 @@ static bool testapp_unaligned(struct test_spec *test) static void testapp_single_pkt(struct test_spec *test) { - struct pkt pkts[] = {{0x1000, MIN_PKT_SIZE, 0, true}}; + struct pkt pkts[] = {{0, MIN_PKT_SIZE, 0, true}}; pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)); testapp_validate_traffic(test); @@ -1561,42 +1586,43 @@ static void testapp_single_pkt(struct test_spec *test) static void testapp_invalid_desc(struct test_spec *test) { - u64 umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size; + struct xsk_umem_info *umem = test->ifobj_tx->umem; + u64 umem_size = umem->num_frames * umem->frame_size; struct pkt pkts[] = { /* Zero packet address allowed */ {0, MIN_PKT_SIZE, 0, true}, /* Allowed packet */ - {0x1000, MIN_PKT_SIZE, 0, true}, + {0, MIN_PKT_SIZE, 0, true}, /* Straddling the start of umem */ {-2, MIN_PKT_SIZE, 0, false}, /* Packet too large */ - {0x2000, XSK_UMEM__INVALID_FRAME_SIZE, 0, false}, + {0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false}, /* Up to end of umem allowed */ - {umem_size - MIN_PKT_SIZE, MIN_PKT_SIZE, 0, true}, + {umem_size - MIN_PKT_SIZE - 2 * umem->frame_size, MIN_PKT_SIZE, 0, true}, /* After umem ends */ {umem_size, MIN_PKT_SIZE, 0, false}, /* Straddle the end of umem */ {umem_size - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false}, - /* Straddle a page boundrary */ - {0x3000 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false}, - /* Straddle a 2K boundrary */ - {0x3800 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, true}, + /* Straddle a 4K boundary */ + {0x1000 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false}, + /* Straddle a 2K boundary */ + {0x800 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, true}, /* Valid packet for synch so that something is received */ - {0x4000, MIN_PKT_SIZE, 0, true}}; + {0, MIN_PKT_SIZE, 0, true}}; - if (test->ifobj_tx->umem->unaligned_mode) { - /* Crossing a page boundrary allowed */ + if (umem->unaligned_mode) { + /* Crossing a page boundary allowed */ pkts[7].valid = true; } - if (test->ifobj_tx->umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) { - /* Crossing a 2K frame size boundrary not allowed */ + if (umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) { + /* Crossing a 2K frame size boundary not allowed */ pkts[8].valid = false; } if (test->ifobj_tx->shared_umem) { - pkts[4].addr += umem_size; - pkts[5].addr += umem_size; - pkts[6].addr += umem_size; + pkts[4].offset += umem_size; + pkts[5].offset += umem_size; + pkts[6].offset += umem_size; } pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)); diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h index 7ea28d844007..be4664a38d74 100644 --- a/tools/testing/selftests/bpf/xskxceiver.h +++ b/tools/testing/selftests/bpf/xskxceiver.h @@ -91,6 +91,7 @@ struct xsk_umem_info { struct xsk_ring_prod fq; struct xsk_ring_cons cq; struct xsk_umem *umem; + u64 next_buffer; u32 num_frames; u32 frame_headroom; void *buffer; @@ -109,7 +110,7 @@ struct xsk_socket_info { }; struct pkt { - u64 addr; + int offset; 
u32 len; u32 pkt_nb; bool valid; }; @@ -119,7 +120,6 @@ struct pkt_stream { u32 nb_pkts; u32 current_pkt_nb; struct pkt *pkts; - bool use_addr_for_fill; }; struct ifobject; From 041b68f688a38865434d7b8fbfe64beb03e54ff2 Mon Sep 17 00:00:00 2001 From: Magnus Karlsson Date: Tue, 16 May 2023 12:31:06 +0200 Subject: [PATCH 07/10] selftests/xsk: test for huge pages only once Test for hugepages only once at the beginning of the execution of the whole test suite, instead of before each test that needs huge pages. These are the tests that use unaligned mode. More unaligned tests will be added, so the current system just does not scale. With this change, there are now three possible outcomes of a test run: fail, pass, or skip. To simplify the handling of this, the function testapp_validate_traffic() now returns this value to the main loop. As this function is used by nearly all tests, it meant a small change to most of them. Signed-off-by: Magnus Karlsson Link: https://lore.kernel.org/r/20230516103109.3066-8-magnus.karlsson@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/xskxceiver.c | 186 +++++++++++------------ tools/testing/selftests/bpf/xskxceiver.h | 2 + 2 files changed, 94 insertions(+), 94 deletions(-) diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index d488d859d3a2..f0d929cb730a 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -1413,6 +1413,12 @@ static int testapp_validate_traffic(struct test_spec *test) struct ifobject *ifobj_rx = test->ifobj_rx; struct ifobject *ifobj_tx = test->ifobj_tx; + if ((ifobj_rx->umem->unaligned_mode && !ifobj_rx->unaligned_supp) || + (ifobj_tx->umem->unaligned_mode && !ifobj_tx->unaligned_supp)) { + ksft_test_result_skip("No huge pages present.\n"); + return TEST_SKIP; + } + xsk_attach_xdp_progs(test, ifobj_rx, ifobj_tx); return __testapp_validate_traffic(test, ifobj_rx, ifobj_tx); } @@ -1422,16 +1428,18 @@ static int testapp_validate_traffic_single_thread(struct test_spec *test, struct return __testapp_validate_traffic(test, ifobj, NULL); } -static void testapp_teardown(struct test_spec *test) +static int testapp_teardown(struct test_spec *test) { int i; test_spec_set_name(test, "TEARDOWN"); for (i = 0; i < MAX_TEARDOWN_ITER; i++) { if (testapp_validate_traffic(test)) - return; + return TEST_FAILURE; test_spec_reset(test); } + + return TEST_PASS; } static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2) @@ -1446,20 +1454,23 @@ static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2) *ifobj2 = tmp_ifobj; } -static void testapp_bidi(struct test_spec *test) +static int testapp_bidi(struct test_spec *test) { + int res; + test_spec_set_name(test, "BIDIRECTIONAL"); test->ifobj_tx->rx_on = true; test->ifobj_rx->tx_on = true; test->total_steps = 2; if (testapp_validate_traffic(test)) - return; + return TEST_FAILURE; print_verbose("Switching Tx/Rx vectors\n"); swap_directions(&test->ifobj_rx, &test->ifobj_tx); - __testapp_validate_traffic(test, test->ifobj_rx, test->ifobj_tx); + res = __testapp_validate_traffic(test, test->ifobj_rx, test->ifobj_tx); swap_directions(&test->ifobj_rx, &test->ifobj_tx); + return res; } static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx) @@ -1476,115 +1487,94 @@ static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobj exit_with_error(errno); } -static void testapp_bpf_res(struct test_spec *test) +static int
testapp_bpf_res(struct test_spec *test) { test_spec_set_name(test, "BPF_RES"); test->total_steps = 2; test->nb_sockets = 2; if (testapp_validate_traffic(test)) - return; + return TEST_FAILURE; swap_xsk_resources(test->ifobj_tx, test->ifobj_rx); - testapp_validate_traffic(test); + return testapp_validate_traffic(test); } -static void testapp_headroom(struct test_spec *test) +static int testapp_headroom(struct test_spec *test) { test_spec_set_name(test, "UMEM_HEADROOM"); test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE; - testapp_validate_traffic(test); + return testapp_validate_traffic(test); } -static void testapp_stats_rx_dropped(struct test_spec *test) +static int testapp_stats_rx_dropped(struct test_spec *test) { test_spec_set_name(test, "STAT_RX_DROPPED"); + if (test->mode == TEST_MODE_ZC) { + ksft_test_result_skip("Can not run RX_DROPPED test for ZC mode\n"); + return TEST_SKIP; + } + pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0); test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size - XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3; pkt_stream_receive_half(test); test->ifobj_rx->validation_func = validate_rx_dropped; - testapp_validate_traffic(test); + return testapp_validate_traffic(test); } -static void testapp_stats_tx_invalid_descs(struct test_spec *test) +static int testapp_stats_tx_invalid_descs(struct test_spec *test) { test_spec_set_name(test, "STAT_TX_INVALID"); pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0); test->ifobj_tx->validation_func = validate_tx_invalid_descs; - testapp_validate_traffic(test); + return testapp_validate_traffic(test); } -static void testapp_stats_rx_full(struct test_spec *test) +static int testapp_stats_rx_full(struct test_spec *test) { test_spec_set_name(test, "STAT_RX_FULL"); pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE); test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE); - if (!test->ifobj_rx->pkt_stream) - exit_with_error(ENOMEM); test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS; test->ifobj_rx->release_rx = false; test->ifobj_rx->validation_func = validate_rx_full; - testapp_validate_traffic(test); + return testapp_validate_traffic(test); } -static void testapp_stats_fill_empty(struct test_spec *test) +static int testapp_stats_fill_empty(struct test_spec *test) { test_spec_set_name(test, "STAT_RX_FILL_EMPTY"); pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE); test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE); - if (!test->ifobj_rx->pkt_stream) - exit_with_error(ENOMEM); test->ifobj_rx->use_fill_ring = false; test->ifobj_rx->validation_func = validate_fill_empty; - testapp_validate_traffic(test); + return testapp_validate_traffic(test); } -/* Simple test */ -static bool hugepages_present(struct ifobject *ifobject) +static int testapp_unaligned(struct test_spec *test) { - size_t mmap_sz = 2 * ifobject->umem->num_frames * ifobject->umem->frame_size; - void *bufs; - - bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB, -1, 0); - if (bufs == MAP_FAILED) - return false; - - mmap_sz = ceil_u64(mmap_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE; - munmap(bufs, mmap_sz); - return true; -} - -static bool testapp_unaligned(struct test_spec *test) -{ - if (!hugepages_present(test->ifobj_tx)) { - ksft_test_result_skip("No 2M huge pages present.\n"); - return false; - } - 
test_spec_set_name(test, "UNALIGNED_MODE"); test->ifobj_tx->umem->unaligned_mode = true; test->ifobj_rx->umem->unaligned_mode = true; /* Let half of the packets straddle a 4K buffer boundary */ pkt_stream_replace_half(test, MIN_PKT_SIZE, -MIN_PKT_SIZE / 2); - testapp_validate_traffic(test); - return true; + return testapp_validate_traffic(test); } -static void testapp_single_pkt(struct test_spec *test) +static int testapp_single_pkt(struct test_spec *test) { struct pkt pkts[] = {{0, MIN_PKT_SIZE, 0, true}}; pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)); - testapp_validate_traffic(test); + return testapp_validate_traffic(test); } -static void testapp_invalid_desc(struct test_spec *test) +static int testapp_invalid_desc(struct test_spec *test) { struct xsk_umem_info *umem = test->ifobj_tx->umem; u64 umem_size = umem->num_frames * umem->frame_size; @@ -1626,10 +1616,10 @@ static void testapp_invalid_desc(struct test_spec *test) } pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)); - testapp_validate_traffic(test); + return testapp_validate_traffic(test); } -static void testapp_xdp_drop(struct test_spec *test) +static int testapp_xdp_drop(struct test_spec *test) { struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs; struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs; @@ -1639,10 +1629,10 @@ static void testapp_xdp_drop(struct test_spec *test) skel_rx->maps.xsk, skel_tx->maps.xsk); pkt_stream_receive_half(test); - testapp_validate_traffic(test); + return testapp_validate_traffic(test); } -static void testapp_xdp_metadata_count(struct test_spec *test) +static int testapp_xdp_metadata_count(struct test_spec *test) { struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs; struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs; @@ -1663,10 +1653,10 @@ static void testapp_xdp_metadata_count(struct test_spec *test) if (bpf_map_update_elem(bpf_map__fd(data_map), &key, &count, BPF_ANY)) exit_with_error(errno); - testapp_validate_traffic(test); + return testapp_validate_traffic(test); } -static void testapp_poll_txq_tmout(struct test_spec *test) +static int testapp_poll_txq_tmout(struct test_spec *test) { test_spec_set_name(test, "POLL_TXQ_FULL"); @@ -1674,14 +1664,14 @@ static void testapp_poll_txq_tmout(struct test_spec *test) /* create invalid frame by set umem frame_size and pkt length equal to 2048 */ test->ifobj_tx->umem->frame_size = 2048; pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048); - testapp_validate_traffic_single_thread(test, test->ifobj_tx); + return testapp_validate_traffic_single_thread(test, test->ifobj_tx); } -static void testapp_poll_rxq_tmout(struct test_spec *test) +static int testapp_poll_rxq_tmout(struct test_spec *test) { test_spec_set_name(test, "POLL_RXQ_EMPTY"); test->ifobj_rx->use_poll = true; - testapp_validate_traffic_single_thread(test, test->ifobj_rx); + return testapp_validate_traffic_single_thread(test, test->ifobj_rx); } static int xsk_load_xdp_programs(struct ifobject *ifobj) @@ -1698,6 +1688,22 @@ static void xsk_unload_xdp_programs(struct ifobject *ifobj) xsk_xdp_progs__destroy(ifobj->xdp_progs); } +/* Simple test */ +static bool hugepages_present(void) +{ + size_t mmap_sz = 2 * DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE; + void *bufs; + + bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, MAP_HUGE_2MB); + if (bufs == MAP_FAILED) + return false; + + mmap_sz = ceil_u64(mmap_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE; + munmap(bufs, mmap_sz); + return true; +} + static void 
init_iface(struct ifobject *ifobj, const char *dst_mac, const char *src_mac, thread_func_t func_ptr) { @@ -1713,94 +1719,87 @@ static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char * printf("Error loading XDP program\n"); exit_with_error(err); } + + if (hugepages_present()) + ifobj->unaligned_supp = true; } static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_type type) { + int ret = TEST_SKIP; + switch (type) { case TEST_TYPE_STATS_RX_DROPPED: - if (mode == TEST_MODE_ZC) { - ksft_test_result_skip("Can not run RX_DROPPED test for ZC mode\n"); - return; - } - testapp_stats_rx_dropped(test); + ret = testapp_stats_rx_dropped(test); break; case TEST_TYPE_STATS_TX_INVALID_DESCS: - testapp_stats_tx_invalid_descs(test); + ret = testapp_stats_tx_invalid_descs(test); break; case TEST_TYPE_STATS_RX_FULL: - testapp_stats_rx_full(test); + ret = testapp_stats_rx_full(test); break; case TEST_TYPE_STATS_FILL_EMPTY: - testapp_stats_fill_empty(test); + ret = testapp_stats_fill_empty(test); break; case TEST_TYPE_TEARDOWN: - testapp_teardown(test); + ret = testapp_teardown(test); break; case TEST_TYPE_BIDI: - testapp_bidi(test); + ret = testapp_bidi(test); break; case TEST_TYPE_BPF_RES: - testapp_bpf_res(test); + ret = testapp_bpf_res(test); break; case TEST_TYPE_RUN_TO_COMPLETION: test_spec_set_name(test, "RUN_TO_COMPLETION"); - testapp_validate_traffic(test); + ret = testapp_validate_traffic(test); break; case TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT: test_spec_set_name(test, "RUN_TO_COMPLETION_SINGLE_PKT"); - testapp_single_pkt(test); + ret = testapp_single_pkt(test); break; case TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME: test_spec_set_name(test, "RUN_TO_COMPLETION_2K_FRAME_SIZE"); test->ifobj_tx->umem->frame_size = 2048; test->ifobj_rx->umem->frame_size = 2048; pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE); - testapp_validate_traffic(test); + ret = testapp_validate_traffic(test); break; case TEST_TYPE_RX_POLL: test->ifobj_rx->use_poll = true; test_spec_set_name(test, "POLL_RX"); - testapp_validate_traffic(test); + ret = testapp_validate_traffic(test); break; case TEST_TYPE_TX_POLL: test->ifobj_tx->use_poll = true; test_spec_set_name(test, "POLL_TX"); - testapp_validate_traffic(test); + ret = testapp_validate_traffic(test); break; case TEST_TYPE_POLL_TXQ_TMOUT: - testapp_poll_txq_tmout(test); + ret = testapp_poll_txq_tmout(test); break; case TEST_TYPE_POLL_RXQ_TMOUT: - testapp_poll_rxq_tmout(test); + ret = testapp_poll_rxq_tmout(test); break; case TEST_TYPE_ALIGNED_INV_DESC: test_spec_set_name(test, "ALIGNED_INV_DESC"); - testapp_invalid_desc(test); + ret = testapp_invalid_desc(test); break; case TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME: test_spec_set_name(test, "ALIGNED_INV_DESC_2K_FRAME_SIZE"); test->ifobj_tx->umem->frame_size = 2048; test->ifobj_rx->umem->frame_size = 2048; - testapp_invalid_desc(test); + ret = testapp_invalid_desc(test); break; case TEST_TYPE_UNALIGNED_INV_DESC: - if (!hugepages_present(test->ifobj_tx)) { - ksft_test_result_skip("No 2M huge pages present.\n"); - return; - } test_spec_set_name(test, "UNALIGNED_INV_DESC"); test->ifobj_tx->umem->unaligned_mode = true; test->ifobj_rx->umem->unaligned_mode = true; - testapp_invalid_desc(test); + ret = testapp_invalid_desc(test); break; case TEST_TYPE_UNALIGNED_INV_DESC_4K1_FRAME: { u64 page_size, umem_size; - if (!hugepages_present(test->ifobj_tx)) { - ksft_test_result_skip("No 2M huge pages present.\n"); - return; - } test_spec_set_name(test, "UNALIGNED_INV_DESC_4K1_FRAME_SIZE"); /* Odd 
frame size so the UMEM doesn't end near a page boundary. */ test->ifobj_tx->umem->frame_size = 4001; @@ -1814,27 +1813,26 @@ static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_ umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size; assert(umem_size % page_size > MIN_PKT_SIZE); assert(umem_size % page_size < page_size - MIN_PKT_SIZE); - testapp_invalid_desc(test); + ret = testapp_invalid_desc(test); break; } case TEST_TYPE_UNALIGNED: - if (!testapp_unaligned(test)) - return; + ret = testapp_unaligned(test); break; case TEST_TYPE_HEADROOM: - testapp_headroom(test); + ret = testapp_headroom(test); break; case TEST_TYPE_XDP_DROP_HALF: - testapp_xdp_drop(test); + ret = testapp_xdp_drop(test); break; case TEST_TYPE_XDP_METADATA_COUNT: - testapp_xdp_metadata_count(test); + ret = testapp_xdp_metadata_count(test); break; default: break; } - if (!test->fail) + if (ret == TEST_PASS) ksft_test_result_pass("PASS: %s %s%s\n", mode_string(test), busy_poll_string(test), test->name); pkt_stream_restore_default(test); diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h index be4664a38d74..00862732e751 100644 --- a/tools/testing/selftests/bpf/xskxceiver.h +++ b/tools/testing/selftests/bpf/xskxceiver.h @@ -30,6 +30,7 @@ #define TEST_PASS 0 #define TEST_FAILURE -1 #define TEST_CONTINUE 1 +#define TEST_SKIP 2 #define MAX_INTERFACES 2 #define MAX_INTERFACE_NAME_CHARS 16 #define MAX_SOCKETS 2 @@ -148,6 +149,7 @@ struct ifobject { bool release_rx; bool shared_umem; bool use_metadata; + bool unaligned_supp; u8 dst_mac[ETH_ALEN]; u8 src_mac[ETH_ALEN]; }; From 86e41755b43227858ef04b452c25b9dbcf667622 Mon Sep 17 00:00:00 2001 From: Magnus Karlsson Date: Tue, 16 May 2023 12:31:07 +0200 Subject: [PATCH 08/10] selftests/xsk: populate fill ring based on frags needed Populate the fill ring based on the number of frags a packet needs. With multi-buffer support, a packet might require more than a single fragment/buffer, so the function xsk_populate_fill_ring() needs to consider how many buffers a packet will consume, and put that many buffers on the fill ring for each packet it should receive. As we are still not sending any multi-buffer packets, the function will only produce one buffer per packet at the moment. 
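In other words, the number of fill-ring entries produced for a packet is a ceiling division of its length by the usable Rx frame size (the UMEM frame size minus XDP_PACKET_HEADROOM). The following is only a minimal standalone sketch of that arithmetic with made-up frame and packet sizes, not code taken from the harness:

    #include <stdio.h>

    /* Stand-in for the kernel's XDP_PACKET_HEADROOM (256 bytes). */
    #define XDP_PACKET_HEADROOM 256

    static unsigned int ceil_u32(unsigned int a, unsigned int b)
    {
            return (a + b - 1) / b;
    }

    int main(void)
    {
            unsigned int frame_size = 4096;          /* hypothetical UMEM frame size */
            unsigned int rx_frame_size = frame_size - XDP_PACKET_HEADROOM;
            unsigned int pkt_len = 9000;             /* hypothetical multi-buffer packet */

            /* One fill-ring buffer per fragment the packet will occupy. */
            printf("%u-byte packet -> %u fill-ring buffers\n",
                   pkt_len, ceil_u32(pkt_len, rx_frame_size));
            return 0;
    }

With these numbers the packet spans three buffers, so three fill-ring entries are produced for it, while single-buffer packets keep producing exactly one entry, as described above.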
Signed-off-by: Magnus Karlsson Link: https://lore.kernel.org/r/20230516103109.3066-9-magnus.karlsson@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/xsk.h | 5 +++ tools/testing/selftests/bpf/xskxceiver.c | 48 ++++++++++++++++++------ 2 files changed, 41 insertions(+), 12 deletions(-) diff --git a/tools/testing/selftests/bpf/xsk.h b/tools/testing/selftests/bpf/xsk.h index 04ed8b544712..8da8d557768b 100644 --- a/tools/testing/selftests/bpf/xsk.h +++ b/tools/testing/selftests/bpf/xsk.h @@ -134,6 +134,11 @@ static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb) __atomic_store_n(prod->producer, *prod->producer + nb, __ATOMIC_RELEASE); } +static inline void xsk_ring_prod__cancel(struct xsk_ring_prod *prod, __u32 nb) +{ + prod->cached_prod -= nb; +} + static inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __u32 *idx) { __u32 entries = xsk_cons_nb_avail(cons, nb); diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index f0d929cb730a..c54f25dcf134 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -531,6 +531,18 @@ static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts) return pkt_stream; } +static u32 ceil_u32(u32 a, u32 b) +{ + return (a + b - 1) / b; +} + +static u32 pkt_nb_frags(u32 frame_size, struct pkt *pkt) +{ + if (!pkt || !pkt->valid) + return 1; + return ceil_u32(pkt->len, frame_size); +} + static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, int offset, u32 len) { pkt->offset = offset; @@ -1159,9 +1171,11 @@ static void thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobje ifobject->umem->base_addr = 0; } -static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream) +static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream, + bool fill_up) { - u32 idx = 0, i, buffers_to_fill, nb_pkts; + u32 rx_frame_size = umem->frame_size - XDP_PACKET_HEADROOM; + u32 idx = 0, filled = 0, buffers_to_fill, nb_pkts; int ret; if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS) @@ -1173,19 +1187,29 @@ static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream if (ret != buffers_to_fill) exit_with_error(ENOSPC); - for (i = 0; i < buffers_to_fill; i++) { + while (filled < buffers_to_fill) { struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &nb_pkts); u64 addr; + u32 i; - if (!pkt) - addr = i * umem->frame_size + umem->base_addr; - else if (pkt->offset >= 0) - addr = pkt->offset % umem->frame_size + umem_alloc_buffer(umem); - else - addr = pkt->offset + umem_alloc_buffer(umem); - *xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr; + for (i = 0; i < pkt_nb_frags(rx_frame_size, pkt); i++) { + if (!pkt) { + if (!fill_up) + break; + addr = filled * umem->frame_size + umem->base_addr; + } else if (pkt->offset >= 0) { + addr = pkt->offset % umem->frame_size + umem_alloc_buffer(umem); + } else { + addr = pkt->offset + umem_alloc_buffer(umem); + } + + *xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr; + if (++filled >= buffers_to_fill) + break; + } } - xsk_ring_prod__submit(&umem->fq, i); + xsk_ring_prod__submit(&umem->fq, filled); + xsk_ring_prod__cancel(&umem->fq, buffers_to_fill - filled); pkt_stream_reset(pkt_stream); umem_reset_alloc(umem); @@ -1220,7 +1244,7 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) if (!ifobject->rx_on) return; - xsk_populate_fill_ring(ifobject->umem, 
ifobject->pkt_stream); + xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream, ifobject->use_fill_ring); ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk); if (ret) From 2f6eae0df1a80bb636f43f1b954678da0a10fa49 Mon Sep 17 00:00:00 2001 From: Magnus Karlsson Date: Tue, 16 May 2023 12:31:08 +0200 Subject: [PATCH 09/10] selftests/xsk: generate data for multi-buffer packets Add the ability to generate data in the packets that are correct for multi-buffer packets. The ethernet header should only go into the first fragment followed by data and the others should only have data. We also need to modify the pkt_dump function so that it knows what fragment has an ethernet header so it can print this. Signed-off-by: Magnus Karlsson Link: https://lore.kernel.org/r/20230516103109.3066-10-magnus.karlsson@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/xskxceiver.c | 70 +++++++++++++++--------- 1 file changed, 43 insertions(+), 27 deletions(-) diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index c54f25dcf134..b48017611499 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -142,12 +142,14 @@ static void report_failure(struct test_spec *test) * 16-bits and a intra packet data sequence number in the lower 16 bits. So the 3rd packet's * 5th word of data will contain the number (2<<16) | 4 as they are numbered from 0. */ -static void write_payload(void *dest, u32 val, u32 size) +static void write_payload(void *dest, u32 pkt_nb, u32 start, u32 size) { u32 *ptr = (u32 *)dest, i; - for (i = 0; i < size / sizeof(*ptr); i++) - ptr[i] = htonl(val << 16 | i); + start /= sizeof(*ptr); + size /= sizeof(*ptr); + for (i = 0; i < size; i++) + ptr[i] = htonl(pkt_nb << 16 | (i + start)); } static void gen_eth_hdr(struct ifobject *ifobject, struct ethhdr *eth_hdr) @@ -563,8 +565,10 @@ static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb exit_with_error(ENOMEM); for (i = 0; i < nb_pkts; i++) { - pkt_set(umem, &pkt_stream->pkts[i], 0, pkt_len); - pkt_stream->pkts[i].pkt_nb = i; + struct pkt *pkt = &pkt_stream->pkts[i]; + + pkt_set(umem, pkt, 0, pkt_len); + pkt->pkt_nb = i; } return pkt_stream; @@ -626,19 +630,24 @@ static u64 pkt_get_addr(struct pkt *pkt, struct xsk_umem_info *umem) return pkt->offset + umem_alloc_buffer(umem); } -static void pkt_generate(struct ifobject *ifobject, struct pkt *pkt, u64 addr) +static void pkt_generate(struct ifobject *ifobject, u64 addr, u32 len, u32 pkt_nb, + u32 bytes_written) { - struct ethhdr *eth_hdr; - void *data; + void *data = xsk_umem__get_data(ifobject->umem->buffer, addr); - if (!pkt->valid || pkt->len < MIN_PKT_SIZE) + if (len < MIN_PKT_SIZE) return; - data = xsk_umem__get_data(ifobject->umem->buffer, addr); - eth_hdr = data; + if (!bytes_written) { + gen_eth_hdr(ifobject, data); - gen_eth_hdr(ifobject, eth_hdr); - write_payload(data + PKT_HDR_SIZE, pkt->pkt_nb, pkt->len - PKT_HDR_SIZE); + len -= PKT_HDR_SIZE; + data += PKT_HDR_SIZE; + } else { + bytes_written -= PKT_HDR_SIZE; + } + + write_payload(data, pkt_nb, bytes_written, len); } static void __pkt_stream_generate_custom(struct ifobject *ifobj, @@ -681,27 +690,33 @@ static void pkt_print_data(u32 *data, u32 cnt) } } -static void pkt_dump(void *pkt, u32 len) +static void pkt_dump(void *pkt, u32 len, bool eth_header) { struct ethhdr *ethhdr = pkt; - u32 i; + u32 i, *data; - /*extract L2 frame */ - fprintf(stdout, "DEBUG>> L2: dst mac: "); - for (i = 0; i < 
ETH_ALEN; i++) - fprintf(stdout, "%02X", ethhdr->h_dest[i]); + if (eth_header) { + /*extract L2 frame */ + fprintf(stdout, "DEBUG>> L2: dst mac: "); + for (i = 0; i < ETH_ALEN; i++) + fprintf(stdout, "%02X", ethhdr->h_dest[i]); - fprintf(stdout, "\nDEBUG>> L2: src mac: "); - for (i = 0; i < ETH_ALEN; i++) - fprintf(stdout, "%02X", ethhdr->h_source[i]); + fprintf(stdout, "\nDEBUG>> L2: src mac: "); + for (i = 0; i < ETH_ALEN; i++) + fprintf(stdout, "%02X", ethhdr->h_source[i]); + + data = pkt + PKT_HDR_SIZE; + } else { + data = pkt; + } /*extract L5 frame */ fprintf(stdout, "\nDEBUG>> L5: seqnum: "); - pkt_print_data(pkt + PKT_HDR_SIZE, PKT_DUMP_NB_TO_PRINT); + pkt_print_data(data, PKT_DUMP_NB_TO_PRINT); fprintf(stdout, "...."); if (len > PKT_DUMP_NB_TO_PRINT * sizeof(u32)) { fprintf(stdout, "\n.... "); - pkt_print_data(pkt + PKT_HDR_SIZE + len - PKT_DUMP_NB_TO_PRINT * sizeof(u32), + pkt_print_data(data + len / sizeof(u32) - PKT_DUMP_NB_TO_PRINT, PKT_DUMP_NB_TO_PRINT); } fprintf(stdout, "\n---------------------------------------\n"); @@ -772,7 +787,7 @@ static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len) return true; error: - pkt_dump(data, len); + pkt_dump(data, len, true); return false; } @@ -959,9 +974,10 @@ static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeo tx_desc->addr = pkt_get_addr(pkt, ifobject->umem); tx_desc->len = pkt->len; - if (pkt->valid) + if (pkt->valid) { valid_pkts++; - pkt_generate(ifobject, pkt, tx_desc->addr); + pkt_generate(ifobject, tx_desc->addr, tx_desc->len, pkt->pkt_nb, 0); + } } pthread_mutex_lock(&pacing_mutex); From 7cd6df4f5ec278888b4c3ab12b291c2124b46c1c Mon Sep 17 00:00:00 2001 From: Magnus Karlsson Date: Tue, 16 May 2023 12:31:09 +0200 Subject: [PATCH 10/10] selftests/xsk: adjust packet pacing for multi-buffer support Modify the packet pacing algorithm so that it works with multi-buffer packets. The algorithm makes sure we do not send so many buffers to the receiving thread that packets have to be dropped. The previous algorithm assumed that each packet consumes only one buffer, but that is no longer true once multi-buffer support is added. Instead, we find the largest packet size in the packet stream and assume that every packet consumes as many buffers as that largest packet does. This is conservative and overly cautious, since smaller packets in the stream may need fewer buffers per packet, but it keeps the algorithm simple. Also simplify the algorithm by removing the pthread condition variable and instead just testing whether there is enough space in the Rx thread before trying to send one more batch. This also makes the tests run faster.
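As a rough illustration of the resulting back-off bound, the sketch below redoes the calculation with made-up UMEM dimensions; in the patch itself the same computation is done with pkt_get_buffer_len(), umem_size() and the harness' BATCH_SIZE before __send_pkts() decides whether to submit another batch:

    #include <stdio.h>

    #define BATCH_SIZE 64                            /* hypothetical batch size */

    static unsigned int ceil_u32(unsigned int a, unsigned int b)
    {
            return (a + b - 1) / b;
    }

    int main(void)
    {
            unsigned int frame_size = 4096, num_frames = 1024;  /* hypothetical UMEM */
            unsigned int max_pkt_len = 9000;         /* largest packet in the stream */

            /* Worst case: every packet needs as many whole frames as the largest one. */
            unsigned int buffer_len = ceil_u32(max_pkt_len, frame_size) * frame_size;
            unsigned long umem_sz = (unsigned long)num_frames * frame_size;

            /* Tx backs off once this many worst-case packets are in flight. */
            unsigned int limit = (umem_sz - (unsigned long)BATCH_SIZE * buffer_len) / buffer_len;

            printf("pacing limit: %u in-flight packets of up to %u bytes\n",
                   limit, max_pkt_len);
            return 0;
    }

Because it is based on the largest packet in the stream, the limit is deliberately pessimistic, matching the conservative trade-off described above.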
Signed-off-by: Magnus Karlsson Link: https://lore.kernel.org/r/20230516103109.3066-11-magnus.karlsson@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/xskxceiver.c | 48 ++++++++++++++---------- tools/testing/selftests/bpf/xskxceiver.h | 2 +- 2 files changed, 30 insertions(+), 20 deletions(-) diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index b48017611499..218d7f694e5c 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -555,6 +555,11 @@ static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, int offset, u32 pkt->valid = true; } +static u32 pkt_get_buffer_len(struct xsk_umem_info *umem, u32 len) +{ + return ceil_u32(len, umem->frame_size) * umem->frame_size; +} + static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb_pkts, u32 pkt_len) { struct pkt_stream *pkt_stream; @@ -564,6 +569,8 @@ static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb if (!pkt_stream) exit_with_error(ENOMEM); + pkt_stream->nb_pkts = nb_pkts; + pkt_stream->max_pkt_len = pkt_len; for (i = 0; i < nb_pkts; i++) { struct pkt *pkt = &pkt_stream->pkts[i]; @@ -661,10 +668,14 @@ static void __pkt_stream_generate_custom(struct ifobject *ifobj, exit_with_error(ENOMEM); for (i = 0; i < nb_pkts; i++) { - pkt_stream->pkts[i].offset = pkts[i].offset; - pkt_stream->pkts[i].len = pkts[i].len; - pkt_stream->pkts[i].pkt_nb = i; - pkt_stream->pkts[i].valid = pkts[i].valid; + struct pkt *pkt = &pkt_stream->pkts[i]; + + pkt->offset = pkts[i].offset; + pkt->len = pkts[i].len; + pkt->pkt_nb = i; + pkt->valid = pkts[i].valid; + if (pkt->len > pkt_stream->max_pkt_len) + pkt_stream->max_pkt_len = pkt->len; } ifobj->pkt_stream = pkt_stream; @@ -926,8 +937,6 @@ static int receive_pkts(struct test_spec *test, struct pollfd *fds) pthread_mutex_lock(&pacing_mutex); pkts_in_flight -= pkts_sent; - if (pkts_in_flight < umem->num_frames) - pthread_cond_signal(&pacing_cond); pthread_mutex_unlock(&pacing_mutex); pkts_sent = 0; } @@ -938,10 +947,18 @@ static int receive_pkts(struct test_spec *test, struct pollfd *fds) static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeout) { struct xsk_socket_info *xsk = ifobject->xsk; + struct xsk_umem_info *umem = ifobject->umem; + u32 i, idx = 0, valid_pkts = 0, buffer_len; bool use_poll = ifobject->use_poll; - u32 i, idx = 0, valid_pkts = 0; int ret; + buffer_len = pkt_get_buffer_len(umem, ifobject->pkt_stream->max_pkt_len); + /* pkts_in_flight might be negative if many invalid packets are sent */ + if (pkts_in_flight >= (int)((umem_size(umem) - BATCH_SIZE * buffer_len) / buffer_len)) { + kick_tx(xsk); + return TEST_CONTINUE; + } + while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) { if (use_poll) { ret = poll(fds, 1, POLL_TMOUT); @@ -972,7 +989,7 @@ static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeo if (!pkt) break; - tx_desc->addr = pkt_get_addr(pkt, ifobject->umem); + tx_desc->addr = pkt_get_addr(pkt, umem); tx_desc->len = pkt->len; if (pkt->valid) { valid_pkts++; @@ -982,11 +999,6 @@ static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeo pthread_mutex_lock(&pacing_mutex); pkts_in_flight += valid_pkts; - /* pkts_in_flight might be negative if many invalid packets are sent */ - if (pkts_in_flight >= (int)(ifobject->umem->num_frames - BATCH_SIZE)) { - kick_tx(xsk); - pthread_cond_wait(&pacing_cond, &pacing_mutex); - } 
pthread_mutex_unlock(&pacing_mutex); xsk_ring_prod__submit(&xsk->tx, i); @@ -1032,9 +1044,11 @@ static int send_pkts(struct test_spec *test, struct ifobject *ifobject) while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) { ret = __send_pkts(ifobject, &fds, timeout); + if (ret == TEST_CONTINUE && !test->fail) + continue; if ((ret || test->fail) && !timeout) return TEST_FAILURE; - else if (ret == TEST_PASS && timeout) + if (ret == TEST_PASS && timeout) return ret; } @@ -1319,12 +1333,8 @@ static void *worker_testapp_validate_rx(void *arg) if (!err && ifobject->validation_func) err = ifobject->validation_func(ifobject); - if (err) { + if (err) report_failure(test); - pthread_mutex_lock(&pacing_mutex); - pthread_cond_signal(&pacing_cond); - pthread_mutex_unlock(&pacing_mutex); - } pthread_exit(NULL); } diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h index 00862732e751..aaf27e067640 100644 --- a/tools/testing/selftests/bpf/xskxceiver.h +++ b/tools/testing/selftests/bpf/xskxceiver.h @@ -121,6 +121,7 @@ struct pkt_stream { u32 nb_pkts; u32 current_pkt_nb; struct pkt *pkts; + u32 max_pkt_len; }; struct ifobject; @@ -173,7 +174,6 @@ struct test_spec { pthread_barrier_t barr; pthread_mutex_t pacing_mutex = PTHREAD_MUTEX_INITIALIZER; -pthread_cond_t pacing_cond = PTHREAD_COND_INITIALIZER; int pkts_in_flight;
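As a closing note on the series: the test functions now return the TEST_* codes from xskxceiver.h, run_pkt_test() reports the outcome in one place, and __send_pkts() uses TEST_CONTINUE to tell send_pkts() to back off and retry. Below is a condensed, self-contained sketch of that reporting convention, where testapp_foo() and the printf() calls merely stand in for a real test body and the ksft helpers:

    #include <stdio.h>

    /* Return codes, mirroring xskxceiver.h. */
    #define TEST_PASS       0
    #define TEST_FAILURE    -1
    #define TEST_CONTINUE   1
    #define TEST_SKIP       2

    /* Hypothetical test body: skips in one mode, otherwise validates traffic. */
    static int testapp_foo(int zero_copy)
    {
            if (zero_copy) {
                    printf("ok # SKIP FOO not supported in zero-copy mode\n");
                    return TEST_SKIP;
            }
            return TEST_PASS;  /* stands in for testapp_validate_traffic() */
    }

    int main(void)
    {
            int ret = testapp_foo(0);

            /* Only TEST_PASS is reported as a pass; a skip has already
             * reported itself inside the test body.
             */
            if (ret == TEST_PASS)
                    printf("ok FOO\n");
            else if (ret == TEST_FAILURE)
                    printf("not ok FOO\n");
            return 0;
    }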