// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * AF_XDP user-space access library.
 *
 * Copyright (c) 2018 - 2019 Intel Corporation.
 *
 * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
 */

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <asm/barrier.h>
#include <linux/compiler.h>
#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/if_packet.h>
#include <linux/if_xdp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#include "xsk.h"
#include "bpf_util.h"

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

#ifndef AF_XDP
#define AF_XDP 44
#endif

#ifndef PF_XDP
#define PF_XDP AF_XDP
#endif

#define pr_warn(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)

#define XSKMAP_SIZE 1
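
/* Internal bookkeeping: a UMEM remembers the fill/completion rings passed to
 * xsk_umem__create() until the first socket consumes them, and keeps one
 * refcounted xsk_ctx per (ifindex, queue_id) pair it is bound to.
 */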
struct xsk_umem {
	struct xsk_ring_prod *fill_save;
	struct xsk_ring_cons *comp_save;
	char *umem_area;
	struct xsk_umem_config config;
	int fd;
	int refcount;
	struct list_head ctx_list;
	bool rx_ring_setup_done;
	bool tx_ring_setup_done;
};

struct xsk_ctx {
	struct xsk_ring_prod *fill;
	struct xsk_ring_cons *comp;
	__u32 queue_id;
	struct xsk_umem *umem;
	int refcount;
	int ifindex;
	struct list_head list;
};

struct xsk_socket {
	struct xsk_ring_cons *rx;
	struct xsk_ring_prod *tx;
	struct xsk_ctx *ctx;
	struct xsk_socket_config config;
	int fd;
};

struct nl_mtu_req {
	struct nlmsghdr nh;
	struct ifinfomsg msg;
	char buf[512];
};

int xsk_umem__fd(const struct xsk_umem *umem)
{
	return umem ? umem->fd : -EINVAL;
}

int xsk_socket__fd(const struct xsk_socket *xsk)
{
	return xsk ? xsk->fd : -EINVAL;
}

static bool xsk_page_aligned(void *buffer)
{
	unsigned long addr = (unsigned long)buffer;

	return !(addr & (getpagesize() - 1));
}
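
/* Populate *cfg with the user-supplied configuration, falling back to the
 * XSK_* defaults when usr_cfg is NULL.
 */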
static void xsk_set_umem_config(struct xsk_umem_config *cfg,
				const struct xsk_umem_config *usr_cfg)
{
	if (!usr_cfg) {
		cfg->fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
		cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		cfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
		cfg->frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM;
		cfg->flags = XSK_UMEM__DEFAULT_FLAGS;
		return;
	}

	cfg->fill_size = usr_cfg->fill_size;
	cfg->comp_size = usr_cfg->comp_size;
	cfg->frame_size = usr_cfg->frame_size;
	cfg->frame_headroom = usr_cfg->frame_headroom;
	cfg->flags = usr_cfg->flags;
}

static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
				     const struct xsk_socket_config *usr_cfg)
{
	if (!usr_cfg) {
		cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		cfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
		cfg->bind_flags = 0;
		return 0;
	}

	cfg->rx_size = usr_cfg->rx_size;
	cfg->tx_size = usr_cfg->tx_size;
	cfg->bind_flags = usr_cfg->bind_flags;

	return 0;
}
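
/* Query the kernel for the mmap offsets of the four XSK rings (rx, tx, fill,
 * completion) via the XDP_MMAP_OFFSETS sockopt. Anything other than an exact
 * size match for the returned struct is treated as an error.
 */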
static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off)
{
	socklen_t optlen;
	int err;

	optlen = sizeof(*off);
	err = getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, off, &optlen);
	if (err)
		return err;

	if (optlen == sizeof(*off))
		return 0;

	return -EINVAL;
}
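
/* Size the fill and completion rings via setsockopt() and map them into user
 * space. If mapping the completion ring fails, the already-mapped fill ring
 * is unmapped again.
 */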
static int xsk_create_umem_rings(struct xsk_umem *umem, int fd,
				 struct xsk_ring_prod *fill,
				 struct xsk_ring_cons *comp)
{
	struct xdp_mmap_offsets off;
	void *map;
	int err;

	err = setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING,
			 &umem->config.fill_size,
			 sizeof(umem->config.fill_size));
	if (err)
		return -errno;

	err = setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING,
			 &umem->config.comp_size,
			 sizeof(umem->config.comp_size));
	if (err)
		return -errno;

	err = xsk_get_mmap_offsets(fd, &off);
	if (err)
		return -errno;

	map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64),
		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
		   XDP_UMEM_PGOFF_FILL_RING);
	if (map == MAP_FAILED)
		return -errno;

	fill->mask = umem->config.fill_size - 1;
	fill->size = umem->config.fill_size;
	fill->producer = map + off.fr.producer;
	fill->consumer = map + off.fr.consumer;
	fill->flags = map + off.fr.flags;
	fill->ring = map + off.fr.desc;
	fill->cached_cons = umem->config.fill_size;

	map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64),
		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
		   XDP_UMEM_PGOFF_COMPLETION_RING);
	if (map == MAP_FAILED) {
		err = -errno;
		goto out_mmap;
	}

	comp->mask = umem->config.comp_size - 1;
	comp->size = umem->config.comp_size;
	comp->producer = map + off.cr.producer;
	comp->consumer = map + off.cr.consumer;
	comp->flags = map + off.cr.flags;
	comp->ring = map + off.cr.desc;

	return 0;

out_mmap:
	/* map was clobbered by the failed second mmap(); unmap the fill ring
	 * through its saved base instead.
	 */
	munmap(fill->ring - off.fr.desc,
	       off.fr.desc + umem->config.fill_size * sizeof(__u64));
	return err;
}

int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area,
		     __u64 size, struct xsk_ring_prod *fill,
		     struct xsk_ring_cons *comp,
		     const struct xsk_umem_config *usr_config)
{
	struct xdp_umem_reg mr;
	struct xsk_umem *umem;
	int err;

	if (!umem_area || !umem_ptr || !fill || !comp)
		return -EFAULT;
	if (!size && !xsk_page_aligned(umem_area))
		return -EINVAL;

	umem = calloc(1, sizeof(*umem));
	if (!umem)
		return -ENOMEM;

	umem->fd = socket(AF_XDP, SOCK_RAW | SOCK_CLOEXEC, 0);
	if (umem->fd < 0) {
		err = -errno;
		goto out_umem_alloc;
	}

	umem->umem_area = umem_area;
	INIT_LIST_HEAD(&umem->ctx_list);
	xsk_set_umem_config(&umem->config, usr_config);

	memset(&mr, 0, sizeof(mr));
	mr.addr = (uintptr_t)umem_area;
	mr.len = size;
	mr.chunk_size = umem->config.frame_size;
	mr.headroom = umem->config.frame_headroom;
	mr.flags = umem->config.flags;

	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
	if (err) {
		err = -errno;
		goto out_socket;
	}

	err = xsk_create_umem_rings(umem, umem->fd, fill, comp);
	if (err)
		goto out_socket;

	umem->fill_save = fill;
	umem->comp_save = comp;
	*umem_ptr = umem;
	return 0;

out_socket:
	close(umem->fd);
out_umem_alloc:
	free(umem);
	return err;
}
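
/* A minimal usage sketch (illustration only; NUM_FRAMES and FRAME_SIZE are
 * hypothetical and error handling is elided). Anonymous mmap() memory is
 * page-aligned, as xsk_page_aligned() requires:
 *
 *	struct xsk_ring_prod fq;
 *	struct xsk_ring_cons cq;
 *	struct xsk_umem *umem;
 *	__u64 size = NUM_FRAMES * FRAME_SIZE;
 *	void *bufs;
 *
 *	bufs = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (xsk_umem__create(&umem, bufs, size, &fq, &cq, NULL))
 *		exit(1);
 */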

bool xsk_is_in_mode(u32 ifindex, int mode)
{
	LIBBPF_OPTS(bpf_xdp_query_opts, opts);
	int ret;

	ret = bpf_xdp_query(ifindex, mode, &opts);
	if (ret) {
		printf("XDP mode query returned error %s\n", strerror(errno));
		return false;
	}

	if (mode == XDP_FLAGS_DRV_MODE)
		return opts.attach_mode == XDP_ATTACHED_DRV;
	else if (mode == XDP_FLAGS_SKB_MODE)
		return opts.attach_mode == XDP_ATTACHED_SKB;

	return false;
}

/* Lifted from netlink.c in tools/lib/bpf */
static int netlink_recvmsg(int sock, struct msghdr *mhdr, int flags)
{
	int len;

	do {
		len = recvmsg(sock, mhdr, flags);
	} while (len < 0 && (errno == EINTR || errno == EAGAIN));

	if (len < 0)
		return -errno;
	return len;
}

/* Lifted from netlink.c in tools/lib/bpf */
static int alloc_iov(struct iovec *iov, int len)
{
	void *nbuf;

	nbuf = realloc(iov->iov_base, len);
	if (!nbuf)
		return -ENOMEM;

	iov->iov_base = nbuf;
	iov->iov_len = len;
	return 0;
}

/* Original version lifted from netlink.c in tools/lib/bpf */
static int netlink_recv(int sock)
{
	struct iovec iov = {};
	struct msghdr mhdr = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
	};
	bool multipart = true;
	struct nlmsgerr *err;
	struct nlmsghdr *nh;
	int len, ret;

	ret = alloc_iov(&iov, 4096);
	if (ret)
		goto done;

	while (multipart) {
		multipart = false;
		len = netlink_recvmsg(sock, &mhdr, MSG_PEEK | MSG_TRUNC);
		if (len < 0) {
			ret = len;
			goto done;
		}

		if (len > iov.iov_len) {
			ret = alloc_iov(&iov, len);
			if (ret)
				goto done;
		}

		len = netlink_recvmsg(sock, &mhdr, 0);
		if (len < 0) {
			ret = len;
			goto done;
		}

		if (len == 0)
			break;

		for (nh = (struct nlmsghdr *)iov.iov_base; NLMSG_OK(nh, len);
		     nh = NLMSG_NEXT(nh, len)) {
			if (nh->nlmsg_flags & NLM_F_MULTI)
				multipart = true;
			switch (nh->nlmsg_type) {
			case NLMSG_ERROR:
				err = (struct nlmsgerr *)NLMSG_DATA(nh);
				if (!err->error)
					continue;
				ret = err->error;
				goto done;
			case NLMSG_DONE:
				ret = 0;
				goto done;
			default:
				break;
			}
		}
	}

	ret = 0;
done:
	free(iov.iov_base);
	return ret;
}
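
/* Set the MTU of the interface with index @ifindex by sending an RTM_NEWLINK
 * request carrying an IFLA_MTU attribute over an rtnetlink socket, then
 * waiting for the kernel's ACK.
 */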
int xsk_set_mtu(int ifindex, int mtu)
{
	struct nl_mtu_req req;
	struct rtattr *rta;
	int fd, ret;

	fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE);
	if (fd < 0)
		return fd;

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.nh.nlmsg_type = RTM_NEWLINK;
	req.msg.ifi_family = AF_UNSPEC;
	req.msg.ifi_index = ifindex;
	rta = (struct rtattr *)(((char *)&req) + NLMSG_ALIGN(req.nh.nlmsg_len));
	rta->rta_type = IFLA_MTU;
	rta->rta_len = RTA_LENGTH(sizeof(unsigned int));
	req.nh.nlmsg_len = NLMSG_ALIGN(req.nh.nlmsg_len) + RTA_LENGTH(sizeof(mtu));
	memcpy(RTA_DATA(rta), &mtu, sizeof(mtu));

	ret = send(fd, &req, req.nh.nlmsg_len, 0);
	if (ret < 0) {
		close(fd);
		return errno;
	}

	ret = netlink_recv(fd);
	close(fd);
	return ret;
}

int xsk_attach_xdp_program(struct bpf_program *prog, int ifindex, u32 xdp_flags)
{
	int prog_fd;

	prog_fd = bpf_program__fd(prog);
	return bpf_xdp_attach(ifindex, prog_fd, xdp_flags, NULL);
}

void xsk_detach_xdp_program(int ifindex, u32 xdp_flags)
{
	bpf_xdp_detach(ifindex, xdp_flags, NULL);
}

void xsk_clear_xskmap(struct bpf_map *map)
{
	u32 index = 0;
	int map_fd;

	map_fd = bpf_map__fd(map);
	bpf_map_delete_elem(map_fd, &index);
}

int xsk_update_xskmap(struct bpf_map *map, struct xsk_socket *xsk)
{
	int map_fd, sock_fd;
	u32 index = 0;

	map_fd = bpf_map__fd(map);
	sock_fd = xsk_socket__fd(xsk);

	return bpf_map_update_elem(map_fd, &index, &sock_fd, 0);
}
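
/* xsk_clear_xskmap() and xsk_update_xskmap() always operate on map index 0
 * (XSKMAP_SIZE is 1). A usage sketch, assuming a loaded bpf_object with an
 * XDP program "xdp_prog" and an XSKMAP "xsk_map" (both names hypothetical):
 *
 *	prog = bpf_object__find_program_by_name(obj, "xdp_prog");
 *	map = bpf_object__find_map_by_name(obj, "xsk_map");
 *	if (xsk_attach_xdp_program(prog, ifindex, XDP_FLAGS_DRV_MODE) ||
 *	    xsk_update_xskmap(map, xsk))
 *		exit(1);
 */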
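
/* Look up the refcounted xsk_ctx that ties @umem to the (ifindex, queue_id)
 * pair, taking a reference if one exists.
 */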
static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, int ifindex,
				   __u32 queue_id)
{
	struct xsk_ctx *ctx;

	if (list_empty(&umem->ctx_list))
		return NULL;

	list_for_each_entry(ctx, &umem->ctx_list, list) {
		if (ctx->ifindex == ifindex && ctx->queue_id == queue_id) {
			ctx->refcount++;
			return ctx;
		}
	}

	return NULL;
}

static void xsk_put_ctx(struct xsk_ctx *ctx, bool unmap)
{
	struct xsk_umem *umem = ctx->umem;
	struct xdp_mmap_offsets off;
	int err;

	if (--ctx->refcount)
		return;

	if (!unmap)
		goto out_free;

	err = xsk_get_mmap_offsets(umem->fd, &off);
	if (err)
		goto out_free;

	munmap(ctx->fill->ring - off.fr.desc,
	       off.fr.desc + umem->config.fill_size * sizeof(__u64));
	munmap(ctx->comp->ring - off.cr.desc,
	       off.cr.desc + umem->config.comp_size * sizeof(__u64));

out_free:
	list_del(&ctx->list);
	free(ctx);
}

static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
				      struct xsk_umem *umem, int ifindex,
				      __u32 queue_id,
				      struct xsk_ring_prod *fill,
				      struct xsk_ring_cons *comp)
{
	struct xsk_ctx *ctx;
	int err;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx)
		return NULL;

	if (!umem->fill_save) {
		err = xsk_create_umem_rings(umem, xsk->fd, fill, comp);
		if (err) {
			free(ctx);
			return NULL;
		}
	} else if (umem->fill_save != fill || umem->comp_save != comp) {
		/* Copy over rings to new structs. */
		memcpy(fill, umem->fill_save, sizeof(*fill));
		memcpy(comp, umem->comp_save, sizeof(*comp));
	}

	ctx->ifindex = ifindex;
	ctx->refcount = 1;
	ctx->umem = umem;
	ctx->queue_id = queue_id;
	ctx->fill = fill;
	ctx->comp = comp;
	list_add(&ctx->list, &umem->ctx_list);
	return ctx;
}
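
/* Create an AF_XDP socket bound to queue @queue_id of interface @ifindex.
 * A UMEM may be shared between several sockets; each (ifindex, queue_id)
 * pair gets its own refcounted xsk_ctx. At least one of @rx and @tx must be
 * non-NULL, and creating a new context additionally requires @fill and @comp.
 */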
int xsk_socket__create_shared(struct xsk_socket **xsk_ptr, int ifindex,
			      __u32 queue_id, struct xsk_umem *umem,
			      struct xsk_ring_cons *rx,
			      struct xsk_ring_prod *tx,
			      struct xsk_ring_prod *fill,
			      struct xsk_ring_cons *comp,
			      const struct xsk_socket_config *usr_config)
{
	bool unmap, rx_setup_done = false, tx_setup_done = false;
	void *rx_map = NULL, *tx_map = NULL;
	struct sockaddr_xdp sxdp = {};
	struct xdp_mmap_offsets off;
	struct xsk_socket *xsk;
	struct xsk_ctx *ctx;
	int err;

	if (!umem || !xsk_ptr || !(rx || tx))
		return -EFAULT;

	unmap = umem->fill_save != fill;

	xsk = calloc(1, sizeof(*xsk));
	if (!xsk)
		return -ENOMEM;

	err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
	if (err)
		goto out_xsk_alloc;

	if (umem->refcount++ > 0) {
		xsk->fd = socket(AF_XDP, SOCK_RAW | SOCK_CLOEXEC, 0);
		if (xsk->fd < 0) {
			err = -errno;
			goto out_xsk_alloc;
		}
	} else {
		xsk->fd = umem->fd;
		rx_setup_done = umem->rx_ring_setup_done;
		tx_setup_done = umem->tx_ring_setup_done;
	}

	ctx = xsk_get_ctx(umem, ifindex, queue_id);
	if (!ctx) {
		if (!fill || !comp) {
			err = -EFAULT;
			goto out_socket;
		}

		ctx = xsk_create_ctx(xsk, umem, ifindex, queue_id, fill, comp);
		if (!ctx) {
			err = -ENOMEM;
			goto out_socket;
		}
	}
	xsk->ctx = ctx;

	if (rx && !rx_setup_done) {
		err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
				 &xsk->config.rx_size,
				 sizeof(xsk->config.rx_size));
		if (err) {
			err = -errno;
			goto out_put_ctx;
		}
		if (xsk->fd == umem->fd)
			umem->rx_ring_setup_done = true;
	}
	if (tx && !tx_setup_done) {
		err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING,
				 &xsk->config.tx_size,
				 sizeof(xsk->config.tx_size));
		if (err) {
			err = -errno;
			goto out_put_ctx;
		}
		if (xsk->fd == umem->fd)
			umem->tx_ring_setup_done = true;
	}

	err = xsk_get_mmap_offsets(xsk->fd, &off);
	if (err) {
		err = -errno;
		goto out_put_ctx;
	}

	if (rx) {
		rx_map = mmap(NULL, off.rx.desc +
			      xsk->config.rx_size * sizeof(struct xdp_desc),
			      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			      xsk->fd, XDP_PGOFF_RX_RING);
		if (rx_map == MAP_FAILED) {
			err = -errno;
			goto out_put_ctx;
		}

		rx->mask = xsk->config.rx_size - 1;
		rx->size = xsk->config.rx_size;
		rx->producer = rx_map + off.rx.producer;
		rx->consumer = rx_map + off.rx.consumer;
		rx->flags = rx_map + off.rx.flags;
		rx->ring = rx_map + off.rx.desc;
		rx->cached_prod = *rx->producer;
		rx->cached_cons = *rx->consumer;
	}
	xsk->rx = rx;

	if (tx) {
		tx_map = mmap(NULL, off.tx.desc +
			      xsk->config.tx_size * sizeof(struct xdp_desc),
			      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			      xsk->fd, XDP_PGOFF_TX_RING);
		if (tx_map == MAP_FAILED) {
			err = -errno;
			goto out_mmap_rx;
		}

		tx->mask = xsk->config.tx_size - 1;
		tx->size = xsk->config.tx_size;
		tx->producer = tx_map + off.tx.producer;
		tx->consumer = tx_map + off.tx.consumer;
		tx->flags = tx_map + off.tx.flags;
		tx->ring = tx_map + off.tx.desc;
		tx->cached_prod = *tx->producer;
		/* cached_cons is r->size bigger than the real consumer pointer.
		 * See xsk_prod_nb_free().
		 */
		tx->cached_cons = *tx->consumer + xsk->config.tx_size;
	}
	xsk->tx = tx;

	sxdp.sxdp_family = PF_XDP;
	sxdp.sxdp_ifindex = ctx->ifindex;
	sxdp.sxdp_queue_id = ctx->queue_id;
	if (umem->refcount > 1) {
		sxdp.sxdp_flags |= XDP_SHARED_UMEM;
		sxdp.sxdp_shared_umem_fd = umem->fd;
	} else {
		sxdp.sxdp_flags = xsk->config.bind_flags;
	}

	err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
	if (err) {
		err = -errno;
		goto out_mmap_tx;
	}

	*xsk_ptr = xsk;
	umem->fill_save = NULL;
	umem->comp_save = NULL;
	return 0;

out_mmap_tx:
	if (tx)
		munmap(tx_map, off.tx.desc +
		       xsk->config.tx_size * sizeof(struct xdp_desc));
out_mmap_rx:
	if (rx)
		munmap(rx_map, off.rx.desc +
		       xsk->config.rx_size * sizeof(struct xdp_desc));
out_put_ctx:
	xsk_put_ctx(ctx, unmap);
out_socket:
	if (--umem->refcount)
		close(xsk->fd);
out_xsk_alloc:
	free(xsk);
	return err;
}

int xsk_socket__create(struct xsk_socket **xsk_ptr, int ifindex,
		       __u32 queue_id, struct xsk_umem *umem,
		       struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
		       const struct xsk_socket_config *usr_config)
{
	if (!umem)
		return -EFAULT;

	return xsk_socket__create_shared(xsk_ptr, ifindex, queue_id, umem,
					 rx, tx, umem->fill_save,
					 umem->comp_save, usr_config);
}
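
/* Single-socket setup sketch, continuing the UMEM example above (error
 * handling elided). The fill/completion rings registered at UMEM creation
 * are reused for the new context:
 *
 *	struct xsk_ring_cons rx;
 *	struct xsk_ring_prod tx;
 *	struct xsk_socket *xsk;
 *
 *	if (xsk_socket__create(&xsk, ifindex, 0, umem, &rx, &tx, NULL))
 *		exit(1);
 */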

int xsk_umem__delete(struct xsk_umem *umem)
{
	struct xdp_mmap_offsets off;
	int err;

	if (!umem)
		return 0;

	if (umem->refcount)
		return -EBUSY;

	err = xsk_get_mmap_offsets(umem->fd, &off);
	if (!err && umem->fill_save && umem->comp_save) {
		munmap(umem->fill_save->ring - off.fr.desc,
		       off.fr.desc + umem->config.fill_size * sizeof(__u64));
		munmap(umem->comp_save->ring - off.cr.desc,
		       off.cr.desc + umem->config.comp_size * sizeof(__u64));
	}

	close(umem->fd);
	free(umem);

	return 0;
}
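
/* Tear down a socket: drop its context reference (unmapping the fill and
 * completion rings on the last put), unmap the RX/TX rings, and close the
 * fd unless it is also the UMEM's fd.
 */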
void xsk_socket__delete(struct xsk_socket *xsk)
{
	size_t desc_sz = sizeof(struct xdp_desc);
	struct xdp_mmap_offsets off;
	struct xsk_umem *umem;
	struct xsk_ctx *ctx;
	int err;

	if (!xsk)
		return;

	ctx = xsk->ctx;
	umem = ctx->umem;

	xsk_put_ctx(ctx, true);

	err = xsk_get_mmap_offsets(xsk->fd, &off);
	if (!err) {
		if (xsk->rx) {
			munmap(xsk->rx->ring - off.rx.desc,
			       off.rx.desc + xsk->config.rx_size * desc_sz);
		}
		if (xsk->tx) {
			munmap(xsk->tx->ring - off.tx.desc,
			       off.tx.desc + xsk->config.tx_size * desc_sz);
		}
	}

	umem->refcount--;
	/* Do not close an fd that also has an associated umem connected
	 * to it.
	 */
	if (xsk->fd != umem->fd)
		close(xsk->fd);
	free(xsk);
}