// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
 * AF_XDP user-space access library.
 *
 * Copyright (c) 2018 - 2019 Intel Corporation.
 *
 * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
 */
# include <errno.h>
# include <stdlib.h>
# include <string.h>
# include <unistd.h>
# include <arpa/inet.h>
# include <asm/barrier.h>
# include <linux/compiler.h>
# include <linux/ethtool.h>
# include <linux/filter.h>
# include <linux/if_ether.h>
# include <linux/if_packet.h>
# include <linux/if_xdp.h>
# include <linux/sockios.h>
# include <net/if.h>
# include <sys/ioctl.h>
# include <sys/mman.h>
# include <sys/socket.h>
# include <sys/types.h>
# include "bpf.h"
# include "libbpf.h"
# include "libbpf_internal.h"
# include "xsk.h"
# ifndef SOL_XDP
# define SOL_XDP 283
# endif
# ifndef AF_XDP
# define AF_XDP 44
# endif
# ifndef PF_XDP
# define PF_XDP AF_XDP
# endif
/* Per-umem state: the user-registered packet buffer area, the fill and
 * completion rings that move frame addresses between user space and the
 * kernel, and the AF_XDP socket fd the umem was registered on.
 */
struct xsk_umem {
	struct xsk_ring_prod *fill;	/* fill ring: user space -> kernel */
	struct xsk_ring_cons *comp;	/* completion ring: kernel -> user space */
	char *umem_area;		/* start of the registered memory area */
	struct xsk_umem_config config;
	int fd;				/* AF_XDP socket that registered the umem */
	int refcount;			/* number of xsk_sockets using this umem */
};
/* Per-socket state for one AF_XDP socket bound to an (ifname, queue_id)
 * pair on top of a umem.
 */
struct xsk_socket {
	struct xsk_ring_cons *rx;	/* RX descriptor ring */
	struct xsk_ring_prod *tx;	/* TX descriptor ring */
	__u64 outstanding_tx;		/* TX descs submitted, completion pending */
	struct xsk_umem *umem;		/* backing umem; first socket shares its fd */
	struct xsk_socket_config config;
	int fd;
	int ifindex;
	int prog_fd;			/* attached XDP program fd, -1 when none */
	int xsks_map_fd;		/* BPF XSKMAP: queue_id -> socket fd */
	__u32 queue_id;
	char ifname[IFNAMSIZ];
};
/* Netlink query context for XDP program attachment state.
 * NOTE(review): not referenced by any code visible in this file — possibly
 * used elsewhere or dead; confirm before removing.
 */
struct xsk_nl_info {
	bool xdp_prog_attached;
	int ifindex;
	int fd;
};
int xsk_umem__fd ( const struct xsk_umem * umem )
{
return umem ? umem - > fd : - EINVAL ;
}
int xsk_socket__fd ( const struct xsk_socket * xsk )
{
return xsk ? xsk - > fd : - EINVAL ;
}
/* True iff @buffer starts on a page boundary. */
static bool xsk_page_aligned(void *buffer)
{
	unsigned long addr = (unsigned long)buffer;

	return (addr & (getpagesize() - 1)) == 0;
}
static void xsk_set_umem_config ( struct xsk_umem_config * cfg ,
const struct xsk_umem_config * usr_cfg )
{
if ( ! usr_cfg ) {
cfg - > fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS ;
cfg - > comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS ;
cfg - > frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE ;
cfg - > frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM ;
2019-08-27 02:25:27 +00:00
cfg - > flags = XSK_UMEM__DEFAULT_FLAGS ;
2019-02-21 10:21:26 +01:00
return ;
}
cfg - > fill_size = usr_cfg - > fill_size ;
cfg - > comp_size = usr_cfg - > comp_size ;
cfg - > frame_size = usr_cfg - > frame_size ;
cfg - > frame_headroom = usr_cfg - > frame_headroom ;
2019-08-27 02:25:27 +00:00
cfg - > flags = usr_cfg - > flags ;
2019-02-21 10:21:26 +01:00
}
2019-03-12 09:59:45 +01:00
static int xsk_set_xdp_socket_config ( struct xsk_socket_config * cfg ,
const struct xsk_socket_config * usr_cfg )
2019-02-21 10:21:26 +01:00
{
if ( ! usr_cfg ) {
cfg - > rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS ;
cfg - > tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS ;
cfg - > libbpf_flags = 0 ;
cfg - > xdp_flags = 0 ;
cfg - > bind_flags = 0 ;
2019-03-12 09:59:45 +01:00
return 0 ;
2019-02-21 10:21:26 +01:00
}
2019-03-12 09:59:45 +01:00
if ( usr_cfg - > libbpf_flags & ~ XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD )
return - EINVAL ;
2019-02-21 10:21:26 +01:00
cfg - > rx_size = usr_cfg - > rx_size ;
cfg - > tx_size = usr_cfg - > tx_size ;
cfg - > libbpf_flags = usr_cfg - > libbpf_flags ;
cfg - > xdp_flags = usr_cfg - > xdp_flags ;
cfg - > bind_flags = usr_cfg - > bind_flags ;
2019-03-12 09:59:45 +01:00
return 0 ;
2019-02-21 10:21:26 +01:00
}
2019-08-27 02:25:27 +00:00
/*
 * Register @umem_area (@size bytes) as an AF_XDP umem and map its fill and
 * completion rings into @fill / @comp.  On success, *umem_ptr owns the new
 * umem, to be released with xsk_umem__delete().  Returns 0 or -errno.
 *
 * Fix vs. original: on completion-ring mmap failure, the single 'map'
 * variable had already been overwritten with MAP_FAILED, so the out_mmap
 * path munmap'ed an invalid pointer and leaked the fill-ring mapping.
 * Separate fill_map/comp_map pointers (mirroring rx_map/tx_map in
 * xsk_socket__create) make the unwind correct.
 */
int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
			    __u64 size, struct xsk_ring_prod *fill,
			    struct xsk_ring_cons *comp,
			    const struct xsk_umem_config *usr_config)
{
	struct xdp_mmap_offsets off;
	struct xdp_umem_reg mr;
	struct xsk_umem *umem;
	void *fill_map, *comp_map;
	socklen_t optlen;
	int err;

	if (!umem_area || !umem_ptr || !fill || !comp)
		return -EFAULT;
	/* NOTE(review): this only rejects "size == 0 AND unaligned"; a
	 * non-zero size with an unaligned area is accepted — confirm this
	 * matches the intended contract. */
	if (!size && !xsk_page_aligned(umem_area))
		return -EINVAL;

	umem = calloc(1, sizeof(*umem));
	if (!umem)
		return -ENOMEM;

	/* The umem gets its own AF_XDP socket to be registered on. */
	umem->fd = socket(AF_XDP, SOCK_RAW, 0);
	if (umem->fd < 0) {
		err = -errno;
		goto out_umem_alloc;
	}

	umem->umem_area = umem_area;
	xsk_set_umem_config(&umem->config, usr_config);

	/* Register the memory area with the kernel. */
	mr.addr = (uintptr_t)umem_area;
	mr.len = size;
	mr.chunk_size = umem->config.frame_size;
	mr.headroom = umem->config.frame_headroom;
	mr.flags = umem->config.flags;

	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
	if (err) {
		err = -errno;
		goto out_socket;
	}

	/* Size both rings before mmap'ing them. */
	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_FILL_RING,
			 &umem->config.fill_size,
			 sizeof(umem->config.fill_size));
	if (err) {
		err = -errno;
		goto out_socket;
	}
	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_COMPLETION_RING,
			 &umem->config.comp_size,
			 sizeof(umem->config.comp_size));
	if (err) {
		err = -errno;
		goto out_socket;
	}

	optlen = sizeof(off);
	err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
	if (err) {
		err = -errno;
		goto out_socket;
	}

	/* Map the fill ring; entries are u64 frame addresses. */
	fill_map = mmap(NULL,
			off.fr.desc + umem->config.fill_size * sizeof(__u64),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			umem->fd, XDP_UMEM_PGOFF_FILL_RING);
	if (fill_map == MAP_FAILED) {
		err = -errno;
		goto out_socket;
	}

	umem->fill = fill;
	fill->mask = umem->config.fill_size - 1;
	fill->size = umem->config.fill_size;
	fill->producer = fill_map + off.fr.producer;
	fill->consumer = fill_map + off.fr.consumer;
	fill->flags = fill_map + off.fr.flags;
	fill->ring = fill_map + off.fr.desc;
	/* Treat the whole fill ring as free so the first reserve succeeds. */
	fill->cached_cons = umem->config.fill_size;

	/* Map the completion ring; entries are u64 frame addresses. */
	comp_map = mmap(NULL,
			off.cr.desc + umem->config.comp_size * sizeof(__u64),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			umem->fd, XDP_UMEM_PGOFF_COMPLETION_RING);
	if (comp_map == MAP_FAILED) {
		err = -errno;
		goto out_mmap;
	}

	umem->comp = comp;
	comp->mask = umem->config.comp_size - 1;
	comp->size = umem->config.comp_size;
	comp->producer = comp_map + off.cr.producer;
	comp->consumer = comp_map + off.cr.consumer;
	comp->flags = comp_map + off.cr.flags;
	comp->ring = comp_map + off.cr.desc;

	*umem_ptr = umem;
	return 0;

out_mmap:
	munmap(fill_map, off.fr.desc + umem->config.fill_size * sizeof(__u64));
out_socket:
	close(umem->fd);
out_umem_alloc:
	free(umem);
	return err;
}
2019-08-27 02:25:27 +00:00
/* Layout of struct xsk_umem_config before the 'flags' member was added
 * (the LIBBPF_0.0.2 ABI); used to copy only the legacy fields from callers
 * built against the old header.
 */
struct xsk_umem_config_v1 {
	__u32 fill_size;
	__u32 comp_size;
	__u32 frame_size;
	__u32 frame_headroom;
};
/*
 * LIBBPF_0.0.2 ABI compatibility wrapper: widen a legacy (flags-less)
 * umem config and forward to the current implementation.
 *
 * Fix vs. original: the old code memcpy'd from @usr_config unconditionally,
 * crashing when callers passed NULL — which is the documented way to
 * request the default configuration (see xsk_set_umem_config()).
 */
int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area,
			    __u64 size, struct xsk_ring_prod *fill,
			    struct xsk_ring_cons *comp,
			    const struct xsk_umem_config *usr_config)
{
	struct xsk_umem_config config;

	/* NULL means "use defaults"; let v0_0_4 fill them in. */
	if (!usr_config)
		return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size,
					       fill, comp, NULL);

	/* Copy only the legacy fields and zero the new 'flags' member. */
	memcpy(&config, usr_config, sizeof(struct xsk_umem_config_v1));
	config.flags = 0;

	return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size, fill, comp,
				       &config);
}
/* Symbol versioning: old binaries linked against LIBBPF_0.0.2 keep the
 * legacy entry point; LIBBPF_0.0.4 (the '@@' default) gets the new one. */
asm(".symver xsk_umem__create_v0_0_2, xsk_umem__create@LIBBPF_0.0.2");
asm(".symver xsk_umem__create_v0_0_4, xsk_umem__create@@LIBBPF_0.0.4");
2019-02-21 10:21:26 +01:00
/* Build, load, and attach the built-in XDP redirect program for @xsk.
 * On success xsk->prog_fd holds the loaded program's fd; on failure the
 * verifier log is printed and a negative error is returned.
 */
static int xsk_load_xdp_prog(struct xsk_socket *xsk)
{
	static const int log_buf_size = 16 * 1024;
	char log_buf[log_buf_size];
	int err, prog_fd;

	/* This is the C-program:
	 * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
	 * {
	 *	int index = ctx->rx_queue_index;
	 *
	 *	// A set entry here means that the corresponding queue_id
	 *	// has an active AF_XDP socket bound to it.
	 *	if (bpf_map_lookup_elem(&xsks_map, &index))
	 *		return bpf_redirect_map(&xsks_map, index, 0);
	 *
	 *	return XDP_PASS;
	 * }
	 */
	struct bpf_insn prog[] = {
		/* r1 = *(u32 *)(r1 + 16), i.e. load ctx->rx_queue_index */
		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 16),
		/* *(u32 *)(r10 - 4) = r1: spill the index to the stack */
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1, -4),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
		BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		/* default return value: r0 = 2 (XDP_PASS) */
		BPF_MOV32_IMM(BPF_REG_0, 2),
		/* if r1 == 0 goto +5: no map entry, fall through to exit */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5),
		/* r2 = *(u32 *)(r10 - 4) */
		BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -4),
		BPF_MOV32_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
		/* The jumps are to this instruction */
		BPF_EXIT_INSN(),
	};
	size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);

	prog_fd = bpf_load_program(BPF_PROG_TYPE_XDP, prog, insns_cnt,
				   "LGPL-2.1 or BSD-2-Clause", 0, log_buf,
				   log_buf_size);
	if (prog_fd < 0) {
		pr_warning("BPF log buffer:\n%s", log_buf);
		return prog_fd;
	}

	err = bpf_set_link_xdp_fd(xsk->ifindex, prog_fd, xsk->config.xdp_flags);
	if (err) {
		close(prog_fd);
		return err;
	}

	xsk->prog_fd = prog_fd;
	return 0;
}
static int xsk_get_max_queues ( struct xsk_socket * xsk )
{
2019-07-23 15:08:10 +03:00
struct ethtool_channels channels = { . cmd = ETHTOOL_GCHANNELS } ;
struct ifreq ifr = { } ;
2019-02-21 10:21:26 +01:00
int fd , err , ret ;
fd = socket ( AF_INET , SOCK_DGRAM , 0 ) ;
if ( fd < 0 )
return - errno ;
ifr . ifr_data = ( void * ) & channels ;
2019-07-24 14:47:53 -07:00
memcpy ( ifr . ifr_name , xsk - > ifname , IFNAMSIZ - 1 ) ;
2019-07-02 08:16:20 -07:00
ifr . ifr_name [ IFNAMSIZ - 1 ] = ' \0 ' ;
2019-02-21 10:21:26 +01:00
err = ioctl ( fd , SIOCETHTOOL , & ifr ) ;
if ( err & & errno ! = EOPNOTSUPP ) {
ret = - errno ;
goto out ;
}
2019-07-23 15:08:10 +03:00
if ( err | | channels . max_combined = = 0 )
2019-02-21 10:21:26 +01:00
/* If the device says it has no channels, then all traffic
* is sent to a single stream , so max queues = 1.
*/
ret = 1 ;
else
ret = channels . max_combined ;
out :
close ( fd ) ;
return ret ;
}
static int xsk_create_bpf_maps ( struct xsk_socket * xsk )
{
int max_queues ;
int fd ;
max_queues = xsk_get_max_queues ( xsk ) ;
if ( max_queues < 0 )
return max_queues ;
2019-06-06 13:59:43 -07:00
fd = bpf_create_map_name ( BPF_MAP_TYPE_XSKMAP , " xsks_map " ,
2019-02-21 10:21:26 +01:00
sizeof ( int ) , sizeof ( int ) , max_queues , 0 ) ;
if ( fd < 0 )
return fd ;
xsk - > xsks_map_fd = fd ;
return 0 ;
}
/* Remove this socket's queue entry from the xsks_map and drop our
 * reference to the map fd. */
static void xsk_delete_bpf_maps(struct xsk_socket *xsk)
{
	bpf_map_delete_elem(xsk->xsks_map_fd, &xsk->queue_id);
	close(xsk->xsks_map_fd);
}
2019-04-30 14:45:36 +02:00
/* Find the "xsks_map" among the maps referenced by the XDP program already
 * attached at xsk->prog_fd, and store its fd in xsk->xsks_map_fd.
 * Returns 0 on success, -ENOENT if no such map exists, or another
 * negative error.
 */
static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
{
	__u32 i, *map_ids, num_maps, prog_len = sizeof(struct bpf_prog_info);
	__u32 map_len = sizeof(struct bpf_map_info);
	struct bpf_prog_info prog_info = {};
	struct bpf_map_info map_info;
	int fd, err;

	/* First query learns only how many map ids the program holds. */
	err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len);
	if (err)
		return err;

	num_maps = prog_info.nr_map_ids;

	map_ids = calloc(prog_info.nr_map_ids, sizeof(*map_ids));
	if (!map_ids)
		return -ENOMEM;

	/* Second query fills in the map id array. */
	memset(&prog_info, 0, prog_len);
	prog_info.nr_map_ids = num_maps;
	prog_info.map_ids = (__u64)(unsigned long)map_ids;

	err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len);
	if (err)
		goto out_map_ids;

	xsk->xsks_map_fd = -1;

	for (i = 0; i < prog_info.nr_map_ids; i++) {
		fd = bpf_map_get_fd_by_id(map_ids[i]);
		if (fd < 0)
			continue;

		/* NOTE(review): map_info is not zeroed between iterations;
		 * this relies on the kernel overwriting the fields that are
		 * read below — confirm. */
		err = bpf_obj_get_info_by_fd(fd, &map_info, &map_len);
		if (err) {
			close(fd);
			continue;
		}

		if (!strcmp(map_info.name, "xsks_map")) {
			/* Keep the fd open; it becomes our map handle. */
			xsk->xsks_map_fd = fd;
			continue;
		}

		close(fd);
	}

	err = 0;
	if (xsk->xsks_map_fd == -1)
		err = -ENOENT;

out_map_ids:
	free(map_ids);
	return err;
}
2019-04-30 14:45:36 +02:00
/* Insert this socket's fd into the xsks_map under its queue id, so the
 * XDP program can redirect packets from that queue to this socket. */
static int xsk_set_bpf_maps(struct xsk_socket *xsk)
{
	return bpf_map_update_elem(xsk->xsks_map_fd, &xsk->queue_id,
				   &xsk->fd, 0);
}
2019-02-21 10:21:26 +01:00
/* Ensure an XDP program with an xsks_map is attached to the interface,
 * then register this socket in the map.  Loads the built-in program when
 * none is attached, otherwise reuses the existing program and its map.
 * Returns 0 or a negative error; all partially-created state is undone
 * on failure.
 */
static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
{
	__u32 prog_id = 0;
	int err;

	err = bpf_get_link_xdp_id(xsk->ifindex, &prog_id,
				  xsk->config.xdp_flags);
	if (err)
		return err;

	if (!prog_id) {
		/* Nothing attached: create our map, then load and attach
		 * the built-in redirect program. */
		err = xsk_create_bpf_maps(xsk);
		if (err)
			return err;

		err = xsk_load_xdp_prog(xsk);
		if (err) {
			xsk_delete_bpf_maps(xsk);
			return err;
		}
	} else {
		/* Reuse the already-attached program and locate its map.
		 * NOTE(review): bpf_prog_get_fd_by_id() can return a
		 * negative fd that is not checked here — confirm. */
		xsk->prog_fd = bpf_prog_get_fd_by_id(prog_id);
		err = xsk_lookup_bpf_maps(xsk);
		if (err) {
			close(xsk->prog_fd);
			return err;
		}
	}

	err = xsk_set_bpf_maps(xsk);
	if (err) {
		xsk_delete_bpf_maps(xsk);
		close(xsk->prog_fd);
		return err;
	}

	return 0;
}
/*
 * Create an AF_XDP socket on @ifname/@queue_id on top of @umem, mapping
 * the RX and TX descriptor rings into @rx and @tx, binding the socket,
 * and (unless inhibited via libbpf_flags) attaching an XDP program that
 * redirects traffic to it.  On success *xsk_ptr owns the new socket, to
 * be released with xsk_socket__delete().  Returns 0 or -errno.
 */
int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
		       __u32 queue_id, struct xsk_umem *umem,
		       struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
		       const struct xsk_socket_config *usr_config)
{
	void *rx_map = NULL, *tx_map = NULL;
	struct sockaddr_xdp sxdp = {};
	struct xdp_mmap_offsets off;
	struct xsk_socket *xsk;
	socklen_t optlen;
	int err;

	if (!umem || !xsk_ptr || !rx || !tx)
		return -EFAULT;

	/* Only one socket per umem is supported by this version. */
	if (umem->refcount) {
		pr_warning("Error: shared umems not supported by libbpf.\n");
		return -EBUSY;
	}

	xsk = calloc(1, sizeof(*xsk));
	if (!xsk)
		return -ENOMEM;

	/* The first socket reuses the umem's fd; later sockets would get
	 * their own (currently unreachable due to the -EBUSY check above). */
	if (umem->refcount++ > 0) {
		xsk->fd = socket(AF_XDP, SOCK_RAW, 0);
		if (xsk->fd < 0) {
			err = -errno;
			goto out_xsk_alloc;
		}
	} else {
		xsk->fd = umem->fd;
	}

	xsk->outstanding_tx = 0;
	xsk->queue_id = queue_id;
	xsk->umem = umem;
	xsk->ifindex = if_nametoindex(ifname);
	if (!xsk->ifindex) {
		err = -errno;
		goto out_socket;
	}
	memcpy(xsk->ifname, ifname, IFNAMSIZ - 1);
	xsk->ifname[IFNAMSIZ - 1] = '\0';

	err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
	if (err)
		goto out_socket;

	/* Size the rings before mmap'ing them.  rx and tx are always
	 * non-NULL here (see the -EFAULT check); the conditionals keep the
	 * code ready for optional rings. */
	if (rx) {
		err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
				 &xsk->config.rx_size,
				 sizeof(xsk->config.rx_size));
		if (err) {
			err = -errno;
			goto out_socket;
		}
	}
	if (tx) {
		err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING,
				 &xsk->config.tx_size,
				 sizeof(xsk->config.tx_size));
		if (err) {
			err = -errno;
			goto out_socket;
		}
	}

	optlen = sizeof(off);
	err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
	if (err) {
		err = -errno;
		goto out_socket;
	}

	if (rx) {
		/* Map the RX descriptor ring and wire up the ring struct. */
		rx_map = mmap(NULL, off.rx.desc +
			      xsk->config.rx_size * sizeof(struct xdp_desc),
			      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			      xsk->fd, XDP_PGOFF_RX_RING);
		if (rx_map == MAP_FAILED) {
			err = -errno;
			goto out_socket;
		}

		rx->mask = xsk->config.rx_size - 1;
		rx->size = xsk->config.rx_size;
		rx->producer = rx_map + off.rx.producer;
		rx->consumer = rx_map + off.rx.consumer;
		rx->flags = rx_map + off.rx.flags;
		rx->ring = rx_map + off.rx.desc;
	}
	xsk->rx = rx;

	if (tx) {
		/* Map the TX descriptor ring and wire up the ring struct. */
		tx_map = mmap(NULL, off.tx.desc +
			      xsk->config.tx_size * sizeof(struct xdp_desc),
			      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			      xsk->fd, XDP_PGOFF_TX_RING);
		if (tx_map == MAP_FAILED) {
			err = -errno;
			goto out_mmap_rx;
		}

		tx->mask = xsk->config.tx_size - 1;
		tx->size = xsk->config.tx_size;
		tx->producer = tx_map + off.tx.producer;
		tx->consumer = tx_map + off.tx.consumer;
		tx->flags = tx_map + off.tx.flags;
		tx->ring = tx_map + off.tx.desc;
		/* The whole TX ring starts out free. */
		tx->cached_cons = xsk->config.tx_size;
	}
	xsk->tx = tx;

	sxdp.sxdp_family = PF_XDP;
	sxdp.sxdp_ifindex = xsk->ifindex;
	sxdp.sxdp_queue_id = xsk->queue_id;
	sxdp.sxdp_flags = xsk->config.bind_flags;

	err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
	if (err) {
		err = -errno;
		goto out_mmap_tx;
	}

	xsk->prog_fd = -1;

	/* Attach (or reuse) an XDP program unless the caller opted out. */
	if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
		err = xsk_setup_xdp_prog(xsk);
		if (err)
			goto out_mmap_tx;
	}

	*xsk_ptr = xsk;
	return 0;

out_mmap_tx:
	if (tx)
		munmap(tx_map, off.tx.desc +
		       xsk->config.tx_size * sizeof(struct xdp_desc));
out_mmap_rx:
	if (rx)
		munmap(rx_map, off.rx.desc +
		       xsk->config.rx_size * sizeof(struct xdp_desc));
out_socket:
	/* Only close fds we created; the first socket borrows the umem's. */
	if (--umem->refcount)
		close(xsk->fd);
out_xsk_alloc:
	free(xsk);
	return err;
}
int xsk_umem__delete ( struct xsk_umem * umem )
{
struct xdp_mmap_offsets off ;
socklen_t optlen ;
int err ;
if ( ! umem )
return 0 ;
if ( umem - > refcount )
return - EBUSY ;
optlen = sizeof ( off ) ;
err = getsockopt ( umem - > fd , SOL_XDP , XDP_MMAP_OFFSETS , & off , & optlen ) ;
if ( ! err ) {
2019-05-06 11:24:43 +02:00
munmap ( umem - > fill - > ring - off . fr . desc ,
off . fr . desc + umem - > config . fill_size * sizeof ( __u64 ) ) ;
munmap ( umem - > comp - > ring - off . cr . desc ,
off . cr . desc + umem - > config . comp_size * sizeof ( __u64 ) ) ;
2019-02-21 10:21:26 +01:00
}
close ( umem - > fd ) ;
free ( umem ) ;
return 0 ;
}
void xsk_socket__delete ( struct xsk_socket * xsk )
{
2019-04-30 14:45:35 +02:00
size_t desc_sz = sizeof ( struct xdp_desc ) ;
2019-02-21 10:21:26 +01:00
struct xdp_mmap_offsets off ;
socklen_t optlen ;
int err ;
if ( ! xsk )
return ;
2019-06-06 13:59:43 -07:00
if ( xsk - > prog_fd ! = - 1 ) {
xsk_delete_bpf_maps ( xsk ) ;
close ( xsk - > prog_fd ) ;
}
2019-02-21 10:21:26 +01:00
optlen = sizeof ( off ) ;
err = getsockopt ( xsk - > fd , SOL_XDP , XDP_MMAP_OFFSETS , & off , & optlen ) ;
if ( ! err ) {
2019-04-30 14:45:35 +02:00
if ( xsk - > rx ) {
2019-05-06 11:24:43 +02:00
munmap ( xsk - > rx - > ring - off . rx . desc ,
off . rx . desc + xsk - > config . rx_size * desc_sz ) ;
2019-04-30 14:45:35 +02:00
}
if ( xsk - > tx ) {
2019-05-06 11:24:43 +02:00
munmap ( xsk - > tx - > ring - off . tx . desc ,
off . tx . desc + xsk - > config . tx_size * desc_sz ) ;
2019-04-30 14:45:35 +02:00
}
2019-02-21 10:21:26 +01:00
}
xsk - > umem - > refcount - - ;
/* Do not close an fd that also has an associated umem connected
* to it .
*/
if ( xsk - > fd ! = xsk - > umem - > fd )
close ( xsk - > fd ) ;
free ( xsk ) ;
}