/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/*
 * AF_XDP user-space access library.
 *
 * Copyright (c) 2018 - 2019 Intel Corporation.
 * Copyright (c) 2019 Facebook
 *
 * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
 */

#ifndef __XSK_H
#define __XSK_H

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <linux/if_xdp.h>
#include <bpf/libbpf.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Do not access these members directly. Use the functions below. */
#define DEFINE_XSK_RING(name) \
struct name { \
	__u32 cached_prod; \
	__u32 cached_cons; \
	__u32 mask; \
	__u32 size; \
	__u32 *producer; \
	__u32 *consumer; \
	void *ring; \
	__u32 *flags; \
}

DEFINE_XSK_RING(xsk_ring_prod);
DEFINE_XSK_RING(xsk_ring_cons);

/* For a detailed explanation of the memory barriers associated with the
 * ring, please take a look at net/xdp/xsk_queue.h.
 */

struct xsk_umem;
struct xsk_socket;

static inline __u64 *xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill,
					      __u32 idx)
{
	__u64 *addrs = (__u64 *)fill->ring;

	return &addrs[idx & fill->mask];
}

static inline const __u64 *
xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx)
{
	const __u64 *addrs = (const __u64 *)comp->ring;

	return &addrs[idx & comp->mask];
}

static inline struct xdp_desc *xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx,
						      __u32 idx)
{
	struct xdp_desc *descs = (struct xdp_desc *)tx->ring;

	return &descs[idx & tx->mask];
}

static inline const struct xdp_desc *
xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx)
{
	const struct xdp_desc *descs = (const struct xdp_desc *)rx->ring;

	return &descs[idx & rx->mask];
}

static inline int xsk_ring_prod__needs_wakeup(const struct xsk_ring_prod *r)
{
	return *r->flags & XDP_RING_NEED_WAKEUP;
}
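
/* Illustrative sketch (not part of this header): when the socket was bound
 * with XDP_USE_NEED_WAKEUP, the kernel only needs to be kicked when the flag
 * is set, which saves syscalls on the fast path. A typical TX kick might look
 * like this, where xsk and tx are hypothetical application variables:
 *
 *	if (xsk_ring_prod__needs_wakeup(&tx))
 *		sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
 */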

static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
{
	__u32 free_entries = r->cached_cons - r->cached_prod;

	if (free_entries >= nb)
		return free_entries;

	/* Refresh the local tail pointer.
	 * cached_cons is r->size bigger than the real consumer pointer so
	 * that this addition can be avoided in the more frequently
	 * executed code that computes free_entries at the beginning of
	 * this function. Without this optimization it would have been
	 * free_entries = r->cached_cons - r->cached_prod + r->size.
	 */
	r->cached_cons = __atomic_load_n(r->consumer, __ATOMIC_ACQUIRE);
	r->cached_cons += r->size;

	return r->cached_cons - r->cached_prod;
}
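
/* Worked example of the offset trick above, with illustrative values only:
 * for a ring of size 8 with a real consumer pointer of 10 and cached_prod
 * of 14, cached_cons is kept at 10 + 8 = 18, so free_entries = 18 - 14 = 4,
 * the same result as the unoptimized 10 - 14 + 8 = 4 but without the extra
 * addition on every call.
 */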

static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb)
{
	__u32 entries = r->cached_prod - r->cached_cons;

	if (entries == 0) {
		r->cached_prod = __atomic_load_n(r->producer, __ATOMIC_ACQUIRE);
		entries = r->cached_prod - r->cached_cons;
	}

	return (entries > nb) ? nb : entries;
}

static inline __u32 xsk_ring_prod__reserve(struct xsk_ring_prod *prod, __u32 nb, __u32 *idx)
{
	if (xsk_prod_nb_free(prod, nb) < nb)
		return 0;

	*idx = prod->cached_prod;
	prod->cached_prod += nb;

	return nb;
}

static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb)
{
	/* Make sure everything has been written to the ring before indicating
	 * this to the kernel by writing the producer pointer.
	 */
	__atomic_store_n(prod->producer, *prod->producer + nb, __ATOMIC_RELEASE);
}
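
/* Illustrative sketch (not part of this header): refilling the fill ring so
 * the kernel has buffers to receive into. The batch size of 16 and the
 * frames[] array of free umem addresses are hypothetical application state:
 *
 *	__u32 idx, i;
 *
 *	if (xsk_ring_prod__reserve(&umem_fill_ring, 16, &idx) == 16) {
 *		for (i = 0; i < 16; i++)
 *			*xsk_ring_prod__fill_addr(&umem_fill_ring, idx + i) =
 *				frames[i];
 *		xsk_ring_prod__submit(&umem_fill_ring, 16);
 *	}
 */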

static inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __u32 *idx)
{
	__u32 entries = xsk_cons_nb_avail(cons, nb);

	if (entries > 0) {
		*idx = cons->cached_cons;
		cons->cached_cons += entries;
	}

	return entries;
}

static inline void xsk_ring_cons__cancel(struct xsk_ring_cons *cons, __u32 nb)
{
	cons->cached_cons -= nb;
}

static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, __u32 nb)
{
	/* Make sure data has been read before indicating we are done
	 * with the entries by updating the consumer pointer.
	 */
	__atomic_store_n(cons->consumer, *cons->consumer + nb, __ATOMIC_RELEASE);
}
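
/* Illustrative sketch (not part of this header): draining the RX ring with a
 * peek/release pair. umem_area is assumed to be the mmapped umem backing
 * area and rx a struct xsk_ring_cons set up by xsk_socket__create(); the
 * batch size of 64 and process() are application-defined:
 *
 *	__u32 idx, i, rcvd;
 *
 *	rcvd = xsk_ring_cons__peek(&rx, 64, &idx);
 *	for (i = 0; i < rcvd; i++) {
 *		const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&rx, idx + i);
 *		void *pkt = xsk_umem__get_data(umem_area, desc->addr);
 *
 *		process(pkt, desc->len);
 *	}
 *	if (rcvd)
 *		xsk_ring_cons__release(&rx, rcvd);
 */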

static inline void *xsk_umem__get_data(void *umem_area, __u64 addr)
{
	return &((char *)umem_area)[addr];
}

static inline __u64 xsk_umem__extract_addr(__u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline __u64 xsk_umem__extract_offset(__u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline __u64 xsk_umem__add_offset_to_addr(__u64 addr)
{
	return xsk_umem__extract_addr(addr) + xsk_umem__extract_offset(addr);
}
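
/* Illustrative sketch (not part of this header): in unaligned chunk mode
 * (XDP_UMEM_UNALIGNED_CHUNK_FLAG), descriptor addresses carry an offset in
 * their upper bits. The helpers above split and recombine such an address;
 * desc and umem_area are hypothetical application variables:
 *
 *	__u64 base = xsk_umem__extract_addr(desc->addr);
 *	__u64 off = xsk_umem__extract_offset(desc->addr);
 *	void *pkt = xsk_umem__get_data(umem_area,
 *				       xsk_umem__add_offset_to_addr(desc->addr));
 */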

int xsk_umem__fd(const struct xsk_umem *umem);
int xsk_socket__fd(const struct xsk_socket *xsk);

#define XSK_RING_CONS__DEFAULT_NUM_DESCS      2048
#define XSK_RING_PROD__DEFAULT_NUM_DESCS      2048
#define XSK_UMEM__DEFAULT_FRAME_SHIFT    12 /* 4096 bytes */
#define XSK_UMEM__DEFAULT_FRAME_SIZE     (1 << XSK_UMEM__DEFAULT_FRAME_SHIFT)
#define XSK_UMEM__DEFAULT_FRAME_HEADROOM 0
#define XSK_UMEM__DEFAULT_FLAGS 0

struct xsk_umem_config {
	__u32 fill_size;
	__u32 comp_size;
	__u32 frame_size;
	__u32 frame_headroom;
	__u32 flags;
};
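
/* Illustrative sketch (not part of this header): a configuration spelled out
 * with the defaults above, which is what passing a NULL config to
 * xsk_umem__create() is expected to be equivalent to:
 *
 *	struct xsk_umem_config cfg = {
 *		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
 *		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
 *		.frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
 *		.frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
 *		.flags = XSK_UMEM__DEFAULT_FLAGS,
 *	};
 */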

int xsk_attach_xdp_program(struct bpf_program *prog, int ifindex, u32 xdp_flags);
void xsk_detach_xdp_program(int ifindex, u32 xdp_flags);
int xsk_update_xskmap(struct bpf_map *map, struct xsk_socket *xsk);
void xsk_clear_xskmap(struct bpf_map *map);

struct xsk_socket_config {
	__u32 rx_size;
	__u32 tx_size;
	__u16 bind_flags;
};

/* Set config to NULL to get the default configuration. */
int xsk_umem__create(struct xsk_umem **umem,
		     void *umem_area, __u64 size,
		     struct xsk_ring_prod *fill,
		     struct xsk_ring_cons *comp,
		     const struct xsk_umem_config *config);
int xsk_socket__create(struct xsk_socket **xsk,
		       int ifindex, __u32 queue_id,
		       struct xsk_umem *umem,
		       struct xsk_ring_cons *rx,
		       struct xsk_ring_prod *tx,
		       const struct xsk_socket_config *config);
int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
			      int ifindex,
			      __u32 queue_id, struct xsk_umem *umem,
			      struct xsk_ring_cons *rx,
			      struct xsk_ring_prod *tx,
			      struct xsk_ring_prod *fill,
			      struct xsk_ring_cons *comp,
			      const struct xsk_socket_config *config);
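
/* Illustrative sketch (not part of this header): minimal setup with the
 * default configurations (NULL config pointers). bufs is assumed to be a
 * page-aligned area of NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE bytes, and
 * ifindex/queue are chosen by the application:
 *
 *	struct xsk_ring_prod fill, tx;
 *	struct xsk_ring_cons comp, rx;
 *	struct xsk_umem *umem;
 *	struct xsk_socket *xsk;
 *	int err;
 *
 *	err = xsk_umem__create(&umem, bufs,
 *			       NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE,
 *			       &fill, &comp, NULL);
 *	if (!err)
 *		err = xsk_socket__create(&xsk, ifindex, queue, umem,
 *					 &rx, &tx, NULL);
 */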

/* Returns 0 for success and -EBUSY if the umem is still in use. */
int xsk_umem__delete(struct xsk_umem *umem);
void xsk_socket__delete(struct xsk_socket *xsk);
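
/* Illustrative sketch (not part of this header): sockets should be deleted
 * before the umem they use; deleting the umem while a socket still refers to
 * it is what makes xsk_umem__delete() return -EBUSY:
 *
 *	xsk_socket__delete(xsk);
 *	xsk_umem__delete(umem);
 */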

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* __XSK_H */