/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;
struct xdp_umem_page {
	void *addr;
	dma_addr_t dma;
};

/* Buffer of FILL ring addresses retained by the driver for reuse:
 * nentries is the capacity, length the number of valid handles.
 */
struct xdp_umem_fq_reuse {
	u32 nentries;
	u32 length;
	u64 handles[];
};

struct xdp_umem {
	struct xsk_queue *fq;
	struct xsk_queue *cq;
	struct xdp_umem_page *pages;
	u64 chunk_mask;
	u64 size;
	u32 headroom;
	u32 chunk_size_nohr;
	struct user_struct *user;
	unsigned long address;
	refcount_t users;
	struct work_struct work;
	struct page **pgs;
	u32 npgs;
	int id;
	struct net_device *dev;
	struct xdp_umem_fq_reuse *fq_reuse;
	u16 queue_id;
	bool zc;
	spinlock_t xsk_list_lock;
	struct list_head xsk_list;
};

struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	u16 queue_id;
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head list;
	bool zc;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	u64 rx_dropped;
};

struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
void xsk_flush(struct xdp_sock *xs);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);

/* Used from netdev driver */
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_discard_addr(struct xdp_umem *umem);

void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);

struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq);
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);

struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
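
/* Illustrative sketch, not part of the kernel API: a zero-copy driver's
 * transmit path might drain the socket TX ring and report completions
 * roughly as follows. hw_ring_put(), budget and nb are hypothetical
 * driver-side names; only the xsk_umem_* calls come from this header.
 *
 *	dma_addr_t dma;
 *	u32 len, sent = 0;
 *
 *	while (sent < budget && xsk_umem_consume_tx(umem, &dma, &len)) {
 *		hw_ring_put(dma, len);
 *		sent++;
 *	}
 *	xsk_umem_consume_tx_done(umem);
 *
 * Once the hardware signals that nb of those frames have been sent, the
 * driver would hand their addresses back on the completion ring with
 * xsk_umem_complete_tx(umem, nb).
 */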

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1));
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1));
}
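
/* Illustrative sketch, not part of the kernel API: when refilling its RX ring
 * from the FILL ring, a driver might translate a umem address into the
 * buffer's kernel virtual and DMA addresses like this (desc is a hypothetical
 * driver-side descriptor):
 *
 *	u64 handle;
 *
 *	if (!xsk_umem_peek_addr(umem, &handle))
 *		return false;
 *	desc->dma = xdp_umem_get_dma(umem, handle);
 *	desc->cpu_addr = xdp_umem_get_data(umem, handle);
 *	xsk_umem_discard_addr(umem);
 *
 * A real driver may additionally offset both addresses by the configured
 * headroom before handing the buffer to hardware.
 */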

/* Reuse-queue aware version of FILL queue helpers */
static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		return xsk_umem_peek_addr(umem, addr);

	*addr = rq->handles[rq->length - 1];
	return addr;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		xsk_umem_discard_addr(umem);
	else
		rq->length--;
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	rq->handles[rq->length++] = addr;
}
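
/* Illustrative sketch, not part of the kernel API: a driver that has
 * installed a reuse queue, typically allocated with xsk_reuseq_prepare()
 * and swapped in with xsk_reuseq_swap() at ring setup, would use the _rq
 * variants in its RX refill path and give unused addresses back when a
 * ring is torn down (ring, buf and for_each_unused_rx_buffer() are
 * hypothetical driver-side names):
 *
 *	u64 handle;
 *
 *	if (!xsk_umem_peek_addr_rq(umem, &handle))
 *		return false;
 *	(program the RX buffer as in the sketch above)
 *	xsk_umem_discard_addr_rq(umem);
 *
 * On teardown, addresses that were never handed to user space can be
 * retained for the next ring configuration:
 *
 *	for_each_unused_rx_buffer(ring, buf)
 *		xsk_umem_fq_reuse(umem, buf->handle);
 */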

#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline void xsk_flush(struct xdp_sock *xs)
{
}

static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr(struct xdp_umem *umem)
{
}

static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
}

static inline bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma,
				       u32 *len)
{
	return false;
}

static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	return NULL;
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
	struct xdp_umem *umem,
	struct xdp_umem_fq_reuse *newq)
{
	return NULL;
}

static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
}

static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
						     u16 queue_id)
{
	return NULL;
}

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */