/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright (c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <net/sock.h>
struct net_device ;
struct xsk_queue ;
2018-06-04 15:05:51 +03:00
struct xdp_umem_props {
u64 chunk_mask ;
u64 size ;
} ;
/* Per-page bookkeeping for a umem region. */
struct xdp_umem_page {
	void *addr;	/* mapped address of this page (kernel-side mapping) */
};
2018-06-04 15:05:51 +03:00
struct xdp_umem {
struct xsk_queue * fq ;
struct xsk_queue * cq ;
2018-06-04 15:05:52 +03:00
struct xdp_umem_page * pages ;
2018-06-04 15:05:51 +03:00
struct xdp_umem_props props ;
u32 headroom ;
u32 chunk_size_nohr ;
struct user_struct * user ;
struct pid * pid ;
unsigned long address ;
refcount_t users ;
struct work_struct work ;
2018-06-04 15:05:52 +03:00
struct page * * pgs ;
2018-06-04 15:05:51 +03:00
u32 npgs ;
} ;
2018-05-02 14:01:23 +03:00
struct xdp_sock {
/* struct sock must be the first member of struct xdp_sock */
struct sock sk ;
2018-05-02 14:01:25 +03:00
struct xsk_queue * rx ;
struct net_device * dev ;
2018-05-02 14:01:23 +03:00
struct xdp_umem * umem ;
2018-05-02 14:01:28 +03:00
struct list_head flush_node ;
2018-05-02 14:01:26 +03:00
u16 queue_id ;
2018-05-02 14:01:32 +03:00
struct xsk_queue * tx ____cacheline_aligned_in_smp ;
2018-05-02 14:01:23 +03:00
/* Protects multiple processes in the control path */
struct mutex mutex ;
2018-05-02 14:01:27 +03:00
u64 rx_dropped ;
2018-05-02 14:01:23 +03:00
} ;
2018-05-02 14:01:27 +03:00
struct xdp_buff ;
# ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv ( struct xdp_sock * xs , struct xdp_buff * xdp ) ;
int xsk_rcv ( struct xdp_sock * xs , struct xdp_buff * xdp ) ;
void xsk_flush ( struct xdp_sock * xs ) ;
2018-05-02 14:01:28 +03:00
bool xsk_is_setup_for_bpf_map ( struct xdp_sock * xs ) ;
2018-05-02 14:01:27 +03:00
# else
static inline int xsk_generic_rcv ( struct xdp_sock * xs , struct xdp_buff * xdp )
{
return - ENOTSUPP ;
}
static inline int xsk_rcv ( struct xdp_sock * xs , struct xdp_buff * xdp )
{
return - ENOTSUPP ;
}
static inline void xsk_flush ( struct xdp_sock * xs )
{
}
2018-05-02 14:01:28 +03:00
static inline bool xsk_is_setup_for_bpf_map ( struct xdp_sock * xs )
{
return false ;
}
2018-05-02 14:01:27 +03:00
# endif /* CONFIG_XDP_SOCKETS */
#endif /* _LINUX_XDP_SOCK_H */