net/mlx5e: Support bpf_xdp_adjust_head()
This patch adds bpf_xdp_adjust_head() support to mlx5e.

1. rx_headroom is added to struct mlx5e_rq. It uses an existing
   4 byte hole in the struct.
2. The adjusted data length is checked against MLX5E_XDP_MIN_INLINE
   and MLX5E_SW2HW_MTU(rq->netdev->mtu).
3. The macro MLX5E_SW2HW_MTU is moved from en_main.c to en.h.
   MLX5E_HW2SW_MTU is also moved to en.h for symmetric reason
   but it is not a must.

v2:
- Keep the xdp specific logic in mlx5e_xdp_handle()
- Update dma_len after the sanity checks in mlx5e_xmit_xdp_frame()

Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Acked-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent 0e40f4c959
commit d8bec2b29a
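For context, a minimal sketch of the kind of program this patch enables on mlx5e: an XDP program that strips a hypothetical 4-byte tag from the front of the frame with bpf_xdp_adjust_head(). The section name, TAG_LEN, and return codes are illustrative only and not part of this patch; the driver simply requires the adjusted length to stay within the MLX5E_XDP_MIN_INLINE / MLX5E_SW2HW_MTU(mtu) bounds checked in mlx5e_xmit_xdp_frame() below.

/* Illustrative sketch, not part of this patch: strip a hypothetical
 * 4-byte tag that some earlier hop prepended to the frame.
 * bpf_xdp_adjust_head() moves xdp_md->data forward by a positive
 * delta (a negative delta would grow headroom, e.g. to push an
 * encapsulation header); mlx5e now accepts programs using it.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define TAG_LEN 4	/* hypothetical prepended tag size */

SEC("xdp")
int strip_tag(struct xdp_md *ctx)
{
	void *data     = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	/* Verifier-mandated bounds check before trusting TAG_LEN bytes. */
	if (data + TAG_LEN > data_end)
		return XDP_PASS;

	/* Move data forward past the tag; on failure keep the packet. */
	if (bpf_xdp_adjust_head(ctx, TAG_LEN))
		return XDP_PASS;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

Before this patch, attaching such a program (for example with ip link set dev <mlx5e netdev> xdp obj prog.o sec xdp) was rejected with -EOPNOTSUPP by the prog->xdp_adjust_head check that this patch removes from mlx5e_xdp_set() below.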
drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -51,6 +51,9 @@
 
 #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
 
+#define MLX5E_HW2SW_MTU(hwmtu) ((hwmtu) - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+#define MLX5E_SW2HW_MTU(swmtu) ((swmtu) + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+
 #define MLX5E_MAX_NUM_TC	8
 
 #define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x6
@@ -369,6 +372,7 @@ struct mlx5e_rq {
 
 	unsigned long          state;
 	int                    ix;
+	u16                    rx_headroom;
 
 	struct mlx5e_rx_am     am; /* Adaptive Moderation */
 	struct bpf_prog       *xdp_prog;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -343,9 +343,6 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
 	synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
 }
 
-#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
-#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
-
 static inline int mlx5e_get_wqe_mtt_sz(void)
 {
 	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
@@ -534,9 +531,13 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
 		goto err_rq_wq_destroy;
 	}
 
-	rq->buff.map_dir = DMA_FROM_DEVICE;
-	if (rq->xdp_prog)
+	if (rq->xdp_prog) {
 		rq->buff.map_dir = DMA_BIDIRECTIONAL;
+		rq->rx_headroom = XDP_PACKET_HEADROOM;
+	} else {
+		rq->buff.map_dir = DMA_FROM_DEVICE;
+		rq->rx_headroom = MLX5_RX_HEADROOM;
+	}
 
 	switch (priv->params.rq_wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
@@ -586,7 +587,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
 		byte_count = rq->buff.wqe_sz;
 
 		/* calc the required page order */
-		frag_sz = MLX5_RX_HEADROOM +
+		frag_sz = rq->rx_headroom +
 			  byte_count /* packet data */ +
 			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 		frag_sz = SKB_DATA_ALIGN(frag_sz);
@@ -3153,11 +3154,6 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
 	bool reset, was_opened;
 	int i;
 
-	if (prog && prog->xdp_adjust_head) {
-		netdev_err(netdev, "Does not support bpf_xdp_adjust_head()\n");
-		return -EOPNOTSUPP;
-	}
-
 	mutex_lock(&priv->state_lock);
 
 	if ((netdev->features & NETIF_F_LRO) && prog) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -264,7 +264,7 @@ int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
 	if (unlikely(mlx5e_page_alloc_mapped(rq, di)))
 		return -ENOMEM;
 
-	wqe->data.addr = cpu_to_be64(di->addr + MLX5_RX_HEADROOM);
+	wqe->data.addr = cpu_to_be64(di->addr + rq->rx_headroom);
 	return 0;
 }
 
@@ -646,8 +646,7 @@ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq)
 
 static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
 					struct mlx5e_dma_info *di,
-					unsigned int data_offset,
-					int len)
+					const struct xdp_buff *xdp)
 {
 	struct mlx5e_sq          *sq   = &rq->channel->xdp_sq;
 	struct mlx5_wq_cyc       *wq   = &sq->wq;
@@ -659,9 +658,16 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
 	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
 	struct mlx5_wqe_data_seg *dseg;
 
+	ptrdiff_t data_offset = xdp->data - xdp->data_hard_start;
 	dma_addr_t dma_addr  = di->addr + data_offset + MLX5E_XDP_MIN_INLINE;
-	unsigned int dma_len = len - MLX5E_XDP_MIN_INLINE;
-	void *data           = page_address(di->page) + data_offset;
+	unsigned int dma_len = xdp->data_end - xdp->data;
+
+	if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE ||
+		     MLX5E_SW2HW_MTU(rq->netdev->mtu) < dma_len)) {
+		rq->stats.xdp_drop++;
+		mlx5e_page_release(rq, di, true);
+		return;
+	}
 
 	if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_XDP_TX_WQEBBS))) {
 		if (sq->db.xdp.doorbell) {
@@ -674,13 +680,14 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
 		return;
 	}
 
+	dma_len -= MLX5E_XDP_MIN_INLINE;
 	dma_sync_single_for_device(sq->pdev, dma_addr, dma_len,
 				   PCI_DMA_TODEVICE);
 
 	memset(wqe, 0, sizeof(*wqe));
 
 	/* copy the inline part */
-	memcpy(eseg->inline_hdr_start, data, MLX5E_XDP_MIN_INLINE);
+	memcpy(eseg->inline_hdr_start, xdp->data, MLX5E_XDP_MIN_INLINE);
 	eseg->inline_hdr_sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
 
 	dseg = (struct mlx5_wqe_data_seg *)cseg + (MLX5E_XDP_TX_DS_COUNT - 1);
@@ -703,25 +710,29 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
 }
 
 /* returns true if packet was consumed by xdp */
-static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
-				    const struct bpf_prog *prog,
-				    struct mlx5e_dma_info *di,
-				    void *data, u16 len)
+static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
+				   struct mlx5e_dma_info *di,
+				   void *va, u16 *rx_headroom, u32 *len)
 {
+	const struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
 	struct xdp_buff xdp;
 	u32 act;
 
 	if (!prog)
 		return false;
 
-	xdp.data = data;
-	xdp.data_end = xdp.data + len;
+	xdp.data = va + *rx_headroom;
+	xdp.data_end = xdp.data + *len;
+	xdp.data_hard_start = va;
 
 	act = bpf_prog_run_xdp(prog, &xdp);
 	switch (act) {
 	case XDP_PASS:
+		*rx_headroom = xdp.data - xdp.data_hard_start;
+		*len = xdp.data_end - xdp.data;
 		return false;
 	case XDP_TX:
-		mlx5e_xmit_xdp_frame(rq, di, MLX5_RX_HEADROOM, len);
+		mlx5e_xmit_xdp_frame(rq, di, &xdp);
 		return true;
 	default:
 		bpf_warn_invalid_xdp_action(act);
@@ -740,15 +751,16 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 	struct mlx5e_dma_info *di;
 	struct sk_buff *skb;
 	void *va, *data;
+	u16 rx_headroom = rq->rx_headroom;
 	bool consumed;
 
 	di             = &rq->dma_info[wqe_counter];
 	va             = page_address(di->page);
-	data           = va + MLX5_RX_HEADROOM;
+	data           = va + rx_headroom;
 
 	dma_sync_single_range_for_cpu(rq->pdev,
 				      di->addr,
-				      MLX5_RX_HEADROOM,
+				      rx_headroom,
 				      rq->buff.wqe_sz,
 				      DMA_FROM_DEVICE);
 	prefetch(data);
@@ -760,8 +772,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 	}
 
 	rcu_read_lock();
-	consumed = mlx5e_xdp_handle(rq, READ_ONCE(rq->xdp_prog), di, data,
-				    cqe_bcnt);
+	consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt);
 	rcu_read_unlock();
 	if (consumed)
 		return NULL; /* page/packet was consumed by XDP */
@@ -777,7 +788,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 	page_ref_inc(di->page);
 	mlx5e_page_release(rq, di, true);
 
-	skb_reserve(skb, MLX5_RX_HEADROOM);
+	skb_reserve(skb, rx_headroom);
 	skb_put(skb, cqe_bcnt);
 
 	return skb;