diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index a1772263d84b..a4f6dd5030c0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -189,7 +189,9 @@ static bool ionic_rx_buf_recycle(struct ionic_queue *q,
 
 static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
 				      struct ionic_desc_info *desc_info,
-				      struct ionic_rxq_comp *comp,
+				      unsigned int headroom,
+				      unsigned int len,
+				      unsigned int num_sg_elems,
 				      bool synced)
 {
 	struct net_device *netdev = q->lif->netdev;
@@ -199,12 +201,10 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
 	struct sk_buff *skb;
 	unsigned int i;
 	u16 frag_len;
-	u16 len;
 
 	stats = q_to_rx_stats(q);
 
 	buf_info = &desc_info->bufs[0];
-	len = le16_to_cpu(comp->len);
 
 	prefetchw(buf_info->page);
 
@@ -216,23 +216,26 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
 		return NULL;
 	}
 
-	i = comp->num_sg_elems + 1;
+	i = num_sg_elems + 1;
 	do {
 		if (unlikely(!buf_info->page)) {
 			dev_kfree_skb(skb);
 			return NULL;
 		}
 
-		frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
+		if (headroom)
+			frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
+		else
+			frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
 		len -= frag_len;
 
 		if (!synced)
 			dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info),
-						      0, frag_len, DMA_FROM_DEVICE);
+						      headroom, frag_len, DMA_FROM_DEVICE);
 
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-				buf_info->page, buf_info->page_offset, frag_len,
-				IONIC_PAGE_SIZE);
+				buf_info->page, buf_info->page_offset + headroom,
+				frag_len, IONIC_PAGE_SIZE);
 
 		if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
 			dma_unmap_page(dev, buf_info->dma_addr,
@@ -240,6 +243,10 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
 			buf_info->page = NULL;
 		}
 
+		/* only needed on the first buffer */
+		if (headroom)
+			headroom = 0;
+
 		buf_info++;
 
 		i--;
@@ -250,7 +257,8 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
 
 static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
 					  struct ionic_desc_info *desc_info,
-					  struct ionic_rxq_comp *comp,
+					  unsigned int headroom,
+					  unsigned int len,
 					  bool synced)
 {
 	struct net_device *netdev = q->lif->netdev;
@@ -258,12 +266,10 @@ static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
 	struct ionic_rx_stats *stats;
 	struct device *dev = q->dev;
 	struct sk_buff *skb;
-	u16 len;
 
 	stats = q_to_rx_stats(q);
 
 	buf_info = &desc_info->bufs[0];
-	len = le16_to_cpu(comp->len);
 
 	skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
 	if (unlikely(!skb)) {
@@ -280,10 +286,10 @@ static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
 
 	if (!synced)
 		dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info),
-					      0, len, DMA_FROM_DEVICE);
-	skb_copy_to_linear_data(skb, ionic_rx_buf_va(buf_info), len);
+					      headroom, len, DMA_FROM_DEVICE);
+	skb_copy_to_linear_data(skb, ionic_rx_buf_va(buf_info) + headroom, len);
 	dma_sync_single_range_for_device(dev, ionic_rx_buf_pa(buf_info),
-					 0, len, DMA_FROM_DEVICE);
+					 headroom, len, DMA_FROM_DEVICE);
 
 	skb_put(skb, len);
 	skb->protocol = eth_type_trans(skb, q->lif->netdev);
@@ -303,10 +309,10 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
 
 	xdp_init_buff(&xdp_buf, IONIC_PAGE_SIZE, rxq->xdp_rxq_info);
 	xdp_prepare_buff(&xdp_buf, ionic_rx_buf_va(buf_info),
-			 0, len, false);
+			 XDP_PACKET_HEADROOM, len, false);
 
 	dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info),
-				      0, len,
+				      XDP_PACKET_HEADROOM, len,
 				      DMA_FROM_DEVICE);
 
 	prefetchw(&xdp_buf.data_hard_start);
@@ -345,6 +351,7 @@ static void ionic_rx_clean(struct ionic_queue *q,
 	struct ionic_rx_stats *stats;
 	struct ionic_rxq_comp *comp;
 	struct bpf_prog *xdp_prog;
+	unsigned int headroom;
 	struct sk_buff *skb;
 	u16 len;
 
@@ -366,10 +373,12 @@ static void ionic_rx_clean(struct ionic_queue *q,
 	    ionic_run_xdp(stats, netdev, xdp_prog, q, desc_info->bufs, len))
 		return;
 
+	headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
 	if (len <= q->lif->rx_copybreak)
-		skb = ionic_rx_copybreak(q, desc_info, comp, !!xdp_prog);
+		skb = ionic_rx_copybreak(q, desc_info, headroom, len, !!xdp_prog);
 	else
-		skb = ionic_rx_frags(q, desc_info, comp, !!xdp_prog);
+		skb = ionic_rx_frags(q, desc_info, headroom, len,
+				     comp->num_sg_elems, !!xdp_prog);
 
 	if (unlikely(!skb)) {
 		stats->dropped++;
@@ -493,8 +502,9 @@ void ionic_rx_fill(struct ionic_queue *q)
 	unsigned int frag_len;
 	unsigned int nfrags;
 	unsigned int n_fill;
-	unsigned int i, j;
 	unsigned int len;
+	unsigned int i;
+	unsigned int j;
 
 	n_fill = ionic_q_space_avail(q);
 
@@ -503,9 +513,12 @@ void ionic_rx_fill(struct ionic_queue *q)
 	if (n_fill < fill_threshold)
 		return;
 
-	len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
+	len = netdev->mtu + VLAN_ETH_HLEN;
 
 	for (i = n_fill; i; i--) {
+		unsigned int headroom;
+		unsigned int buf_len;
+
 		nfrags = 0;
 		remain_len = len;
 		desc_info = &q->info[q->head_idx];
@@ -520,9 +533,18 @@ void ionic_rx_fill(struct ionic_queue *q)
 			}
 		}
 
-		/* fill main descriptor - buf[0] */
-		desc->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info));
-		frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
+		/* fill main descriptor - buf[0]
+		 * XDP uses space in the first buffer, so account for
+		 * head room, tail room, and ip header in the first frag size.
+		 */
+		headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
+		if (q->xdp_rxq_info)
+			buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN;
+		else
+			buf_len = ionic_rx_buf_size(buf_info);
+		frag_len = min_t(u16, len, buf_len);
+
+		desc->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info) + headroom);
		desc->len = cpu_to_le16(frag_len);
 		remain_len -= frag_len;
 		buf_info++;
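
A minimal standalone sketch of the headroom arithmetic this patch introduces,
for illustration only: XDP_PACKET_HEADROOM (256) and VLAN_ETH_HLEN (18) are
the kernel's real values, but MAX_LINEAR_MTU, BUF_SIZE, and fill_first_desc()
are hypothetical stand-ins for IONIC_XDP_MAX_LINEAR_MTU, ionic_rx_buf_size(),
and the descriptor-fill path above, whose real values derive from
IONIC_PAGE_SIZE.

/* Standalone sketch (not driver code) of the first-buffer layout with and
 * without XDP headroom. MAX_LINEAR_MTU and BUF_SIZE are assumed placeholders.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define XDP_PACKET_HEADROOM	256	/* include/uapi/linux/bpf.h */
#define VLAN_ETH_HLEN		18	/* ETH_HLEN (14) + VLAN_HLEN (4) */
#define MAX_LINEAR_MTU		3000	/* placeholder for IONIC_XDP_MAX_LINEAR_MTU */
#define BUF_SIZE		4096	/* placeholder for ionic_rx_buf_size() */

static void fill_first_desc(uint64_t buf_pa, unsigned int pkt_len, bool xdp)
{
	/* Mirrors the reworked ionic_rx_fill(): when an XDP program can run,
	 * reserve headroom at the start of the first buffer and cap the
	 * first fragment so headroom + frame + tailroom fit in one buffer.
	 */
	unsigned int headroom = xdp ? XDP_PACKET_HEADROOM : 0;
	unsigned int buf_len = xdp ? MAX_LINEAR_MTU + VLAN_ETH_HLEN : BUF_SIZE;
	unsigned int frag_len = pkt_len < buf_len ? pkt_len : buf_len;

	/* The NIC DMAs the frame in after the reserved headroom, so the
	 * descriptor address, the dma_sync offsets, and the skb frag offset
	 * all shift by the same amount.
	 */
	printf("desc.addr=%#llx desc.len=%u headroom=%u\n",
	       (unsigned long long)(buf_pa + headroom), frag_len, headroom);
}

int main(void)
{
	fill_first_desc(0x100000, 1514, false);	/* no XDP: whole buffer usable */
	fill_first_desc(0x100000, 1514, true);	/* XDP: data starts 256B in */
	return 0;
}

Note that headroom is applied only to the first buffer of a multi-fragment
frame (hence the "only needed on the first buffer" reset in ionic_rx_frags()):
the reserved space exists so an XDP program can call bpf_xdp_adjust_head() or
prepend metadata, which is only meaningful ahead of the packet start.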