// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "ixgbe.h"
#include "ixgbe_txrx_common.h"
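
/* Return the UMEM bound to @ring's queue, or NULL if no XDP program is
 * loaded or zero-copy is not enabled for that queue.
 */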
struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *ring)
{
	bool xdp_on = READ_ONCE(adapter->xdp_prog);
	int qid = ring->ring_idx;

	if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
		return NULL;

	return xdp_get_umem_from_qid(adapter->netdev, qid);
}
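
/* Enable AF_XDP zero-copy on queue @qid: validate the queue id, DMA-map the
 * UMEM, mark the queue as zero-copy and, if the interface is running, restart
 * the ring pair and kick the NAPI context so Rx traffic starts flowing.
 */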
static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
				 struct xdp_umem *umem,
				 u16 qid)
{
	struct net_device *netdev = adapter->netdev;
	bool if_running;
	int err;

	if (qid >= adapter->num_rx_queues)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_buff_dma_map(umem, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
	if (err)
		return err;

	if_running = netif_running(adapter->netdev) &&
		     ixgbe_enabled_xdp_adapter(adapter);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	set_bit(qid, adapter->af_xdp_zc_qps);

	if (if_running) {
		ixgbe_txrx_ring_enable(adapter, qid);

		/* Kick start the NAPI context so that receiving will start */
		err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
		if (err)
			return err;
	}

	return 0;
}
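
/* Disable AF_XDP zero-copy on queue @qid: clear the zero-copy bit, DMA-unmap
 * the UMEM and, if the interface is running, restart the ring pair.
 */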
static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
{
	struct xdp_umem *umem;
	bool if_running;

	umem = xdp_get_umem_from_qid(adapter->netdev, qid);
	if (!umem)
		return -EINVAL;

	if_running = netif_running(adapter->netdev) &&
		     ixgbe_enabled_xdp_adapter(adapter);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	clear_bit(qid, adapter->af_xdp_zc_qps);
	xsk_buff_dma_unmap(umem, IXGBE_RX_DMA_ATTR);

	if (if_running)
		ixgbe_txrx_ring_enable(adapter, qid);

	return 0;
}
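
/* Called from the driver's XDP setup path: a non-NULL @umem enables
 * zero-copy on queue @qid, a NULL @umem disables it.
 */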
int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
			 u16 qid)
{
	return umem ? ixgbe_xsk_umem_enable(adapter, umem, qid) :
		ixgbe_xsk_umem_disable(adapter, qid);
}
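
/* Run the XDP program attached to @rx_ring on one zero-copy buffer and map
 * the verdict to IXGBE_XDP_PASS/TX/REDIR/CONSUMED for the Rx cleanup loop.
 */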
static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
			    struct ixgbe_ring *rx_ring,
			    struct xdp_buff *xdp)
{
	int err, result = IXGBE_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct xdp_frame *xdpf;
	u32 act;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf)) {
			result = IXGBE_XDP_CONSUMED;
			break;
		}
		result = ixgbe_xmit_xdp_ring(adapter, xdpf);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough; /* handle aborts by dropping packet */
	case XDP_DROP:
		result = IXGBE_XDP_CONSUMED;
		break;
	}
	rcu_read_unlock();
	return result;
}
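
/* Allocate up to @count Rx buffers from the ring's UMEM, write their DMA
 * addresses into the descriptors and bump the tail register. Returns false
 * if the UMEM ran out of buffers before @count descriptors were filled.
 */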
bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;
	dma_addr_t dma;
	bool ok = true;

	/* nothing to do */
	if (!count)
		return true;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		bi->xdp = xsk_buff_alloc(rx_ring->xsk_umem);
		if (!bi->xdp) {
			ok = false;
			break;
		}

		dma = xsk_buff_xdp_get_dma(bi->xdp);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(dma);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		count--;
	} while (count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}

	return ok;
}
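
/* XDP_PASS path: copy the received frame (and any XDP metadata) into a newly
 * allocated skb and release the zero-copy buffer back to the UMEM.
 */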
static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
					      struct ixgbe_rx_buffer *bi)
{
	unsigned int metasize = bi->xdp->data - bi->xdp->data_meta;
	unsigned int datasize = bi->xdp->data_end - bi->xdp->data;
	struct sk_buff *skb;

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       bi->xdp->data_end - bi->xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, bi->xdp->data - bi->xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), bi->xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	xsk_buff_free(bi->xdp);
	bi->xdp = NULL;
	return skb;
}
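
/* Bump next_to_clean, wrapping at the end of the ring, and prefetch the next
 * descriptor.
 */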
static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(IXGBE_RX_DESC(rx_ring, ntc));
}
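
/* Zero-copy Rx poll loop: process up to @budget completed descriptors,
 * running XDP on each buffer and falling back to an skb copy for XDP_PASS
 * frames, then flush pending XDP_TX/XDP_REDIRECT work and update the ring
 * and q_vector statistics.
 */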
int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
			  struct ixgbe_ring *rx_ring,
			  const int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;
		struct ixgbe_rx_buffer *bi;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			failure = failure ||
				  !ixgbe_alloc_rx_buffers_zc(rx_ring,
							     cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];

		if (unlikely(!ixgbe_test_staterr(rx_desc,
						 IXGBE_RXD_STAT_EOP))) {
			struct ixgbe_rx_buffer *next_bi;

			xsk_buff_free(bi->xdp);
			bi->xdp = NULL;
			ixgbe_inc_ntc(rx_ring);
			next_bi =
			       &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
			next_bi->discard = true;
			continue;
		}

		if (unlikely(bi->discard)) {
			xsk_buff_free(bi->xdp);
			bi->xdp = NULL;
			bi->discard = false;
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		bi->xdp->data_end = bi->xdp->data + size;
		xsk_buff_dma_sync_for_cpu(bi->xdp);
		xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);

		if (xdp_res) {
			if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))
				xdp_xmit |= xdp_res;
			else
				xsk_buff_free(bi->xdp);

			bi->xdp = NULL;
			total_rx_packets++;
			total_rx_bytes += size;

			cleaned_count++;
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		/* XDP_PASS path */
		skb = ixgbe_construct_skb_zc(rx_ring, bi);
		if (!skb) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			break;
		}

		cleaned_count++;
		ixgbe_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
		ixgbe_rx_skb(q_vector, skb);
	}

	if (xdp_xmit & IXGBE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_xmit & IXGBE_XDP_TX) {
		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.
		 */
		wmb();
		writel(ring->next_to_use, ring->tail);
	}

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);

		return (int)total_rx_packets;
	}
	return failure ? budget : (int)total_rx_packets;
}
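
/* Release all XSK buffers still held by the Rx ring, used when the ring is
 * torn down.
 */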
void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
	struct ixgbe_rx_buffer *bi;
	u16 i;

	for (i = 0; i < rx_ring->count; i++) {
		bi = &rx_ring->rx_buffer_info[i];

		if (!bi->xdp)
			continue;

		xsk_buff_free(bi->xdp);
		bi->xdp = NULL;
	}
}
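
/* Send up to @budget frames queued on the XSK Tx ring out through the
 * hardware XDP ring. Returns true when transmission stopped because the XSK
 * Tx ring ran dry, false if the budget or descriptor space was exhausted.
 */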
static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_bi;
	bool work_done = true;
	struct xdp_desc desc;
	dma_addr_t dma;
	u32 cmd_type;

	while (budget-- > 0) {
		if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
		    !netif_carrier_ok(xdp_ring->netdev)) {
			work_done = false;
			break;
		}

		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
			break;

		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
						 desc.len);

		tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
		tx_bi->bytecount = desc.len;
		tx_bi->xdpf = NULL;
		tx_bi->gso_segs = 1;

		tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		/* put descriptor type bits */
		cmd_type = IXGBE_ADVTXD_DTYP_DATA |
			   IXGBE_ADVTXD_DCMD_DEXT |
			   IXGBE_ADVTXD_DCMD_IFCS;
		cmd_type |= desc.len | IXGBE_TXD_CMD;
		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
		tx_desc->read.olinfo_status =
			cpu_to_le32(desc.len << IXGBE_ADVTXD_PAYLEN_SHIFT);

		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}

	if (tx_desc) {
		ixgbe_xdp_ring_update_tail(xdp_ring);
		xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
	}

	return !!budget && work_done;
}
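
/* Release one completed XDP_TX buffer: return the frame and unmap its DMA
 * mapping.
 */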
static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
				      struct ixgbe_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}
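
/* Reclaim completed descriptors on the XDP Tx ring, hand completed XSK
 * frames back to the UMEM, update statistics and then attempt to transmit
 * more frames from the XSK Tx ring.
 */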
bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
			    struct ixgbe_ring *tx_ring, int napi_budget)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	unsigned int total_packets = 0, total_bytes = 0;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	union ixgbe_adv_tx_desc *tx_desc;
	struct ixgbe_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	tx_bi = &tx_ring->tx_buffer_info[ntc];
	tx_desc = IXGBE_TX_DESC(tx_ring, ntc);

	while (ntc != ntu) {
		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		total_bytes += tx_bi->bytecount;
		total_packets += tx_bi->gso_segs;

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		tx_bi++;
		tx_desc++;
		ntc++;
		if (unlikely(ntc == tx_ring->count)) {
			ntc = 0;
			tx_bi = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);
	}

	tx_ring->next_to_clean = ntc;

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (xsk_frames)
		xsk_umem_complete_tx(umem, xsk_frames);

	if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
		xsk_set_tx_need_wakeup(tx_ring->xsk_umem);

	return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
}
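
/* .ndo_xsk_wakeup implementation: rearm the queue's interrupt so NAPI runs
 * and services the AF_XDP rings, unless NAPI is already scheduled.
 */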
int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ring *ring;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return -ENETDOWN;

	if (!READ_ONCE(adapter->xdp_prog))
		return -ENXIO;

	if (qid >= adapter->num_xdp_queues)
		return -ENXIO;

	ring = adapter->xdp_ring[qid];

	if (test_bit(__IXGBE_TX_DISABLED, &ring->state))
		return -ENETDOWN;

	if (!ring->xsk_umem)
		return -ENXIO;

	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
		u64 eics = BIT_ULL(ring->q_vector->v_idx);

		ixgbe_irq_rearm_queues(adapter, eics);
	}

	return 0;
}
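
/* Drop any frames left on the XDP Tx ring at teardown, completing
 * outstanding XSK descriptors back to the UMEM.
 */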
void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	struct ixgbe_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_buffer_info[ntc];

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc == tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_umem_complete_tx(umem, xsk_frames);
}