/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/
#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_

/* Interrupt Throttling and Rate Limiting Goodies */

#define I40E_MAX_ITR               0x0FF0  /* reg uses 2 usec resolution */
#define I40E_MIN_ITR               0x0001  /* reg uses 2 usec resolution */
#define I40E_ITR_100K              0x0005
#define I40E_ITR_50K               0x000A
#define I40E_ITR_20K               0x0019
#define I40E_ITR_18K               0x001B
#define I40E_ITR_8K                0x003E
#define I40E_ITR_4K                0x007A
#define I40E_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
#define I40E_ITR_RX_DEF            I40E_ITR_20K
#define I40E_ITR_TX_DEF            I40E_ITR_20K

#define I40E_ITR_DYNAMIC           0x8000  /* use top bit as a flag */
#define I40E_MIN_INT_RATE          250     /* ~= 1000000 / (I40E_MAX_ITR * 2) */
#define I40E_MAX_INT_RATE          500000  /* == 1000000 / (I40E_MIN_ITR * 2) */
#define I40E_DEFAULT_IRQ_WORK      256
#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the value of the rate limit is non-zero
 */
#define INTRL_ENA                  BIT(6)
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
/**
 * i40e_intrl_usec_to_reg - convert interrupt rate limit to register
 * @intrl: interrupt rate limit to convert
 *
 * This function converts a decimal interrupt rate limit to the appropriate
 * register format expected by the firmware when setting interrupt rate limit.
 */
static inline u16 i40e_intrl_usec_to_reg(int intrl)
{
	if (intrl >> 2)
		return ((intrl >> 2) | INTRL_ENA);
	else
		return 0;
}
#define I40E_INTRL_8K              125     /* 8000 ints/sec */
#define I40E_INTRL_62K             16      /* 62500 ints/sec */
#define I40E_INTRL_83K             12      /* 83333 ints/sec */
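
/* Worked round-trip example (illustrative, values derived from the defines
 * above): i40e_intrl_usec_to_reg(I40E_INTRL_62K) yields (16 >> 2) | INTRL_ENA
 * = 0x44, and INTRL_REG_TO_USEC(0x44) recovers (0x44 & ~INTRL_ENA) << 2 = 16,
 * i.e. the register stores the limit in 4 usec units plus the enable bit.
 */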

#define I40E_QUEUE_END_OF_LIST 0x7FF

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * mentioning ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register but instead is a special value meaning "don't update" ITR0/1/2.
 */
enum i40e_dyn_idx_t {
	I40E_IDX_ITR0 = 0,
	I40E_IDX_ITR1 = 1,
	I40E_IDX_ITR2 = 2,
	I40E_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define I40E_RX_ITR    I40E_IDX_ITR0
#define I40E_TX_ITR    I40E_IDX_ITR1
#define I40E_PE_ITR    I40E_IDX_ITR2

/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))

#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

#define i40e_pf_get_default_rss_hena(pf) \
	(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
	  I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)

/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256   256
#define I40E_RXBUFFER_2048  2048
#define I40E_RXBUFFER_3072  3072   /* For FCoE MTU of 2158 */
#define I40E_RXBUFFER_4096  4096
#define I40E_RXBUFFER_8192  8192
#define I40E_MAX_RXBUFFER   9728  /* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
 * this adds up to 512 bytes of extra data meaning the smallest allocation
 * we could have is 1K.
 * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
 * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
 */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define i40e_rx_desc i40e_32byte_rx_desc

#define I40E_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/**
 * i40e_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}
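
/* Typical use in an Rx clean-up loop (illustrative sketch; the DD status bit
 * is defined in i40e_type.h): stop processing once a descriptor has not yet
 * been written back by hardware, e.g.
 *
 *	if (!i40e_test_staterr(rx_desc, BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 *		break;
 */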

/* How many Rx Buffers do we bundle into one write to the hardware? */
#define I40E_RX_BUFFER_WRITE	16	/* Must be power of 2 */

#define I40E_RX_INCREMENT(r, i) \
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		r->next_to_clean = i;		\
	} while (0)

#define I40E_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		(n) = I40E_RX_DESC((r), (i));	\
	} while (0)

#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n)		\
	do {						\
		I40E_RX_NEXT_DESC((r), (i), (n));	\
		prefetch((n));				\
	} while (0)

#define I40E_MAX_BUFFER_TXD	8
#define I40E_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define I40E_MAX_READ_REQ_SIZE		4096
#define I40E_MAX_DATA_PER_TXD		(16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
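
/* With the values above, I40E_MAX_DATA_PER_TXD is 0x3FFF (16383) and
 * I40E_MAX_DATA_PER_TXD_ALIGNED works out to 0x3FFF & ~0xFFF = 0x3000,
 * i.e. 12K per descriptor once aligned down to the 4K read request size.
 */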

/**
 * i40e_txd_use_count  - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 */
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}
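
/* A few spot checks of the approximation above (illustrative):
 *	i40e_txd_use_count(1500)  = ((1500 * 85) >> 20) + 1  = 1
 *	i40e_txd_use_count(12288) = ((12288 * 85) >> 20) + 1 = 1   (12K, covered by the 4K-1 slack)
 *	i40e_txd_use_count(16383) = ((16383 * 85) >> 20) + 1 = 2
 */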

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING	4

#define I40E_TX_FLAGS_HW_VLAN		BIT(1)
#define I40E_TX_FLAGS_SW_VLAN		BIT(2)
#define I40E_TX_FLAGS_TSO		BIT(3)
#define I40E_TX_FLAGS_IPV4		BIT(4)
#define I40E_TX_FLAGS_IPV6		BIT(5)
#define I40E_TX_FLAGS_FCCRC		BIT(6)
#define I40E_TX_FLAGS_FSO		BIT(7)
#define I40E_TX_FLAGS_TSYN		BIT(8)
#define I40E_TX_FLAGS_FD_SB		BIT(9)
#define I40E_TX_FLAGS_UDP_TUNNEL	BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
#define I40E_TX_FLAGS_VLAN_SHIFT	16

struct i40e_tx_buffer {
	struct i40e_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;
		void *raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct i40e_rx_buffer {
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};

struct i40e_queue_stats {
	u64 packets;
	u64 bytes;
};

struct i40e_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 tx_lost_interrupt;
};

struct i40e_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
	u64 page_reuse_count;
	u64 realloc_count;
};

enum i40e_ring_state_t {
	__I40E_TX_FDIR_INIT_DONE,
	__I40E_TX_XPS_INIT_DONE,
};

/* some useful defines for virtchannel interface, which
 * is the only remaining user of header split
 */
#define I40E_RX_DTYPE_NO_SPLIT      0
#define I40E_RX_DTYPE_HEADER_SPLIT  1
#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
#define I40E_RX_SPLIT_L2      0x1
#define I40E_RX_SPLIT_IP      0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP    0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
	struct i40e_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	union {
		struct i40e_tx_buffer *tx_bi;
		struct i40e_rx_buffer *rx_bi;
	};
	unsigned long state;
	u16 queue_index;		/* Queue number of ring */
	u8 dcb_tc;			/* Traffic class of ring */
	u8 __iomem *tail;

	/* high bit set means dynamic, use accessor routines to read/write.
	 * hardware only supports 2us resolution for the ITR registers.
	 * these values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 rx_itr_setting;
	u16 tx_itr_setting;

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 rx_buf_len;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	u8 atr_sample_rate;
	u8 atr_count;

	bool ring_active;		/* is ring online or not */
	bool arm_wb;			/* do something to arm write back */
	u8 packet_stride;

	u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR	BIT(0)

	/* stats structs */
	struct i40e_queue_stats	stats;
	struct u64_stats_sync syncp;
	union {
		struct i40e_tx_queue_stats tx_stats;
		struct i40e_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct i40e_vsi *vsi;		/* Backreference to associated VSI */
	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */

	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;
	struct sk_buff *skb;		/* When i40e_clean_rx_ring_irq() must
					 * return before it sees the EOP for
					 * the current packet, we save that skb
					 * here and resume receiving this
					 * packet the next time
					 * i40e_clean_rx_ring_irq() is called
					 * for this ring.
					 */
} ____cacheline_internodealigned_in_smp;

enum i40e_latency_range {
	I40E_LOWEST_LATENCY = 0,
	I40E_LOW_LATENCY = 1,
	I40E_BULK_LATENCY = 2,
	I40E_ULTRA_LATENCY = 3,
};

struct i40e_ring_container {
	/* array of pointers to rings */
	struct i40e_ring *ring;
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 count;
	enum i40e_latency_range latency_range;
	u16 itr;
};

/* iterator for handling rings in ring container */
#define i40e_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)

bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);

/**
 * i40e_get_head - Retrieve head from head writeback
 * @tx_ring:  tx ring to fetch head of
 *
 * Returns value of Tx ring head based on value stored
 * in head write-back location
 **/
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

	return le32_to_cpu(*(volatile __le32 *)head);
}
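
/* Note: this assumes the ring setup code reserves one extra u32 of DMA memory
 * immediately past the last descriptor for the head write-back location, which
 * is how the Tx descriptor rings in this driver are expected to be allocated.
 */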

/**
 * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb:     send buffer
 *
 * Returns number of data descriptors needed for this skb.
 **/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += i40e_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}

/**
 * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}

/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb:      send buffer
 * @count:    number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < I40E_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __i40e_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != I40E_MAX_BUFFER_TXD;
}
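
/* Illustrative sketch of how these helpers fit together on the Tx hot path
 * (simplified; see i40e_lan_xmit_frame()/i40e_xmit_frame_ring() in i40e_txrx.c
 * for the real flow, including the extra descriptors reserved for context and
 * the gap to the head write-back cache line):
 *
 *	count = i40e_xmit_descriptor_count(skb);
 *	if (i40e_chk_linearize(skb, count)) {
 *		if (__skb_linearize(skb))
 *			goto out_drop;
 *		count = i40e_txd_use_count(skb->len);
 *	}
 *	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1))
 *		return NETDEV_TX_BUSY;
 */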

/**
 * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
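
/* Illustrative use (sketch): the Tx clean-up path can report completed work to
 * the stack's byte queue limits for this queue, e.g.
 *
 *	netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes);
 */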
#endif /* _I40E_TXRX_H_ */