/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"

struct rbuf_info {
	struct page *page;
	void	*data;
	u64	offset;
};

#define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES))
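
/* Each receive buffer handed to the HW is carved out of a (possibly shared)
 * page. The struct rbuf_info above is stored in the NICVF_RCV_BUF_ALIGN_BYTES
 * immediately preceding the aligned buffer address given to the HW, so
 * GET_RBUF_INFO() can recover the owning page and the original (unaligned)
 * start address from nothing but the DMA'ed buffer pointer.
 */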

/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
			  u64 reg, int bit_pos, int bits, int val)
{
	u64 bit_mask;
	u64 reg_val;
	int timeout = 10;

	bit_mask = (1ULL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
	return 1;
}
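
/* Queue descriptor memory is allocated with 'align_bytes' of slack so that
 * the base address handed to the HW can be rounded up to the required
 * alignment; the same offset is applied to the CPU virtual address, and the
 * original unaligned pointer is kept for dma_free_coherent() later.
 */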
/* Allocate memory for a queue's descriptors */
static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
				  int q_len, int desc_size, int align_bytes)
{
	dmem->q_len = q_len;
	dmem->size = (desc_size * q_len) + align_bytes;
	/* Save address, need it while freeing */
	dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
						 &dmem->dma, GFP_KERNEL);
	if (!dmem->unalign_base)
		return -ENOMEM;

	/* Align memory address for 'align_bytes' */
	dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
	dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
	return 0;
}

/* Free queue's descriptor memory */
static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
	if (!dmem)
		return;

	dma_free_coherent(&nic->pdev->dev, dmem->size,
			  dmem->unalign_base, dmem->dma);
	dmem->unalign_base = NULL;
	dmem->base = NULL;
}

/* Allocate buffer for packet reception
 * HW returns memory address where packet is DMA'ed but not a pointer
 * into RBDR ring, so save buffer address at the start of fragment and
 * align the start address to a cache aligned address
 */
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
					 u32 buf_len, u64 **rbuf)
{
	u64 data;
	struct rbuf_info *rinfo;
	int order = get_order(buf_len);

	/* Check if request can be accommodated in previously allocated page */
	if (nic->rb_page) {
		if ((nic->rb_page_offset + buf_len + buf_len) >
		    (PAGE_SIZE << order)) {
			nic->rb_page = NULL;
		} else {
			nic->rb_page_offset += buf_len;
			get_page(nic->rb_page);
		}
	}

	/* Allocate a new page */
	if (!nic->rb_page) {
		nic->rb_page = alloc_pages(gfp | __GFP_COMP, order);
		if (!nic->rb_page) {
			netdev_err(nic->netdev, "Failed to allocate new rcv buffer\n");
			return -ENOMEM;
		}
		nic->rb_page_offset = 0;
	}

	data = (u64)page_address(nic->rb_page) + nic->rb_page_offset;

	/* Align buffer addr to cache line i.e. 128 bytes */
	rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data));
	/* Save page address for refcount update */
	rinfo->page = nic->rb_page;
	/* Store start address for later retrieval */
	rinfo->data = (void *)data;
	/* Store alignment offset */
	rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data);

	data += rinfo->offset;

	/* Give next aligned address to hw for DMA */
	*rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES);
	return 0;
}
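
/* On receive, the CQE carries only the DMA address that was posted above;
 * converting it back to a virtual address and stepping back
 * NICVF_RCV_BUF_ALIGN_BYTES lands on the rbuf_info saved at allocation time,
 * which is what the function below uses to rebuild an skb around the
 * original fragment.
 */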
/* Retrieve actual buffer start address and build skb for received packet */
static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
					   u64 rb_ptr, int len)
{
	struct sk_buff *skb;
	struct rbuf_info *rinfo;

	rb_ptr = (u64)phys_to_virt(rb_ptr);
	/* Get buffer start address and alignment offset */
	rinfo = GET_RBUF_INFO(rb_ptr);

	/* Now build an skb to give to stack */
	skb = build_skb(rinfo->data, RCV_FRAG_LEN);
	if (!skb) {
		put_page(rinfo->page);
		return NULL;
	}

	/* Set correct skb->data */
	skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES);

	prefetch((void *)rb_ptr);
	return skb;
}

/* Allocate RBDR ring and populate receive buffers */
static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
			   int ring_len, int buf_size)
{
	int idx;
	u64 *rbuf;
	struct rbdr_entry_t *desc;
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
				     sizeof(struct rbdr_entry_t),
				     NICVF_RCV_BUF_ALIGN_BYTES);
	if (err)
		return err;

	rbdr->desc = rbdr->dmem.base;
	/* Buffer size has to be in multiples of 128 bytes */
	rbdr->dma_size = buf_size;
	rbdr->enable = true;
	rbdr->thresh = RBDR_THRESH;

	nic->rb_page = NULL;
	for (idx = 0; idx < ring_len; idx++) {
		err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
					     &rbuf);
		if (err)
			return err;

		desc = GET_RBDR_DESC(rbdr, idx);
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
	}
	return 0;
}
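
/* When tearing an RBDR down, only the descriptors between the saved HW head
 * and tail still own buffers; the loop below walks that range and drops one
 * page reference per descriptor, and the tail descriptor is released
 * separately because the head == tail test ends the loop before reaching it.
 */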
/* Free RBDR ring and its receive buffers */
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
	int head, tail;
	u64 buf_addr;
	struct rbdr_entry_t *desc;
	struct rbuf_info *rinfo;

	if (!rbdr)
		return;

	rbdr->enable = false;
	if (!rbdr->dmem.base)
		return;

	head = rbdr->head;
	tail = rbdr->tail;

	/* Free SKBs */
	while (head != tail) {
		desc = GET_RBDR_DESC(rbdr, head);
		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
		rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
		put_page(rinfo->page);
		head++;
		head &= (rbdr->dmem.q_len - 1);
	}
	/* Free SKB of tail desc */
	desc = GET_RBDR_DESC(rbdr, tail);
	buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
	rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
	put_page(rinfo->page);

	/* Free RBDR ring */
	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}

/* Refill receive buffer descriptors with new buffers. */
static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
{
	struct queue_set *qs = nic->qs;
	int rbdr_idx = qs->rbdr_cnt;
	int tail, qcount;
	int refill_rb_cnt;
	struct rbdr *rbdr;
	struct rbdr_entry_t *desc;
	u64 *rbuf;
	int new_rb = 0;

refill:
	if (!rbdr_idx)
		return;
	rbdr_idx--;
	rbdr = &qs->rbdr[rbdr_idx];
	/* Check if it's enabled */
	if (!rbdr->enable)
		goto next_rbdr;

	/* Get no of desc's to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
	qcount &= 0x7FFFF;
	/* Doorbell can be rung with a max of ring size minus 1 */
	if (qcount >= (qs->rbdr_len - 1))
		goto next_rbdr;
	else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;

	/* Start filling descs from tail */
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
	while (refill_rb_cnt) {
		tail++;
		tail &= (rbdr->dmem.q_len - 1);

		if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
			break;

		desc = GET_RBDR_DESC(rbdr, tail);
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
		refill_rb_cnt--;
		new_rb++;
	}

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Check if buffer allocation failed */
	if (refill_rb_cnt)
		nic->rb_alloc_fail = true;
	else
		nic->rb_alloc_fail = false;

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      rbdr_idx, new_rb);
next_rbdr:
	/* Re-enable RBDR interrupts only if buffer allocation succeeded */
	if (!nic->rb_alloc_fail && rbdr->enable)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

	if (rbdr_idx)
		goto refill;
}
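
/* Refilling is attempted from two contexts: the tasklet below runs in
 * softirq context and must use GFP_ATOMIC, so when an atomic allocation
 * fails it defers to the delayed work item, which retries with GFP_KERNEL
 * (and may sleep) until all descriptors are backed by buffers again.
 */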
/* Alloc rcv buffers in non-atomic mode for better success */
void nicvf_rbdr_work(struct work_struct *work)
{
	struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);

	nicvf_refill_rbdr(nic, GFP_KERNEL);
	if (nic->rb_alloc_fail)
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	else
		nic->rb_work_scheduled = false;
}

/* In Softirq context, alloc rcv buffers in atomic mode */
void nicvf_rbdr_task(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;

	nicvf_refill_rbdr(nic, GFP_ATOMIC);
	if (nic->rb_alloc_fail) {
		nic->rb_work_scheduled = true;
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	}
}

/* Initialize completion queue */
static int nicvf_init_cmp_queue(struct nicvf *nic,
				struct cmp_queue *cq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
				     NICVF_CQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	cq->desc = cq->dmem.base;
	cq->thresh = CMP_QUEUE_CQE_THRESH;
	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;

	return 0;
}

static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{
	if (!cq)
		return;
	if (!cq->dmem.base)
		return;

	nicvf_free_q_desc_mem(nic, &cq->dmem);
}
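
/* The send queue keeps two per-descriptor side structures: sq->skbuff maps
 * a descriptor index to the skb that owns it (so it can be freed on CQE
 * completion), and sq->tso_hdrs is a DMA-coherent area with one
 * TSO_HEADER_SIZE slot per descriptor used to hold the rebuilt headers of
 * software-segmented TSO packets.
 */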
/* Initialize transmit queue */
static int nicvf_init_snd_queue(struct nicvf *nic,
				struct snd_queue *sq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
				     NICVF_SQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	sq->desc = sq->dmem.base;
	sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
	if (!sq->skbuff)
		return -ENOMEM;
	sq->head = 0;
	sq->tail = 0;
	atomic_set(&sq->free_cnt, q_len - 1);
	sq->thresh = SND_QUEUE_THRESH;

	/* Preallocate memory for TSO segment's header */
	sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
					  q_len * TSO_HEADER_SIZE,
					  &sq->tso_hdrs_phys, GFP_KERNEL);
	if (!sq->tso_hdrs)
		return -ENOMEM;

	return 0;
}

static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
	if (!sq)
		return;
	if (!sq->dmem.base)
		return;

	if (sq->tso_hdrs)
		/* Size must match the q_len * TSO_HEADER_SIZE allocation above */
		dma_free_coherent(&nic->pdev->dev,
				  sq->dmem.q_len * TSO_HEADER_SIZE,
				  sq->tso_hdrs, sq->tso_hdrs_phys);

	kfree(sq->skbuff);
	nicvf_free_q_desc_mem(nic, &sq->dmem);
}

static void nicvf_reclaim_snd_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
	/* Check if SQ is stopped */
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
		return;
	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}

static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	union nic_mbx mbx = {};

	/* Make sure all packets in the pipeline are written back into mem */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}

static void nicvf_reclaim_rbdr(struct nicvf *nic,
			       struct rbdr *rbdr, int qidx)
{
	u64 tmp, fifo_state;
	int timeout = 10;

	/* Save head and tail pointers for freeing up buffers */
	rbdr->head = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_HEAD,
					  qidx) >> 3;
	rbdr->tail = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_TAIL,
					  qidx) >> 3;

	/* If RBDR FIFO is in 'FAIL' state then do a reset first
	 * before reclaiming.
	 */
	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
	if (((fifo_state >> 62) & 0x03) == 0x3)
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
				      qidx, NICVF_RBDR_RESET);

	/* Disable RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
	while (1) {
		tmp = nicvf_queue_reg_read(nic,
					   NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
					   qidx);
		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
			break;
		usleep_range(1000, 2000);
		timeout--;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "Failed polling on prefetch status\n");
			return;
		}
	}
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, NICVF_RBDR_RESET);

	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
}
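
/* Receive queue setup is largely done by the PF on the VF's behalf: the RQ
 * config, backpressure and drop levels below are sent as mailbox messages,
 * and only the final RQ enable is written directly to a VF register.
 */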
/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct rcv_queue *rq;
	struct rq_cfg rq_cfg;

	rq = &qs->rq[qidx];
	rq->enable = enable;

	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);

	if (!rq->enable) {
		nicvf_reclaim_rcv_queue(nic, qs, qidx);
		return;
	}

	rq->cq_qs = qs->vnic_id;
	rq->cq_idx = qidx;
	rq->start_rbdr_qs = qs->vnic_id;
	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
	rq->cont_rbdr_qs = qs->vnic_id;
	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
	/* all writes of RBDR data to be loaded into L2 Cache as well */
	rq->caching = 1;

	/* Send a mailbox msg to PF to config RQ */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
	mbx.rq.qs_num = qs->vnic_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
		     (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
		     (rq->cont_qs_rbdr_idx << 8) |
		     (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
	nicvf_send_msg_to_pf(nic, &mbx);

	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
	mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
	nicvf_send_msg_to_pf(nic, &mbx);

	/* RQ drop config
	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
	 */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
	mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
	nicvf_send_msg_to_pf(nic, &mbx);

	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, qidx, 0x00);

	/* Enable Receive queue */
	rq_cfg.ena = 1;
	rq_cfg.tcp_ena = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
}
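
/* For the completion queue below, the CQE-count threshold (cq->thresh) and
 * the CFG2 timer threshold (nic->cq_coalesce_usecs) are the two knobs the
 * driver programs for CQ interrupt moderation.
 */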
/* Configures completion queue */
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable)
{
	struct cmp_queue *cq;
	struct cq_cfg cq_cfg;

	cq = &qs->cq[qidx];
	cq->enable = enable;

	if (!cq->enable) {
		nicvf_reclaim_cmp_queue(nic, qs, qidx);
		return;
	}

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

	if (!cq->enable)
		return;

	spin_lock_init(&cq->lock);
	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
			      qidx, (u64)(cq->dmem.phys_base));

	/* Enable Completion queue */
	cq_cfg.ena = 1;
	cq_cfg.reset = 0;
	cq_cfg.caching = 0;
	cq_cfg.qsize = CMP_QSIZE;
	cq_cfg.avg_con = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
			      qidx, nic->cq_coalesce_usecs);
}

/* Configures transmit queue */
static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct snd_queue *sq;
	struct sq_cfg sq_cfg;

	sq = &qs->sq[qidx];
	sq->enable = enable;

	if (!sq->enable) {
		nicvf_reclaim_snd_queue(nic, qs, qidx);
		return;
	}

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

	sq->cq_qs = qs->vnic_id;
	sq->cq_idx = qidx;

	/* Send a mailbox msg to PF to config SQ */
	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
	mbx.sq.qs_num = qs->vnic_id;
	mbx.sq.sq_num = qidx;
	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
			      qidx, (u64)(sq->dmem.phys_base));

	/* Enable send queue & set queue size */
	sq_cfg.ena = 1;
	sq_cfg.reset = 0;
	sq_cfg.ldwb = 0;
	sq_cfg.qsize = SND_QSIZE;
	sq_cfg.tstmp_bgx_intf = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);

	/* Set queue:cpu affinity for better load distribution */
	if (cpu_online(qidx)) {
		cpumask_set_cpu(qidx, &sq->affinity_mask);
		netif_set_xps_queue(nic->netdev,
				    &sq->affinity_mask, qidx);
	}
}
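
/* The RBDR config below expresses the buffer size in units of 128-byte
 * cache lines (rbdr_cfg.lines), and the initial doorbell write of
 * rbdr_len - 1 hands all but one descriptor to the HW (the doorbell is only
 * ever rung with at most ring size minus 1, as noted in nicvf_refill_rbdr()).
 */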
/* Configures receive buffer descriptor ring */
static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
			      int qidx, bool enable)
{
	struct rbdr *rbdr;
	struct rbdr_cfg rbdr_cfg;

	rbdr = &qs->rbdr[qidx];
	nicvf_reclaim_rbdr(nic, rbdr, qidx);
	if (!enable)
		return;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
			      qidx, (u64)(rbdr->dmem.phys_base));

	/* Enable RBDR & set queue size */
	/* Buffer size should be in multiples of 128 bytes */
	rbdr_cfg.ena = 1;
	rbdr_cfg.reset = 0;
	rbdr_cfg.ldwb = 0;
	rbdr_cfg.qsize = RBDR_SIZE;
	rbdr_cfg.avg_con = 0;
	rbdr_cfg.lines = rbdr->dma_size / 128;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, *(u64 *)&rbdr_cfg);

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      qidx, qs->rbdr_len - 1);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
			      qidx, rbdr->thresh - 1);
}

/* Requests PF to assign and enable Qset */
void nicvf_qset_config(struct nicvf *nic, bool enable)
{
	union nic_mbx mbx = {};
	struct queue_set *qs = nic->qs;
	struct qs_cfg *qs_cfg;

	if (!qs) {
		netdev_warn(nic->netdev,
			    "Qset is still not allocated, don't init queues\n");
		return;
	}

	qs->enable = enable;
	qs->vnic_id = nic->vf_id;

	/* Send a mailbox msg to PF to config Qset */
	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
	mbx.qs.num = qs->vnic_id;

	mbx.qs.cfg = 0;
	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
	if (qs->enable) {
		qs_cfg->ena = 1;
#ifdef __BIG_ENDIAN
		qs_cfg->be = 1;
#endif
		qs_cfg->vnic = qs->vnic_id;
	}
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_free_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Free receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

	/* Free completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

	/* Free send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}

static int nicvf_alloc_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Alloc receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
				    DMA_BUFFER_LEN))
			goto alloc_fail;
	}

	/* Alloc send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
			goto alloc_fail;
	}

	/* Alloc completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
			goto alloc_fail;
	}

	return 0;
alloc_fail:
	nicvf_free_resources(nic);
	return -ENOMEM;
}

int nicvf_set_qset_resources(struct nicvf *nic)
{
	struct queue_set *qs;

	qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
	if (!qs)
		return -ENOMEM;
	nic->qs = qs;

	/* Set count of each queue */
	qs->rbdr_cnt = RBDR_CNT;
	qs->rq_cnt = RCV_QUEUE_CNT;
	qs->sq_cnt = SND_QUEUE_CNT;
	qs->cq_cnt = CMP_QUEUE_CNT;

	/* Set queue lengths */
	qs->rbdr_len = RCV_BUF_COUNT;
	qs->sq_len = SND_QUEUE_LEN;
	qs->cq_len = CMP_QUEUE_LEN;
	return 0;
}

int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
{
	bool disable = false;
	struct queue_set *qs = nic->qs;
	int qidx;

	if (!qs)
		return 0;

	if (enable) {
		if (nicvf_alloc_resources(nic))
			return -ENOMEM;

		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, enable);
	} else {
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, disable);

		nicvf_free_resources(nic);
	}

	return 0;
}
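
/* SQ descriptor accounting: sq->tail is where new subdescriptors are written
 * by the xmit path, sq->head is advanced as completions are processed, and
 * sq->free_cnt tracks how many slots remain so that nicvf_sq_append_skb()
 * can refuse a packet that would not fit.
 */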
/* Get a free desc from SQ
 * returns descriptor pointer & descriptor number
 */
static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	int qentry;

	qentry = sq->tail;
	atomic_sub(desc_cnt, &sq->free_cnt);
	sq->tail += desc_cnt;
	sq->tail &= (sq->dmem.q_len - 1);

	return qentry;
}

/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	atomic_add(desc_cnt, &sq->free_cnt);
	sq->head += desc_cnt;
	sq->head &= (sq->dmem.q_len - 1);
}

static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{
	qentry++;
	qentry &= (sq->dmem.q_len - 1);
	return qentry;
}

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg |= NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
	/* Ring doorbell so that H/W restarts processing SQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}

void nicvf_sq_disable(struct nicvf *nic, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg &= ~NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}

void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
			      int qidx)
{
	u64 head, tail;
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	struct sq_hdr_subdesc *hdr;

	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
	while (sq->head != head) {
		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
			nicvf_put_sq_desc(sq, 1);
			continue;
		}
		skb = (struct sk_buff *)sq->skbuff[sq->head];
		atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
		atomic64_add(hdr->tot_len,
			     (atomic64_t *)&netdev->stats.tx_bytes);
		dev_kfree_skb_any(skb);
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
}

/* Calculate no of SQ subdescriptors needed to transmit all
 * segments of this TSO packet.
 * Taken from 'Tilera network driver' with a minor modification.
 */
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	unsigned int data_len = skb->len - sh_len;
	unsigned int p_len = sh->gso_size;
	long f_id = -1;	/* id of the current fragment */
	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
	long f_used = 0;  /* bytes used from the current fragment */
	long n;		/* size of the current piece of payload */
	int num_edescs = 0;
	int segment;

	for (segment = 0; segment < sh->gso_segs; segment++) {
		unsigned int p_used = 0;

		/* One edesc for header and for each piece of the payload. */
		for (num_edescs++; p_used < p_len; num_edescs++) {
			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = skb_frag_size(&sh->frags[f_id]);
				f_used = 0;
			}

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
		}

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	/* '+ gso_segs' for SQ_HDR_SUBDESCs for each segment */
	return num_edescs + sh->gso_segs;
}

/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;

	if (skb_shinfo(skb)->gso_size) {
		subdesc_cnt = nicvf_tso_count_subdescs(skb);
		return subdesc_cnt;
	}

	if (skb_shinfo(skb)->nr_frags)
		subdesc_cnt += skb_shinfo(skb)->nr_frags;

	return subdesc_cnt;
}

/* Add SQ HEADER subdescriptor.
 * First subdescriptor for every send descriptor.
 */
static inline void
nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
			 int subdesc_cnt, struct sk_buff *skb, int len)
{
	int proto;
	struct sq_hdr_subdesc *hdr;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	sq->skbuff[qentry] = (u64)skb;

	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
	/* Enable notification via CQE after processing SQE */
	hdr->post_cqe = 1;
	/* No of subdescriptors following this */
	hdr->subdesc_cnt = subdesc_cnt;
	hdr->tot_len = len;

	/* Offload checksum calculation to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->protocol != htons(ETH_P_IP))
			return;

		hdr->csum_l3 = 1; /* Enable IP csum calculation */
		hdr->l3_offset = skb_network_offset(skb);
		hdr->l4_offset = skb_transport_offset(skb);

		proto = ip_hdr(skb)->protocol;
		switch (proto) {
		case IPPROTO_TCP:
			hdr->csum_l4 = SEND_L4_CSUM_TCP;
			break;
		case IPPROTO_UDP:
			hdr->csum_l4 = SEND_L4_CSUM_UDP;
			break;
		case IPPROTO_SCTP:
			hdr->csum_l4 = SEND_L4_CSUM_SCTP;
			break;
		}
	}
}

/* SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
					       int size, u64 data)
{
	struct sq_gather_subdesc *gather;

	qentry &= (sq->dmem.q_len - 1);
	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);

	memset(gather, 0, SND_QUEUE_DESC_SIZE);
	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
	gather->ld_type = NIC_SEND_LD_TYPE_E_LDWB;
	gather->size = size;
	gather->addr = data;
}
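
/* Software TSO: each segment gets its own HDR subdescriptor, a GATHER
 * subdescriptor pointing at a per-segment header rebuilt in sq->tso_hdrs,
 * and further GATHER subdescriptors pointing into the original skb's
 * payload. The skb pointer is stored only against the last segment's HDR
 * entry so the skb is freed exactly once when that segment completes.
 */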
/* Segment a TSO packet into 'gso_size' segments and append
 * them to SQ for transfer
 */
static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
			       int qentry, struct sk_buff *skb)
{
	struct tso_t tso;
	int seg_subdescs = 0, desc_cnt = 0;
	int seg_len, total_len, data_left;
	int hdr_qentry = qentry;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	tso_start(skb, &tso);
	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		/* Save Qentry for adding HDR_SUBDESC at the end */
		hdr_qentry = qentry;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* Add segment's header */
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
					    sq->tso_hdrs_phys +
					    qentry * TSO_HEADER_SIZE);
		/* HDR_SUBDESC + GATHER */
		seg_subdescs = 2;
		seg_len = hdr_len;

		/* Add segment's payload fragments */
		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);

			qentry = nicvf_get_nxt_sqentry(sq, qentry);
			nicvf_sq_add_gather_subdesc(sq, qentry, size,
						    virt_to_phys(tso.data));
			seg_subdescs++;
			seg_len += size;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
		nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
					 seg_subdescs - 1, skb, seg_len);
		sq->skbuff[hdr_qentry] = 0;
		qentry = nicvf_get_nxt_sqentry(sq, qentry);

		desc_cnt += seg_subdescs;
	}
	/* Save SKB in the last segment for freeing */
	sq->skbuff[hdr_qentry] = (u64)skb;

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit all TSO segments */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      skb_get_queue_mapping(skb), desc_cnt);
	return 1;
}

/* Append an skb to a SQ for packet transfer. */
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
{
	int i, size;
	int subdesc_cnt;
	int sq_num, qentry;
	struct queue_set *qs = nic->qs;
	struct snd_queue *sq;

	sq_num = skb_get_queue_mapping(skb);
	sq = &qs->sq[sq_num];

	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
	if (subdesc_cnt > atomic_read(&sq->free_cnt))
		goto append_fail;

	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

	/* Check if its a TSO packet */
	if (skb_shinfo(skb)->gso_size)
		return nicvf_sq_append_tso(nic, sq, qentry, skb);

	/* Add SQ header subdesc */
	nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);

	/* Add SQ gather subdescs */
	qentry = nicvf_get_nxt_sqentry(sq, qentry);
	size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
	nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));

	/* Check for scattered buffer */
	if (!skb_is_nonlinear(skb))
		goto doorbell;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[i];

		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		size = skb_frag_size(frag);
		nicvf_sq_add_gather_subdesc(sq, qentry, size,
					    virt_to_phys(
					    skb_frag_address(frag)));
	}

doorbell:
	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit new packet */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, subdesc_cnt);
	return 1;

append_fail:
	netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
	return 0;
}
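
/* The CQE_RX descriptor packs the per-buffer lengths as 16-bit fields
 * inside 64-bit words; frag_num() reverses the index within each word on
 * big-endian so that rb_lens[frag_num(i)] always matches rb_ptrs[i].
 */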
static inline unsigned frag_num(unsigned i)
{
#ifdef __BIG_ENDIAN
	return (i & ~3) + 3 - (i & 3);
#else
	return i;
#endif
}

/* Returns SKB for a received packet */
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	int frag;
	int payload_len = 0;
	struct sk_buff *skb = NULL;
	struct sk_buff *skb_frag = NULL;
	struct sk_buff *prev_frag = NULL;
	u16 *rb_lens = NULL;
	u64 *rb_ptrs = NULL;

	rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
	rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));

	netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
		   __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);

	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
		payload_len = rb_lens[frag_num(frag)];

		if (!frag) {
			/* First fragment */
			skb = nicvf_rb_ptr_to_skb(nic,
						  *rb_ptrs - cqe_rx->align_pad,
						  payload_len);
			if (!skb)
				return NULL;
			skb_reserve(skb, cqe_rx->align_pad);
			skb_put(skb, payload_len);
		} else {
			/* Add fragments */
			skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
						       payload_len);
			if (!skb_frag) {
				dev_kfree_skb(skb);
				return NULL;
			}

			if (!skb_shinfo(skb)->frag_list)
				skb_shinfo(skb)->frag_list = skb_frag;
			else
				prev_frag->next = skb_frag;

			prev_frag = skb_frag;
			skb->len += payload_len;
			skb->data_len += payload_len;
			skb_frag->len = payload_len;
		}
		/* Next buffer pointer */
		rb_ptrs++;
	}
	return skb;
}

/* Enable interrupt */
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 reg_val;

	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		netdev_err(nic->netdev,
			   "Failed to enable interrupt: unknown type\n");
		break;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
}

/* Disable interrupt */
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 reg_val = 0;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		netdev_err(nic->netdev,
			   "Failed to disable interrupt: unknown type\n");
		break;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
}

/* Clear interrupt */
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 reg_val = 0;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		netdev_err(nic->netdev,
			   "Failed to clear interrupt: unknown type\n");
		break;
	}

	nicvf_reg_write(nic, NIC_VF_INT, reg_val);
}

/* Check if interrupt is enabled */
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
	u64 reg_val;
	u64 mask = 0xff;

	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);

	switch (int_type) {
	case NICVF_INTR_CQ:
		mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		mask = NICVF_INTR_PKT_DROP_MASK;
		break;
	case NICVF_INTR_TCP_TIMER:
		mask = NICVF_INTR_TCP_TIMER_MASK;
		break;
	case NICVF_INTR_MBOX:
		mask = NICVF_INTR_MBOX_MASK;
		break;
	case NICVF_INTR_QS_ERR:
		mask = NICVF_INTR_QS_ERR_MASK;
		break;
	default:
		netdev_err(nic->netdev,
			   "Failed to check interrupt enable: unknown type\n");
		break;
	}

	return (reg_val & mask);
}
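
/* The per-queue HW statistics registers are addressed by OR'ing the queue
 * index (shifted by NIC_Q_NUM_SHIFT) and the statistic index (shifted by 3,
 * i.e. in units of 8-byte registers) into the block's base offset, which is
 * what the GET_RQ_STATS/GET_SQ_STATS macros below encode.
 */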
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
{
	struct rcv_queue *rq;

#define GET_RQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
			    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	rq = &nic->qs->rq[rq_idx];
	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
}

void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
{
	struct snd_queue *sq;

#define GET_SQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
			    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	sq = &nic->qs->sq[sq_idx];
	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
}

/* Check for errors in the receive cmp.queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
{
	struct cmp_queue_stats *stats = &cq->stats;

	if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
		stats->rx.errop.good++;
		return 0;
	}

	if (netif_msg_rx_err(nic))
		netdev_err(nic->netdev,
			   "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
			   nic->netdev->name,
			   cqe_rx->err_level, cqe_rx->err_opcode);

	switch (cqe_rx->err_level) {
	case CQ_ERRLVL_MAC:
		stats->rx.errlvl.mac_errs++;
		break;
	case CQ_ERRLVL_L2:
		stats->rx.errlvl.l2_errs++;
		break;
	case CQ_ERRLVL_L3:
		stats->rx.errlvl.l3_errs++;
		break;
	case CQ_ERRLVL_L4:
		stats->rx.errlvl.l4_errs++;
		break;
	}

	switch (cqe_rx->err_opcode) {
	case CQ_RX_ERROP_RE_PARTIAL:
		stats->rx.errop.partial_pkts++;
		break;
	case CQ_RX_ERROP_RE_JABBER:
		stats->rx.errop.jabber_errs++;
		break;
	case CQ_RX_ERROP_RE_FCS:
		stats->rx.errop.fcs_errs++;
		break;
	case CQ_RX_ERROP_RE_TERMINATE:
		stats->rx.errop.terminate_errs++;
		break;
	case CQ_RX_ERROP_RE_RX_CTL:
		stats->rx.errop.bgx_rx_errs++;
		break;
	case CQ_RX_ERROP_PREL2_ERR:
		stats->rx.errop.prel2_errs++;
		break;
	case CQ_RX_ERROP_L2_FRAGMENT:
		stats->rx.errop.l2_frags++;
		break;
	case CQ_RX_ERROP_L2_OVERRUN:
		stats->rx.errop.l2_overruns++;
		break;
	case CQ_RX_ERROP_L2_PFCS:
		stats->rx.errop.l2_pfcs++;
		break;
	case CQ_RX_ERROP_L2_PUNY:
		stats->rx.errop.l2_puny++;
		break;
	case CQ_RX_ERROP_L2_MAL:
		stats->rx.errop.l2_hdr_malformed++;
		break;
	case CQ_RX_ERROP_L2_OVERSIZE:
		stats->rx.errop.l2_oversize++;
		break;
	case CQ_RX_ERROP_L2_UNDERSIZE:
		stats->rx.errop.l2_undersize++;
		break;
	case CQ_RX_ERROP_L2_LENMISM:
		stats->rx.errop.l2_len_mismatch++;
		break;
	case CQ_RX_ERROP_L2_PCLP:
		stats->rx.errop.l2_pclp++;
		break;
	case CQ_RX_ERROP_IP_NOT:
		stats->rx.errop.non_ip++;
		break;
	case CQ_RX_ERROP_IP_CSUM_ERR:
		stats->rx.errop.ip_csum_err++;
		break;
	case CQ_RX_ERROP_IP_MAL:
		stats->rx.errop.ip_hdr_malformed++;
		break;
	case CQ_RX_ERROP_IP_MALD:
		stats->rx.errop.ip_payload_malformed++;
		break;
	case CQ_RX_ERROP_IP_HOP:
		stats->rx.errop.ip_hop_errs++;
		break;
	case CQ_RX_ERROP_L3_ICRC:
		stats->rx.errop.l3_icrc_errs++;
		break;
	case CQ_RX_ERROP_L3_PCLP:
		stats->rx.errop.l3_pclp++;
		break;
	case CQ_RX_ERROP_L4_MAL:
		stats->rx.errop.l4_malformed++;
		break;
	case CQ_RX_ERROP_L4_CHK:
		stats->rx.errop.l4_csum_errs++;
		break;
	case CQ_RX_ERROP_UDP_LEN:
		stats->rx.errop.udp_len_err++;
		break;
	case CQ_RX_ERROP_L4_PORT:
		stats->rx.errop.bad_l4_port++;
		break;
	case CQ_RX_ERROP_TCP_FLAG:
		stats->rx.errop.bad_tcp_flag++;
		break;
	case CQ_RX_ERROP_TCP_OFFSET:
		stats->rx.errop.tcp_offset_errs++;
		break;
	case CQ_RX_ERROP_L4_PCLP:
		stats->rx.errop.l4_pclp++;
		break;
	case CQ_RX_ERROP_RBDR_TRUNC:
		stats->rx.errop.pkt_truncated++;
		break;
	}

	return 1;
}

/* Check for errors in the send cmp.queue entry */
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
{
	struct cmp_queue_stats *stats = &cq->stats;

	switch (cqe_tx->send_status) {
	case CQ_TX_ERROP_GOOD:
		stats->tx.good++;
		return 0;
	case CQ_TX_ERROP_DESC_FAULT:
		stats->tx.desc_fault++;
		break;
	case CQ_TX_ERROP_HDR_CONS_ERR:
		stats->tx.hdr_cons_err++;
		break;
	case CQ_TX_ERROP_SUBDC_ERR:
		stats->tx.subdesc_err++;
		break;
	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
		stats->tx.imm_size_oflow++;
		break;
	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
		stats->tx.data_seq_err++;
		break;
	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
		stats->tx.mem_seq_err++;
		break;
	case CQ_TX_ERROP_LOCK_VIOL:
		stats->tx.lock_viol++;
		break;
	case CQ_TX_ERROP_DATA_FAULT:
		stats->tx.data_fault++;
		break;
	case CQ_TX_ERROP_TSTMP_CONFLICT:
		stats->tx.tstmp_conflict++;
		break;
	case CQ_TX_ERROP_TSTMP_TIMEOUT:
		stats->tx.tstmp_timeout++;
		break;
	case CQ_TX_ERROP_MEM_FAULT:
		stats->tx.mem_fault++;
		break;
	case CQ_TX_ERROP_CK_OVERLAP:
		stats->tx.csum_overlap++;
		break;
	case CQ_TX_ERROP_CK_OFLOW:
		stats->tx.csum_overflow++;
		break;
	}

	return 1;
}