/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"

struct qed_pi_info {
        qed_int_comp_cb_t comp_cb;
        void *cookie;
};

struct qed_sb_sp_info {
        struct qed_sb_info sb_info;

        /* per protocol index data */
        struct qed_pi_info pi_info_arr[PIS_PER_SB_E4];
};

enum qed_attention_type {
        QED_ATTN_TYPE_ATTN,
        QED_ATTN_TYPE_PARITY,
};

#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
        ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)

struct aeu_invert_reg_bit {
        char bit_name[30];

#define ATTENTION_PARITY                (1 << 0)

#define ATTENTION_LENGTH_MASK           (0x00000ff0)
#define ATTENTION_LENGTH_SHIFT          (4)
#define ATTENTION_LENGTH(flags)         (((flags) & ATTENTION_LENGTH_MASK) >> \
                                         ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE                BIT(ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR                   (ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT               ((2 << ATTENTION_LENGTH_SHIFT) | \
                                         ATTENTION_PARITY)

/* Multiple bits start with this offset */
#define ATTENTION_OFFSET_MASK           (0x000ff000)
#define ATTENTION_OFFSET_SHIFT          (12)

#define ATTENTION_BB_MASK               (0x00700000)
#define ATTENTION_BB_SHIFT              (20)
#define ATTENTION_BB(value)             ((value) << ATTENTION_BB_SHIFT)
#define ATTENTION_BB_DIFFERENT          BIT(23)
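
/* Illustrative encoding examples (added; not from the original source):
 * ATTENTION_PAR_INT describes a two-bit pair - a parity bit followed by an
 * interrupt bit - while a flags value such as
 * ((8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT)) describes
 * eight consecutive AEU bits whose printed names are numbered starting at 1.
 */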

        unsigned int flags;

        /* Callback to call if attention is triggered */
        int (*cb)(struct qed_hwfn *p_hwfn);

        enum block_id block_index;
};

struct aeu_invert_reg {
        struct aeu_invert_reg_bit bits[32];
};

#define MAX_ATTN_GRPS           (8)
#define NUM_ATTN_REGS           (9)

/* Specific HW attention callbacks */
static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn)
{
        u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);

        /* This might occur on certain instances; Log it once then mask it */
        DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
                tmp);
        qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
               0xffffffff);

        return 0;
}

#define QED_PSWHST_ATTENTION_INCORRECT_ACCESS           (0x1)
#define ATTENTION_INCORRECT_ACCESS_WR_MASK              (0x1)
#define ATTENTION_INCORRECT_ACCESS_WR_SHIFT             (0)
#define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK          (0xf)
#define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT         (1)
#define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK        (0x1)
#define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT       (5)
#define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK           (0xff)
#define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT          (6)
#define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK           (0xf)
#define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT          (14)
#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK         (0xff)
#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT        (18)

static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn)
{
        u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                         PSWHST_REG_INCORRECT_ACCESS_VALID);

        if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) {
                u32 addr, data, length;

                addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                              PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
                data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                              PSWHST_REG_INCORRECT_ACCESS_DATA);
                length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                                PSWHST_REG_INCORRECT_ACCESS_LENGTH);

                DP_INFO(p_hwfn->cdev,
                        "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n",
                        addr, length,
                        (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID),
                        (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID),
                        (u8) GET_FIELD(data,
                                       ATTENTION_INCORRECT_ACCESS_VF_VALID),
                        (u8) GET_FIELD(data,
                                       ATTENTION_INCORRECT_ACCESS_CLIENT),
                        (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR),
                        (u8) GET_FIELD(data,
                                       ATTENTION_INCORRECT_ACCESS_BYTE_EN),
                        data);
        }

        return 0;
}

#define QED_GRC_ATTENTION_VALID_BIT     (1 << 0)
#define QED_GRC_ATTENTION_ADDRESS_MASK  (0x7fffff)
#define QED_GRC_ATTENTION_ADDRESS_SHIFT (0)
#define QED_GRC_ATTENTION_RDWR_BIT      (1 << 23)
#define QED_GRC_ATTENTION_MASTER_MASK   (0xf)
#define QED_GRC_ATTENTION_MASTER_SHIFT  (24)
#define QED_GRC_ATTENTION_PF_MASK       (0xf)
#define QED_GRC_ATTENTION_PF_SHIFT      (0)
#define QED_GRC_ATTENTION_VF_MASK       (0xff)
#define QED_GRC_ATTENTION_VF_SHIFT      (4)
#define QED_GRC_ATTENTION_PRIV_MASK     (0x3)
#define QED_GRC_ATTENTION_PRIV_SHIFT    (14)
#define QED_GRC_ATTENTION_PRIV_VF       (0)

static const char *attn_master_to_str(u8 master)
{
        switch (master) {
        case 1: return "PXP";
        case 2: return "MCP";
        case 3: return "MSDM";
        case 4: return "PSDM";
        case 5: return "YSDM";
        case 6: return "USDM";
        case 7: return "TSDM";
        case 8: return "XSDM";
        case 9: return "DBU";
        case 10: return "DMAE";
        default:
                return "Unknown";
        }
}

static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
{
        u32 tmp, tmp2;

        /* We've already cleared the timeout interrupt register, so we learn
         * of interrupts via the validity register.
         */
        tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                     GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
        if (!(tmp & QED_GRC_ATTENTION_VALID_BIT))
                goto out;

        /* Read the GRC timeout information */
        tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                     GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
        tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                      GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);

        DP_INFO(p_hwfn->cdev,
                "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
                tmp2, tmp,
                (tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
                GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2,
                attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
                GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
                (GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
                 QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)",
                GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));

out:
        /* Regardless of anything else, clear the validity bit */
        qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
               GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);

        return 0;
}

#define PGLUE_ATTENTION_VALID                   (1 << 29)
#define PGLUE_ATTENTION_RD_VALID                (1 << 26)
#define PGLUE_ATTENTION_DETAILS_PFID_MASK       (0xf)
#define PGLUE_ATTENTION_DETAILS_PFID_SHIFT      (20)
#define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK   (0x1)
#define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT  (19)
#define PGLUE_ATTENTION_DETAILS_VFID_MASK       (0xff)
#define PGLUE_ATTENTION_DETAILS_VFID_SHIFT      (24)
#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK   (0x1)
#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT  (21)
#define PGLUE_ATTENTION_DETAILS2_BME_MASK       (0x1)
#define PGLUE_ATTENTION_DETAILS2_BME_SHIFT      (22)
#define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK    (0x1)
#define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT   (23)
#define PGLUE_ATTENTION_ICPL_VALID              (1 << 23)
#define PGLUE_ATTENTION_ZLR_VALID               (1 << 25)
#define PGLUE_ATTENTION_ILT_VALID               (1 << 23)

static int qed_pglub_rbc_attn_cb(struct qed_hwfn *p_hwfn)
{
        u32 tmp;

        tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                     PGLUE_B_REG_TX_ERR_WR_DETAILS2);
        if (tmp & PGLUE_ATTENTION_VALID) {
                u32 addr_lo, addr_hi, details;

                addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                                 PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
                addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                                 PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
                details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                                 PGLUE_B_REG_TX_ERR_WR_DETAILS);

                DP_INFO(p_hwfn,
                        "Illegal write by chip to [%08x:%08x] blocked.\n"
                        "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
                        "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
                        addr_hi, addr_lo, details,
                        (u8) GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
                        (u8) GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
                        GET_FIELD(details,
                                  PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
                        tmp,
                        GET_FIELD(tmp,
                                  PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
                        GET_FIELD(tmp,
                                  PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
                        GET_FIELD(tmp,
                                  PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
        }

        tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                     PGLUE_B_REG_TX_ERR_RD_DETAILS2);
        if (tmp & PGLUE_ATTENTION_RD_VALID) {
                u32 addr_lo, addr_hi, details;

                addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                                 PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
                addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                                 PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
                details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                                 PGLUE_B_REG_TX_ERR_RD_DETAILS);

                DP_INFO(p_hwfn,
                        "Illegal read by chip from [%08x:%08x] blocked.\n"
                        "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
                        "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
                        addr_hi, addr_lo, details,
                        (u8) GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
                        (u8) GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
                        GET_FIELD(details,
                                  PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
                        tmp,
                        GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
                                                                         : 0,
                        GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
                        GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
                                                                        : 0);
        }

        tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                     PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
        if (tmp & PGLUE_ATTENTION_ICPL_VALID)
                DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp);

        tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                     PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
        if (tmp & PGLUE_ATTENTION_ZLR_VALID) {
                u32 addr_hi, addr_lo;

                addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                                 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
                addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                                 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);

                DP_INFO(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n",
                        tmp, addr_hi, addr_lo);
        }

        tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                     PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
        if (tmp & PGLUE_ATTENTION_ILT_VALID) {
                u32 addr_hi, addr_lo, details;

                addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                                 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
                addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                                 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
                details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                                 PGLUE_B_REG_VF_ILT_ERR_DETAILS);

                DP_INFO(p_hwfn,
                        "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
                        details, tmp, addr_hi, addr_lo);
        }

        /* Clear the indications */
        qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
               PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));

        return 0;
}

#define QED_DORQ_ATTENTION_REASON_MASK  (0xfffff)
#define QED_DORQ_ATTENTION_OPAQUE_MASK  (0xffff)
#define QED_DORQ_ATTENTION_SIZE_MASK    (0x7f)
#define QED_DORQ_ATTENTION_SIZE_SHIFT   (16)

static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
{
        u32 reason;

        reason = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
                 QED_DORQ_ATTENTION_REASON_MASK;
        if (reason) {
                u32 details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                                     DORQ_REG_DB_DROP_DETAILS);

                DP_INFO(p_hwfn->cdev,
                        "DORQ db_drop: address 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n",
                        qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                               DORQ_REG_DB_DROP_DETAILS_ADDRESS),
                        (u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK),
                        GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
                        reason);
        }

        return -EINVAL;
}

/* Instead of major changes to the data-structure, we have some 'special'
 * identifiers for sources that changed meaning between adapters.
 */
enum aeu_invert_reg_special_type {
        AEU_INVERT_REG_SPECIAL_CNIG_0,
        AEU_INVERT_REG_SPECIAL_CNIG_1,
        AEU_INVERT_REG_SPECIAL_CNIG_2,
        AEU_INVERT_REG_SPECIAL_CNIG_3,
        AEU_INVERT_REG_SPECIAL_MAX,
};

static struct aeu_invert_reg_bit
aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
        {"CNIG port 0", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
        {"CNIG port 1", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
        {"CNIG port 2", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
        {"CNIG port 3", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
};

/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
        {
                {       /* After Invert 1 */
                        {"GPIO0 function%d",
                         (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
                }
        },

        {
                {       /* After Invert 2 */
                        {"PGLUE config_space", ATTENTION_SINGLE,
                         NULL, MAX_BLOCK_ID},
                        {"PGLUE misc_flr", ATTENTION_SINGLE,
                         NULL, MAX_BLOCK_ID},
                        {"PGLUE B RBC", ATTENTION_PAR_INT,
                         qed_pglub_rbc_attn_cb, BLOCK_PGLUE_B},
                        {"PGLUE misc_mctp", ATTENTION_SINGLE,
                         NULL, MAX_BLOCK_ID},
                        {"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
                        {"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
                        {"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
                        {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) |
                                          (1 << ATTENTION_OFFSET_SHIFT),
                         NULL, MAX_BLOCK_ID},
                        {"PCIE glue/PXP VPD %d",
                         (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS},
                }
        },

        {
                {       /* After Invert 3 */
                        {"General Attention %d",
                         (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
                }
        },

        {
                {       /* After Invert 4 */
                        {"General Attention 32", ATTENTION_SINGLE,
                         NULL, MAX_BLOCK_ID},
                        {"General Attention %d",
                         (2 << ATTENTION_LENGTH_SHIFT) |
                         (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID},
                        {"General Attention 35", ATTENTION_SINGLE,
                         NULL, MAX_BLOCK_ID},
                        {"NWS Parity",
                         ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
                         ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
                         NULL, BLOCK_NWS},
                        {"NWS Interrupt",
                         ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
                         ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
                         NULL, BLOCK_NWS},
                        {"NWM Parity",
                         ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
                         ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
                         NULL, BLOCK_NWM},
                        {"NWM Interrupt",
                         ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
                         ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
                         NULL, BLOCK_NWM},
                        {"MCP CPU", ATTENTION_SINGLE,
                         qed_mcp_attn_cb, MAX_BLOCK_ID},
                        {"MCP Watchdog timer", ATTENTION_SINGLE,
                         NULL, MAX_BLOCK_ID},
                        {"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
                        {"AVS stop status ready", ATTENTION_SINGLE,
                         NULL, MAX_BLOCK_ID},
                        {"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
                        {"MSTAT per-path", ATTENTION_PAR_INT,
                         NULL, MAX_BLOCK_ID},
                        {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT),
                         NULL, MAX_BLOCK_ID},
                        {"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG},
                        {"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB},
                        {"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB},
                        {"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB},
                        {"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS},
                }
        },

        {
                {       /* After Invert 5 */
                        {"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC},
                        {"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1},
                        {"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2},
                        {"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB},
                        {"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF},
                        {"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM},
                        {"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM},
                        {"MCM", ATTENTION_PAR_INT, NULL, BLOCK_MCM},
                        {"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM},
                        {"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM},
                        {"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM},
                        {"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM},
                        {"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM},
                        {"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM},
                        {"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM},
                        {"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM},
                }
        },

        {
                {       /* After Invert 6 */
                        {"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM},
                        {"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM},
                        {"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM},
                        {"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM},
                        {"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM},
                        {"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM},
                        {"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM},
                        {"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM},
                        {"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM},
                        {"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD},
                        {"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD},
                        {"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD},
                        {"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD},
                        {"DORQ", ATTENTION_PAR_INT,
                         qed_dorq_attn_cb, BLOCK_DORQ},
                        {"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG},
                        {"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC},
                }
        },

        {
                {       /* After Invert 7 */
                        {"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC},
                        {"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU},
                        {"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE},
                        {"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU},
                        {"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
                        {"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU},
                        {"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU},
                        {"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM},
                        {"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC},
                        {"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF},
                        {"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF},
                        {"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS},
                        {"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC},
                        {"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS},
                        {"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE},
                        {"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS},
                        {"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ},
                }
        },

        {
                {       /* After Invert 8 */
                        {"PSWRQ (pci_clk)", ATTENTION_PAR_INT,
                         NULL, BLOCK_PSWRQ2},
                        {"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR},
                        {"PSWWR (pci_clk)", ATTENTION_PAR_INT,
                         NULL, BLOCK_PSWWR2},
                        {"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD},
                        {"PSWRD (pci_clk)", ATTENTION_PAR_INT,
                         NULL, BLOCK_PSWRD2},
                        {"PSWHST", ATTENTION_PAR_INT,
                         qed_pswhst_attn_cb, BLOCK_PSWHST},
                        {"PSWHST (pci_clk)", ATTENTION_PAR_INT,
                         NULL, BLOCK_PSWHST2},
                        {"GRC", ATTENTION_PAR_INT,
                         qed_grc_attn_cb, BLOCK_GRC},
                        {"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU},
                        {"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI},
                        {"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
                        {"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
                        {"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
                        {"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
                        {"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
                        {"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
                        {"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS},
                        {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE,
                         NULL, BLOCK_PGLCS},
                        {"PERST_B assertion", ATTENTION_SINGLE,
                         NULL, MAX_BLOCK_ID},
                        {"PERST_B deassertion", ATTENTION_SINGLE,
                         NULL, MAX_BLOCK_ID},
                        {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT),
                         NULL, MAX_BLOCK_ID},
                }
        },

        {
                {       /* After Invert 9 */
                        {"MCP Latched memory", ATTENTION_PAR,
                         NULL, MAX_BLOCK_ID},
                        {"MCP Latched scratchpad cache", ATTENTION_SINGLE,
                         NULL, MAX_BLOCK_ID},
                        {"MCP Latched ump_tx", ATTENTION_PAR,
                         NULL, MAX_BLOCK_ID},
                        {"MCP Latched scratchpad", ATTENTION_PAR,
                         NULL, MAX_BLOCK_ID},
                        {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT),
                         NULL, MAX_BLOCK_ID},
                }
        },
};

static struct aeu_invert_reg_bit *
qed_int_aeu_translate(struct qed_hwfn *p_hwfn,
                      struct aeu_invert_reg_bit *p_bit)
{
        if (!QED_IS_BB(p_hwfn->cdev))
                return p_bit;

        if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
                return p_bit;

        return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
                                  ATTENTION_BB_SHIFT];
}

static bool qed_int_is_parity_flag(struct qed_hwfn *p_hwfn,
                                   struct aeu_invert_reg_bit *p_bit)
{
        return !!(qed_int_aeu_translate(p_hwfn, p_bit)->flags &
                  ATTENTION_PARITY);
}
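
/* Illustrative note (added; not from the original source): on a BB adapter,
 * an aeu_descs entry such as "NWS Parity" carries ATTENTION_BB_DIFFERENT
 * together with ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0), so
 * qed_int_aeu_translate() substitutes the "CNIG port 0" descriptor from
 * aeu_descs_special[]; on other adapters the original descriptor is used.
 */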

#define ATTN_STATE_BITS         (0xfff)
#define ATTN_BITS_MASKABLE      (0x3ff)

struct qed_sb_attn_info {
        /* Virtual & Physical address of the SB */
        struct atten_status_block       *sb_attn;
        dma_addr_t                      sb_phys;

        /* Last seen running index */
        u16                             index;

        /* A mask of the AEU bits resulting in a parity error */
        u32                             parity_mask[NUM_ATTN_REGS];

        /* A pointer to the attention description structure */
        struct aeu_invert_reg           *p_aeu_desc;

        /* Previously asserted attentions, which are still unasserted */
        u16                             known_attn;

        /* Cleanup address for the link's general hw attention */
        u32                             mfw_attn_addr;
};

static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
                                      struct qed_sb_attn_info *p_sb_desc)
{
        u16 rc = 0, index;

        /* Make certain HW write took effect */
        mmiowb();

        index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
        if (p_sb_desc->index != index) {
                p_sb_desc->index = index;
                rc = QED_SB_ATT_IDX;
        }

        /* Make certain we got a consistent view with HW */
        mmiowb();

        return rc;
}

/**
 * @brief qed_int_assertion - handles asserted attention bits
 *
 * @param p_hwfn
 * @param asserted_bits newly asserted bits
 * @return int
 */
static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits)
{
        struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
        u32 igu_mask;

        /* Mask the source of the attention in the IGU */
        igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
        DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
                   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
        igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
        qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);

        DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
                   "inner known ATTN state: 0x%04x --> 0x%04x\n",
                   sb_attn_sw->known_attn,
                   sb_attn_sw->known_attn | asserted_bits);
        sb_attn_sw->known_attn |= asserted_bits;

        /* Handle MCP events */
        if (asserted_bits & 0x100) {
                qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
                /* Clean the MCP attention */
                qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
                       sb_attn_sw->mfw_attn_addr, 0);
        }

        DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
                      GTT_BAR0_MAP_REG_IGU_CMD +
                      ((IGU_CMD_ATTN_BIT_SET_UPPER -
                        IGU_CMD_INT_ACK_BASE) << 3),
                      (u32)asserted_bits);

        DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
                   asserted_bits);

        return 0;
}

static void qed_int_attn_print(struct qed_hwfn *p_hwfn,
                               enum block_id id,
                               enum dbg_attn_type type, bool b_clear)
{
        struct dbg_attn_block_result attn_results;
        enum dbg_status status;

        memset(&attn_results, 0, sizeof(attn_results));

        status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type,
                                   b_clear, &attn_results);
        if (status != DBG_STATUS_OK)
                DP_NOTICE(p_hwfn,
                          "Failed to parse attention information [status: %s]\n",
                          qed_dbg_get_status_str(status));
        else
                qed_dbg_parse_attn(p_hwfn, &attn_results);
}

/**
 * @brief qed_int_deassertion_aeu_bit - handles the effects of a single
 * cause of the attention
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of an AEU bit which caused the attention
 * @param aeu_en_reg - register offset of the AEU enable reg. which configured
 *                     this bit to this group.
 * @param p_bit_name - printable name of the attention bit
 * @param bitmask - bitmask of this source within the aeu_en_reg
 *
 * @return int
 */
static int
qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
                            struct aeu_invert_reg_bit *p_aeu,
                            u32 aeu_en_reg,
                            const char *p_bit_name, u32 bitmask)
{
        bool b_fatal = false;
        int rc = -EINVAL;
        u32 val;

        DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
                p_bit_name, bitmask);

        /* Call callback before clearing the interrupt status */
        if (p_aeu->cb) {
                DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
                        p_bit_name);
                rc = p_aeu->cb(p_hwfn);
        }

        if (rc)
                b_fatal = true;

        /* Print HW block interrupt registers */
        if (p_aeu->block_index != MAX_BLOCK_ID)
                qed_int_attn_print(p_hwfn, p_aeu->block_index,
                                   ATTN_TYPE_INTERRUPT, !b_fatal);

        /* If the attention is benign, no need to prevent it */
        if (!rc)
                goto out;

        /* Prevent this Attention from being asserted in the future */
        val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
        qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask));
        DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
                p_bit_name);

out:
        return rc;
}

/**
 * @brief qed_int_deassertion_parity - handle a single parity AEU source
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of an AEU bit which caused the parity
 * @param aeu_en_reg - address of the AEU enable register
 * @param bit_index
 */
static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn,
                                       struct aeu_invert_reg_bit *p_aeu,
                                       u32 aeu_en_reg, u8 bit_index)
{
        u32 block_id = p_aeu->block_index, mask, val;

        DP_NOTICE(p_hwfn->cdev,
                  "%s parity attention is set [address 0x%08x, bit %d]\n",
                  p_aeu->bit_name, aeu_en_reg, bit_index);

        if (block_id != MAX_BLOCK_ID) {
                qed_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);

                /* In BB, there's a single parity bit for several blocks */
                if (block_id == BLOCK_BTB) {
                        qed_int_attn_print(p_hwfn, BLOCK_OPTE,
                                           ATTN_TYPE_PARITY, false);
                        qed_int_attn_print(p_hwfn, BLOCK_MCP,
                                           ATTN_TYPE_PARITY, false);
                }
        }

        /* Prevent this parity error from being re-asserted */
        mask = ~BIT(bit_index);
        val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
        qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
        DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
                p_aeu->bit_name);
}

/**
 * @brief - handles deassertion of previously asserted attentions.
 *
 * @param p_hwfn
 * @param deasserted_bits - newly deasserted bits
 * @return int
 *
 */
static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
                               u16 deasserted_bits)
{
        struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
        u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
        u8 i, j, k, bit_idx;
        int rc = 0;

        /* Read the attention registers in the AEU */
        for (i = 0; i < NUM_ATTN_REGS; i++) {
                aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                                        MISC_REG_AEU_AFTER_INVERT_1_IGU +
                                        i * 0x4);
                DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
                           "Deasserted bits [%d]: %08x\n",
                           i, aeu_inv_arr[i]);
        }

        /* Find parity attentions first */
        for (i = 0; i < NUM_ATTN_REGS; i++) {
                struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
                u32 parities;

                aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
                en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);

                /* Skip register in which no parity bit is currently set */
                parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
                if (!parities)
                        continue;

                for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
                        struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];

                        if (qed_int_is_parity_flag(p_hwfn, p_bit) &&
                            !!(parities & BIT(bit_idx)))
                                qed_int_deassertion_parity(p_hwfn, p_bit,
                                                           aeu_en, bit_idx);

                        bit_idx += ATTENTION_LENGTH(p_bit->flags);
                }
        }

        /* Find non-parity cause for attention and act */
        for (k = 0; k < MAX_ATTN_GRPS; k++) {
                struct aeu_invert_reg_bit *p_aeu;

                /* Handle only groups whose attention is currently deasserted */
                if (!(deasserted_bits & (1 << k)))
                        continue;

                for (i = 0; i < NUM_ATTN_REGS; i++) {
                        u32 bits;

                        aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
                                 i * sizeof(u32) +
                                 k * sizeof(u32) * NUM_ATTN_REGS;

                        en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
                        bits = aeu_inv_arr[i] & en;

                        /* Skip if no bit from this group is currently set */
                        if (!bits)
                                continue;

                        /* Find all set bits from current register which belong
                         * to current group, making them responsible for the
                         * previous assertion.
                         */
                        for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
                                unsigned long bitmask;
                                u8 bit, bit_len;

                                p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
                                p_aeu = qed_int_aeu_translate(p_hwfn, p_aeu);

                                bit = bit_idx;
                                bit_len = ATTENTION_LENGTH(p_aeu->flags);
                                if (qed_int_is_parity_flag(p_hwfn, p_aeu)) {
                                        /* Skip Parity */
                                        bit++;
                                        bit_len--;
                                }

                                bitmask = bits & (((1 << bit_len) - 1) << bit);
                                bitmask >>= bit;

                                if (bitmask) {
                                        u32 flags = p_aeu->flags;
                                        char bit_name[30];
                                        u8 num;

                                        num = (u8)find_first_bit(&bitmask,
                                                                 bit_len);

                                        /* Some bits represent more than a
                                         * single interrupt. Correctly print
                                         * their name.
                                         */
                                        if (ATTENTION_LENGTH(flags) > 2 ||
                                            ((flags & ATTENTION_PAR_INT) &&
                                             ATTENTION_LENGTH(flags) > 1))
                                                snprintf(bit_name, 30,
                                                         p_aeu->bit_name, num);
                                        else
                                                strncpy(bit_name,
                                                        p_aeu->bit_name, 30);

                                        /* We now need to pass bitmask in its
                                         * correct position.
                                         */
                                        bitmask <<= bit;

                                        /* Handle source of the attention */
                                        qed_int_deassertion_aeu_bit(p_hwfn,
                                                                    p_aeu,
                                                                    aeu_en,
                                                                    bit_name,
                                                                    bitmask);
                                }

                                bit_idx += ATTENTION_LENGTH(p_aeu->flags);
                        }
                }
        }

        /* Clear IGU indication for the deasserted bits */
        DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
                      GTT_BAR0_MAP_REG_IGU_CMD +
                      ((IGU_CMD_ATTN_BIT_CLR_UPPER -
                        IGU_CMD_INT_ACK_BASE) << 3),
                      ~((u32)deasserted_bits));

        /* Unmask deasserted attentions in IGU */
        aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
        aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
        qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);

        /* Clear deassertion from inner state */
        sb_attn_sw->known_attn &= ~deasserted_bits;

        return rc;
}

static int qed_int_attentions(struct qed_hwfn *p_hwfn)
{
        struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
        struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
        u32 attn_bits = 0, attn_acks = 0;
        u16 asserted_bits, deasserted_bits;
        __le16 index;
        int rc = 0;

        /* Read current attention bits/acks - safeguard against attentions
         * by guaranteeing work on a synchronized timeframe
         */
        do {
                index = p_sb_attn->sb_index;
                attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
                attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
        } while (index != p_sb_attn->sb_index);
        p_sb_attn->sb_index = index;

        /* Attention / Deassertion are meaningful (and in correct state)
         * only when they differ and consistent with known state - deassertion
         * when previous attention & current ack, and assertion when current
         * attention with no previous attention
         */
        asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
                        ~p_sb_attn_sw->known_attn;
        deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
                          p_sb_attn_sw->known_attn;
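
        /* Illustrative example (added; not from the original source): a
         * source whose bit is set in attn_bits but clear in attn_acks and
         * absent from known_attn is reported as newly asserted; once the
         * same bit is clear in attn_bits, set in attn_acks and still
         * present in known_attn, it is reported as deasserted.
         */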

        if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
                DP_INFO(p_hwfn,
                        "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
                        index, attn_bits, attn_acks, asserted_bits,
                        deasserted_bits, p_sb_attn_sw->known_attn);
        } else if (asserted_bits == 0x100) {
                DP_INFO(p_hwfn, "MFW indication via attention\n");
        } else {
                DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
                           "MFW indication [deassertion]\n");
        }

        if (asserted_bits) {
                rc = qed_int_assertion(p_hwfn, asserted_bits);
                if (rc)
                        return rc;
        }

        if (deasserted_bits)
                rc = qed_int_deassertion(p_hwfn, deasserted_bits);

        return rc;
}

static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
                            void __iomem *igu_addr, u32 ack_cons)
{
        struct igu_prod_cons_update igu_ack = { 0 };

        igu_ack.sb_id_and_flags =
                ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
                 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
                 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
                 (IGU_SEG_ACCESS_ATTN <<
                  IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

        DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags);

        /* Both segments (interrupts & acks) are written to the same address;
         * need to guarantee all commands will be received (in-order) by HW.
         */
        mmiowb();
        barrier();
}

void qed_int_sp_dpc(unsigned long hwfn_cookie)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie;
        struct qed_pi_info *pi_info = NULL;
        struct qed_sb_attn_info *sb_attn;
        struct qed_sb_info *sb_info;
        int arr_size;
        u16 rc = 0;

        if (!p_hwfn->p_sp_sb) {
                DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
                return;
        }

        sb_info = &p_hwfn->p_sp_sb->sb_info;
        arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
        if (!sb_info) {
                DP_ERR(p_hwfn->cdev,
                       "Status block is NULL - cannot ack interrupts\n");
                return;
        }

        if (!p_hwfn->p_sb_attn) {
                DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn");
                return;
        }
        sb_attn = p_hwfn->p_sb_attn;

        DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
                   p_hwfn, p_hwfn->my_id);

        /* Disable ack for def status block. Required both for MSI-X and
         * INTa in non-mask mode; in INTa it does no harm.
         */
        qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);

        /* Gather Interrupts/Attentions information */
        if (!sb_info->sb_virt) {
                DP_ERR(p_hwfn->cdev,
                       "Interrupt Status block is NULL - cannot check for new interrupts!\n");
        } else {
                u32 tmp_index = sb_info->sb_ack;

                rc = qed_sb_update_sb_idx(sb_info);
                DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
                           "Interrupt indices: 0x%08x --> 0x%08x\n",
                           tmp_index, sb_info->sb_ack);
        }

        if (!sb_attn || !sb_attn->sb_attn) {
                DP_ERR(p_hwfn->cdev,
                       "Attentions Status block is NULL - cannot check for new attentions!\n");
        } else {
                u16 tmp_index = sb_attn->index;

                rc |= qed_attn_update_idx(p_hwfn, sb_attn);
                DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
                           "Attention indices: 0x%08x --> 0x%08x\n",
                           tmp_index, sb_attn->index);
        }

        /* Check if we expect interrupts at this time. If not, just ack them */
        if (!(rc & QED_SB_EVENT_MASK)) {
                qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
                return;
        }

        /* Check the validity of the DPC ptt. If not ack interrupts and fail */
        if (!p_hwfn->p_dpc_ptt) {
                DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
                qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
                return;
        }

        if (rc & QED_SB_ATT_IDX)
                qed_int_attentions(p_hwfn);

        if (rc & QED_SB_IDX) {
                int pi;

                /* Call the registered protocol-index completion callbacks */
                for (pi = 0; pi < arr_size; pi++) {
                        pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
                        if (pi_info->comp_cb)
                                pi_info->comp_cb(p_hwfn, pi_info->cookie);
                }
        }

        if (sb_attn && (rc & QED_SB_ATT_IDX))
                /* This should be done before the interrupts are enabled,
                 * since otherwise a new attention will be generated.
                 */
                qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);

        qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
}

static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
{
        struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;

        if (!p_sb)
                return;

        if (p_sb->sb_attn)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  SB_ATTN_ALIGNED_SIZE(p_hwfn),
                                  p_sb->sb_attn, p_sb->sb_phys);
        kfree(p_sb);
        p_hwfn->p_sb_attn = NULL;
}

static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt)
{
        struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;

        memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));

        sb_info->index = 0;
        sb_info->known_attn = 0;

        /* Configure Attention Status Block in IGU */
        qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
               lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
        qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
               upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
}

static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt,
                                 void *sb_virt_addr, dma_addr_t sb_phy_addr)
{
        struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
        int i, j, k;

        sb_info->sb_attn = sb_virt_addr;
        sb_info->sb_phys = sb_phy_addr;

        /* Set the pointer to the AEU descriptors */
        sb_info->p_aeu_desc = aeu_descs;

        /* Calculate Parity Masks */
        memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
        for (i = 0; i < NUM_ATTN_REGS; i++) {
                /* j is array index, k is bit index */
                for (j = 0, k = 0; k < 32; j++) {
                        struct aeu_invert_reg_bit *p_aeu;

                        p_aeu = &aeu_descs[i].bits[j];
                        if (qed_int_is_parity_flag(p_hwfn, p_aeu))
                                sb_info->parity_mask[i] |= 1 << k;

                        k += ATTENTION_LENGTH(p_aeu->flags);
                }
                DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
                           "Attn Mask [Reg %d]: 0x%08x\n",
                           i, sb_info->parity_mask[i]);
        }

        /* Set the address of cleanup for the mcp attention */
        sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
                                 MISC_REG_AEU_GENERAL_ATTN_0;

        qed_int_sb_attn_setup(p_hwfn, p_ptt);
}

static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt)
{
        struct qed_dev *cdev = p_hwfn->cdev;
        struct qed_sb_attn_info *p_sb;
        dma_addr_t p_phys = 0;
        void *p_virt;

        /* SB struct */
        p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
        if (!p_sb)
                return -ENOMEM;

        /* SB ring */
        p_virt = dma_alloc_coherent(&cdev->pdev->dev,
                                    SB_ATTN_ALIGNED_SIZE(p_hwfn),
                                    &p_phys, GFP_KERNEL);
        if (!p_virt) {
                kfree(p_sb);
                return -ENOMEM;
        }

        /* Attention setup */
        p_hwfn->p_sb_attn = p_sb;
        qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);

        return 0;
}

/* coalescing timeout = timeset << (timer_res + 1) */
#define QED_CAU_DEF_RX_USECS 24
#define QED_CAU_DEF_TX_USECS 48
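
/* Worked example (added; not from the original source): with the default Rx
 * value of 24 usecs, 24 <= 0x7F selects timer_res = 0 and
 * timeset = 24 >> 0 = 24, so the timeout above is 24 << (0 + 1) = 48
 * hardware time-units. A larger request such as 200 usecs exceeds 0x7F, so a
 * coarser resolution is used: timer_res = 1 and timeset = 200 >> 1 = 100,
 * keeping timeset within its 7-bit field.
 */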

void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
                           struct cau_sb_entry *p_sb_entry,
                           u8 pf_id, u16 vf_number, u8 vf_valid)
{
        struct qed_dev *cdev = p_hwfn->cdev;
        u32 cau_state;
        u8 timer_res;

        memset(p_sb_entry, 0, sizeof(*p_sb_entry));

        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);

        cau_state = CAU_HC_DISABLE_STATE;

        if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
                cau_state = CAU_HC_ENABLE_STATE;
                if (!cdev->rx_coalesce_usecs)
                        cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS;
                if (!cdev->tx_coalesce_usecs)
                        cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
        }

        /* Coalesce = (timeset << timer-res), timeset is 7bit wide */
        if (cdev->rx_coalesce_usecs <= 0x7F)
                timer_res = 0;
        else if (cdev->rx_coalesce_usecs <= 0xFF)
                timer_res = 1;
        else
                timer_res = 2;
        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

        if (cdev->tx_coalesce_usecs <= 0x7F)
                timer_res = 0;
        else if (cdev->tx_coalesce_usecs <= 0xFF)
                timer_res = 1;
        else
                timer_res = 2;
        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);

        SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
        SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}

static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
                                struct qed_ptt *p_ptt,
                                u16 igu_sb_id,
                                u32 pi_index,
                                enum qed_coalescing_fsm coalescing_fsm,
                                u8 timeset)
{
        struct cau_pi_entry pi_entry;
        u32 sb_offset, pi_offset;

        if (IS_VF(p_hwfn->cdev))
                return;

        sb_offset = igu_sb_id * PIS_PER_SB_E4;
        memset(&pi_entry, 0, sizeof(struct cau_pi_entry));

        SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
        if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
                SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
        else
                SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);

        pi_offset = sb_offset + pi_index;
        if (p_hwfn->hw_init_done) {
                qed_wr(p_hwfn, p_ptt,
                       CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
                       *((u32 *)&(pi_entry)));
        } else {
                STORE_RT_REG(p_hwfn,
                             CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
                             *((u32 *)&(pi_entry)));
        }
}

void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
                         struct qed_ptt *p_ptt,
                         dma_addr_t sb_phys,
                         u16 igu_sb_id, u16 vf_number, u8 vf_valid)
{
        struct cau_sb_entry sb_entry;

        qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
                              vf_number, vf_valid);

        if (p_hwfn->hw_init_done) {
                /* Wide-bus, initialize via DMAE */
                u64 phys_addr = (u64)sb_phys;

                qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr,
                                  CAU_REG_SB_ADDR_MEMORY +
                                  igu_sb_id * sizeof(u64), 2, 0);
                qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry,
                                  CAU_REG_SB_VAR_MEMORY +
                                  igu_sb_id * sizeof(u64), 2, 0);
        } else {
                /* Initialize Status Block Address */
                STORE_RT_REG_AGG(p_hwfn,
                                 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
                                 igu_sb_id * 2,
                                 sb_phys);

                STORE_RT_REG_AGG(p_hwfn,
                                 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
                                 igu_sb_id * 2,
                                 sb_entry);
        }

        /* Configure pi coalescing if set */
        if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
                u8 num_tc = p_hwfn->hw_info.num_hw_tc;
                u8 timeset, timer_res;
                u8 i;

                /* timeset = (coalesce >> timer-res), timeset is 7bit wide */
                if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
                        timer_res = 0;
                else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF)
                        timer_res = 1;
                else
                        timer_res = 2;
                timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
                qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
                                    QED_COAL_RX_STATE_MACHINE, timeset);

                if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
                        timer_res = 0;
                else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF)
                        timer_res = 1;
                else
                        timer_res = 2;
                timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res);
                for (i = 0; i < num_tc; i++) {
                        qed_int_cau_conf_pi(p_hwfn, p_ptt,
                                            igu_sb_id, TX_PI(i),
                                            QED_COAL_TX_STATE_MACHINE,
                                            timeset);
                }
        }
}

void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt, struct qed_sb_info *sb_info)
{
        /* zero status block and ack counter */
        sb_info->sb_ack = 0;
        memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

        if (IS_PF(p_hwfn->cdev))
                qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
                                    sb_info->igu_sb_id, 0, 0);
}

struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf)
{
        struct qed_igu_block *p_block;
        u16 igu_id;

        for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
             igu_id++) {
                p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

                if (!(p_block->status & QED_IGU_STATUS_VALID) ||
                    !(p_block->status & QED_IGU_STATUS_FREE))
                        continue;

                if (!!(p_block->status & QED_IGU_STATUS_PF) == b_is_pf)
                        return p_block;
        }

        return NULL;
}

static u16 qed_get_pf_igu_sb_id(struct qed_hwfn *p_hwfn, u16 vector_id)
{
        struct qed_igu_block *p_block;
        u16 igu_id;

        for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
             igu_id++) {
                p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

                if (!(p_block->status & QED_IGU_STATUS_VALID) ||
                    !p_block->is_pf ||
                    p_block->vector_number != vector_id)
                        continue;

                return igu_id;
        }

        return QED_SB_INVALID_IDX;
}

u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
        u16 igu_sb_id;

        /* Assuming continuous set of IGU SBs dedicated for given PF */
        if (sb_id == QED_SP_SB_ID)
                igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
        else if (IS_PF(p_hwfn->cdev))
                igu_sb_id = qed_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
        else
                igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);

        if (sb_id == QED_SP_SB_ID)
                DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
                           "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
        else
                DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
                           "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);

        return igu_sb_id;
}

int qed_int_sb_init(struct qed_hwfn *p_hwfn,
                    struct qed_ptt *p_ptt,
                    struct qed_sb_info *sb_info,
                    void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id)
{
        sb_info->sb_virt = sb_virt_addr;
        sb_info->sb_phys = sb_phy_addr;

        sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);

        if (sb_id != QED_SP_SB_ID) {
                if (IS_PF(p_hwfn->cdev)) {
                        struct qed_igu_info *p_info;
                        struct qed_igu_block *p_block;

                        p_info = p_hwfn->hw_info.p_igu_info;
                        p_block = &p_info->entry[sb_info->igu_sb_id];

                        p_block->sb_info = sb_info;
                        p_block->status &= ~QED_IGU_STATUS_FREE;
                        p_info->usage.free_cnt--;
                } else {
                        qed_vf_set_sb_info(p_hwfn, sb_id, sb_info);
                }
        }

        sb_info->cdev = p_hwfn->cdev;

        /* The igu address will hold the absolute address that needs to be
         * written to for a specific status block
         */
        if (IS_PF(p_hwfn->cdev)) {
                sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
                                    GTT_BAR0_MAP_REG_IGU_CMD +
                                    (sb_info->igu_sb_id << 3);
        } else {
                sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
                                    PXP_VF_BAR0_START_IGU +
                                    ((IGU_CMD_INT_ACK_BASE +
                                      sb_info->igu_sb_id) << 3);
        }

        sb_info->flags |= QED_SB_INFO_INIT;

        qed_int_sb_setup(p_hwfn, p_ptt, sb_info);

        return 0;
}

int qed_int_sb_release(struct qed_hwfn *p_hwfn,
                       struct qed_sb_info *sb_info, u16 sb_id)
{
        struct qed_igu_block *p_block;
        struct qed_igu_info *p_info;

        if (!sb_info)
                return 0;

        /* zero status block and ack counter */
        sb_info->sb_ack = 0;
        memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

        if (IS_VF(p_hwfn->cdev)) {
                qed_vf_set_sb_info(p_hwfn, sb_id, NULL);
                return 0;
        }

        p_info = p_hwfn->hw_info.p_igu_info;
        p_block = &p_info->entry[sb_info->igu_sb_id];

        /* Vector 0 is reserved to Default SB */
        if (!p_block->vector_number) {
                DP_ERR(p_hwfn, "Do Not free sp sb using this function");
                return -EINVAL;
        }

        /* Lose reference to client's SB info, and fix counters */
        p_block->sb_info = NULL;
        p_block->status |= QED_IGU_STATUS_FREE;
        p_info->usage.free_cnt++;

        return 0;
}

static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
{
        struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;

        if (!p_sb)
                return;

        if (p_sb->sb_info.sb_virt)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  SB_ALIGNED_SIZE(p_hwfn),
                                  p_sb->sb_info.sb_virt,
                                  p_sb->sb_info.sb_phys);
        kfree(p_sb);
        p_hwfn->p_sp_sb = NULL;
}

static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        struct qed_sb_sp_info *p_sb;
        dma_addr_t p_phys = 0;
        void *p_virt;

        /* SB struct */
        p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
        if (!p_sb)
                return -ENOMEM;

        /* SB ring */
        p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                    SB_ALIGNED_SIZE(p_hwfn),
                                    &p_phys, GFP_KERNEL);
        if (!p_virt) {
                kfree(p_sb);
                return -ENOMEM;
        }

        /* Status Block setup */
        p_hwfn->p_sp_sb = p_sb;
        qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt,
                        p_phys, QED_SP_SB_ID);

        memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));

        return 0;
}
int qed_int_register_cb ( struct qed_hwfn * p_hwfn ,
qed_int_comp_cb_t comp_cb ,
2016-08-15 10:42:43 +03:00
void * cookie , u8 * sb_idx , __le16 * * p_fw_cons )
2015-10-26 11:02:25 +02:00
{
struct qed_sb_sp_info * p_sp_sb = p_hwfn - > p_sp_sb ;
2016-02-28 12:26:52 +02:00
int rc = - ENOMEM ;
2015-10-26 11:02:25 +02:00
u8 pi ;
/* Look for a free index */
for ( pi = 0 ; pi < ARRAY_SIZE ( p_sp_sb - > pi_info_arr ) ; pi + + ) {
2016-02-28 12:26:52 +02:00
if ( p_sp_sb - > pi_info_arr [ pi ] . comp_cb )
continue ;
p_sp_sb - > pi_info_arr [ pi ] . comp_cb = comp_cb ;
p_sp_sb - > pi_info_arr [ pi ] . cookie = cookie ;
* sb_idx = pi ;
* p_fw_cons = & p_sp_sb - > sb_info . sb_virt - > pi_array [ pi ] ;
rc = 0 ;
break ;
2015-10-26 11:02:25 +02:00
}
2016-02-28 12:26:52 +02:00
return rc ;
2015-10-26 11:02:25 +02:00
}
int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
{
	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;

	if (!p_sp_sb->pi_info_arr[pi].comp_cb)
		return -ENOMEM;

	p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
	p_sp_sb->pi_info_arr[pi].cookie = NULL;

	return 0;
}

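/* The slowpath SB's IGU id is needed by clients that must reference the
 * default SB, e.g. when filling ramrod data for the firmware.
 */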
u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
{
	return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
}

void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;

	p_hwfn->cdev->int_mode = int_mode;
	switch (p_hwfn->cdev->int_mode) {
	case QED_INT_MODE_INTA:
		igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;
	case QED_INT_MODE_MSI:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;
	case QED_INT_MODE_MSIX:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		break;
	case QED_INT_MODE_POLL:
		break;
	}

	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
}

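/* Open the attention path: zero and re-arm IGU_REG_ATTENTION_ENABLE around
 * latching the leading/trailing edges, then unmask the AEU lines toward
 * the IGU.
 */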
static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt)
{
	/* Configure AEU signal change to produce attentions */
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
	qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
	qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);

	/* Flush the writes to IGU */
	mmiowb();

	/* Unmask AEU signals toward IGU */
	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
}

int
qed_int_igu_enable(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
{
	int rc = 0;

	qed_int_igu_enable_attn(p_hwfn, p_ptt);

	if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_slowpath_irq_req(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
			return -EINVAL;
		}
		p_hwfn->b_int_requested = true;
	}

	/* Enable interrupt Generation */
	qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
	p_hwfn->b_int_enabled = 1;

	return rc;
}

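/* Mask interrupt generation. VFs don't touch IGU_REG_PF_CONFIGURATION here;
 * for them only the software flag is cleared.
 */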
void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	p_hwfn->b_int_enabled = 0;

	if (IS_VF(p_hwfn->cdev))
		return;

	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
}

#define IGU_CLEANUP_SLEEP_LENGTH		(1000)
static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   u16 igu_sb_id,
				   bool cleanup_set, u16 opaque_fid)
{
	u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
	u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;

	/* Set the data field */
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);

	/* Set the control register */
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);

	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);

	barrier();

	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);

	/* Flush the write to IGU */
	mmiowb();

	/* calculate where to read the status bit from */
	sb_bit = 1 << (igu_sb_id % 32);
	sb_bit_addr = igu_sb_id / 32 * sizeof(u32);

	sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;

	/* Now wait for the command to complete */
	do {
		val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);

		if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
			break;

		usleep_range(5000, 10000);
	} while (--sleep_cnt);

	if (!sleep_cnt)
		DP_NOTICE(p_hwfn,
			  "Timeout waiting for clear status 0x%08x [for sb %d]\n",
			  val, igu_sb_id);
}

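/* Clean a single IGU SB: issue the cleanup command (set and/or clear),
 * wait until the SB no longer appears in WRITE_DONE_PENDING, then zero
 * its CAU protocol indices.
 */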
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     u16 igu_sb_id, u16 opaque, bool b_set)
{
	struct qed_igu_block *p_block;
	int pi, i;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "Cleaning SB [%04x]: func_id = %d is_pf = %d vector_num = 0x%0x\n",
		   igu_sb_id,
		   p_block->function_id,
		   p_block->is_pf, p_block->vector_number);

	/* Set */
	if (b_set)
		qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);

	/* Clear */
	qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);

	/* Wait for the IGU SB to cleanup */
	for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
		u32 val;

		val = qed_rd(p_hwfn, p_ptt,
			     IGU_REG_WRITE_DONE_PENDING +
			     ((igu_sb_id / 32) * 4));
		if (val & BIT((igu_sb_id % 32)))
			usleep_range(10, 20);
		else
			break;
	}
	if (i == IGU_CLEANUP_SLEEP_LENGTH)
		DP_NOTICE(p_hwfn,
			  "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
			  igu_sb_id);

	/* Clear the CAU for the SB */
	for (pi = 0; pi < 12; pi++)
		qed_wr(p_hwfn, p_ptt,
		       CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
}

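/* Walk the CAM shadow and clean every valid, non-default PF SB; the
 * default SB is handled separately and only when b_slowpath is set.
 */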
void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      bool b_set, bool b_slowpath)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct qed_igu_block *p_block;
	u16 igu_sb_id = 0;
	u32 val = 0;

	val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
	val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
	val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
	qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);

	for (igu_sb_id = 0;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];

		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    (p_block->status & QED_IGU_STATUS_DSB))
			continue;

		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
						p_hwfn->hw_info.opaque_fid,
						b_set);
	}

	if (b_slowpath)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						p_info->igu_dsb_id,
						p_hwfn->hw_info.opaque_fid,
						b_set);
}

int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct qed_igu_block *p_block;
	int pf_sbs, vf_sbs;
	u16 igu_sb_id;
	u32 val, rval;

	if (!RESC_NUM(p_hwfn, QED_SB)) {
		p_info->b_allow_pf_vf_change = false;
	} else {
		/* Use the numbers the MFW has provided -
		 * don't forget MFW accounts for the default SB as well.
		 */
		p_info->b_allow_pf_vf_change = true;

		if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) {
			DP_INFO(p_hwfn,
				"MFW notifies of 0x%04x PF SBs; IGU indicates only 0x%04x\n",
				RESC_NUM(p_hwfn, QED_SB) - 1,
				p_info->usage.cnt);
			p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1;
		}

		if (IS_PF_SRIOV(p_hwfn)) {
			u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs;

			if (vfs != p_info->usage.iov_cnt)
				DP_VERBOSE(p_hwfn,
					   NETIF_MSG_INTR,
					   "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
					   p_info->usage.iov_cnt, vfs);

			/* At this point we know how many SBs we have totally
			 * in IGU + number of PF SBs. So we can validate that
			 * we'd have sufficient SBs for all the VFs.
			 */
			if (vfs > p_info->usage.free_cnt +
			    p_info->usage.free_cnt_iov - p_info->usage.cnt) {
				DP_NOTICE(p_hwfn,
					  "Not enough SBs for VFs - 0x%04x SBs total, of which %04x are PF SBs and %04x are required for VFs\n",
					  p_info->usage.free_cnt +
					  p_info->usage.free_cnt_iov,
					  p_info->usage.cnt, vfs);
				return -EINVAL;
			}

			/* Currently cap the number of VFs SBs by the
			 * number of VFs.
			 */
			p_info->usage.iov_cnt = vfs;
		}
	}

	/* Mark all SBs as free, now in the right PF/VFs division */
	p_info->usage.free_cnt = p_info->usage.cnt;
	p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
	p_info->usage.orig = p_info->usage.cnt;
	p_info->usage.iov_orig = p_info->usage.iov_cnt;

	/* We now proceed to re-configure the IGU cam to reflect the initial
	 * configuration. We can start with the Default SB.
	 */
	pf_sbs = p_info->usage.cnt;
	vf_sbs = p_info->usage.iov_cnt;

	for (igu_sb_id = p_info->igu_dsb_id;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];
		val = 0;

		if (!(p_block->status & QED_IGU_STATUS_VALID))
			continue;

		if (p_block->status & QED_IGU_STATUS_DSB) {
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = 0;
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_PF |
					  QED_IGU_STATUS_DSB;
		} else if (pf_sbs) {
			pf_sbs--;
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = p_info->usage.cnt - pf_sbs;
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_PF |
					  QED_IGU_STATUS_FREE;
		} else if (vf_sbs) {
			p_block->function_id =
			    p_hwfn->cdev->p_iov_info->first_vf_in_pf +
			    p_info->usage.iov_cnt - vf_sbs;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;
			vf_sbs--;
		} else {
			p_block->function_id = 0;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
		}

		SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
			  p_block->function_id);
		SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
			  p_block->vector_number);

		/* VF entries would be enabled when VF is initialized */
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);

		rval = qed_rd(p_hwfn, p_ptt,
			      IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);

		if (rval != val) {
			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY +
			       sizeof(u32) * igu_sb_id, val);

			DP_VERBOSE(p_hwfn,
				   NETIF_MSG_INTR,
				   "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
				   igu_sb_id,
				   p_block->function_id,
				   p_block->is_pf,
				   p_block->vector_number, rval, val);
		}
	}

	return 0;
}

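/* Each IGU CAM line is a single 32-bit mapping register holding the owner
 * function number, a PF/VF flag and the vector number; the helper below
 * decodes it with the IGU_MAPPING_LINE_* fields read from
 * IGU_REG_MAPPING_MEMORY.
 */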
static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u16 igu_sb_id)
{
	u32 val = qed_rd(p_hwfn, p_ptt,
			 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
	struct qed_igu_block *p_block;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];

	/* Fill the block information */
	p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
	p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
	p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
	p_block->igu_sb_id = igu_sb_id;
}

int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_igu_info *p_igu_info;
	struct qed_igu_block *p_block;
	u32 min_vf = 0, max_vf = 0;
	u16 igu_sb_id;

	p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
	if (!p_hwfn->hw_info.p_igu_info)
		return -ENOMEM;

	p_igu_info = p_hwfn->hw_info.p_igu_info;

	/* Distinguish between existent and non-existent default SB */
	p_igu_info->igu_dsb_id = QED_SB_INVALID_IDX;

	/* Find the range of VF ids whose SBs belong to this PF */
	if (p_hwfn->cdev->p_iov_info) {
		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

		min_vf = p_iov->first_vf_in_pf;
		max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
	}

	for (igu_sb_id = 0;
	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
		/* Read current entry; notice it might not belong to this PF */
		qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
		p_block = &p_igu_info->entry[igu_sb_id];

		if ((p_block->is_pf) &&
		    (p_block->function_id == p_hwfn->rel_pf_id)) {
			p_block->status = QED_IGU_STATUS_PF |
					  QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
				p_igu_info->usage.cnt++;
		} else if (!(p_block->is_pf) &&
			   (p_block->function_id >= min_vf) &&
			   (p_block->function_id < max_vf)) {
			/* Available for VFs of this PF */
			p_block->status = QED_IGU_STATUS_VALID |
					  QED_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
				p_igu_info->usage.iov_cnt++;
		}

		/* Mark the first entry belonging to the PF or its VFs
		 * as the default SB [we'll reset IGU prior to first usage].
		 */
		if ((p_block->status & QED_IGU_STATUS_VALID) &&
		    (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) {
			p_igu_info->igu_dsb_id = igu_sb_id;
			p_block->status |= QED_IGU_STATUS_DSB;
		}

		/* limit number of prints by having each PF print only its
		 * entries with the exception of PF0 which would print
		 * everything.
		 */
		if ((p_block->status & QED_IGU_STATUS_VALID) ||
		    (p_hwfn->abs_pf_id == 0)) {
			DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
				   "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
				   igu_sb_id, p_block->function_id,
				   p_block->is_pf, p_block->vector_number);
		}
	}

	if (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX) {
		DP_NOTICE(p_hwfn,
			  "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
			  p_igu_info->igu_dsb_id);
		return -EINVAL;
	}

	/* All non-default SBs are considered free at this point */
	p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
	p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
		   p_igu_info->igu_dsb_id,
		   p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt);

	return 0;
}

/**
 * @brief Initialize igu runtime registers
 *
 * @param p_hwfn
 */
void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;

	STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
}

u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
{
	u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
			       IGU_CMD_INT_ACK_BASE;
	u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
			       IGU_CMD_INT_ACK_BASE;
	u32 intr_status_hi = 0, intr_status_lo = 0;
	u64 intr_status = 0;

	intr_status_lo = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				lsb_igu_cmd_addr * 8);
	intr_status_hi = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				msb_igu_cmd_addr * 8);
	intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;

	return intr_status;
}

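/* The slowpath DPC tasklet is allocated in qed_int_alloc() and only armed
 * here, as part of qed_int_setup().
 */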
static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
{
	tasklet_init(p_hwfn->sp_dpc,
		     qed_int_sp_dpc, (unsigned long)p_hwfn);
	p_hwfn->b_sp_dpc_enabled = true;
}

static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
{
	p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL);
	if (!p_hwfn->sp_dpc)
		return -ENOMEM;

	return 0;
}

static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->sp_dpc);
	p_hwfn->sp_dpc = NULL;
}

int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int rc = 0;

	rc = qed_int_sp_dpc_alloc(p_hwfn);
	if (rc)
		return rc;

	rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
	if (rc)
		return rc;

	rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);

	return rc;
}

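/* Teardown mirrors qed_int_alloc(): slowpath SB, attention SB, then the
 * DPC tasklet memory.
 */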
void qed_int_free(struct qed_hwfn *p_hwfn)
{
	qed_int_sp_sb_free(p_hwfn);
	qed_int_sb_attn_free(p_hwfn);
	qed_int_sp_dpc_free(p_hwfn);
}

void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
	qed_int_sb_attn_setup(p_hwfn, p_ptt);
	qed_int_sp_dpc_setup(p_hwfn);
}

void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
			 struct qed_sb_cnt_info *p_sb_cnt_info)
{
	struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;

	if (!info || !p_sb_cnt_info)
		return;

	memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info));
}

void qed_int_disable_post_isr_release(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i)
		cdev->hwfns[i].b_int_requested = false;
}

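/* Adjust the Rx (TIMER_RES0) or Tx (TIMER_RES1) coalescing timer resolution
 * of a single SB by read-modify-writing its CAU entry over DMAE. A sketch
 * of a hypothetical call that sets the Rx resolution of SB 0 - the 0x2
 * value is illustrative only:
 *
 *	rc = qed_int_set_timer_res(p_hwfn, p_ptt, 0x2, 0, false);
 *	if (rc)
 *		DP_ERR(p_hwfn, "Failed to set timer resolution\n");
 */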
int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			  u8 timer_res, u16 sb_id, bool tx)
{
	struct cau_sb_entry sb_entry;
	int rc;

	if (!p_hwfn->hw_init_done) {
		DP_ERR(p_hwfn, "hardware not initialized yet\n");
		return -EINVAL;
	}

	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
			       sb_id * sizeof(u64),
			       (u64)(uintptr_t)&sb_entry, 2, 0);
	if (rc) {
		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
		return rc;
	}

	if (tx)
		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
	else
		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	rc = qed_dmae_host2grc(p_hwfn, p_ptt,
			       (u64)(uintptr_t)&sb_entry,
			       CAU_REG_SB_VAR_MEMORY +
			       sb_id * sizeof(u64), 2, 0);
	if (rc) {
		DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
		return rc;
	}

	return rc;
}