/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"
#include "bfi_reg.h"

BFA_TRC_FILE(HAL, CORE);

/*
 * BFA module list terminated by NULL
 */
static struct bfa_module_s *hal_mods[] = {
	&hal_mod_fcdiag,
	&hal_mod_sgpg,
	&hal_mod_fcport,
	&hal_mod_fcxp,
	&hal_mod_lps,
	&hal_mod_uf,
	&hal_mod_rport,
	&hal_mod_fcp,
	NULL
};

/*
 * Message handlers for various modules.
 */
static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
	bfa_isr_unhandled,	/* NONE */
	bfa_isr_unhandled,	/* BFI_MC_IOC */
	bfa_fcdiag_intr,	/* BFI_MC_DIAG */
	bfa_isr_unhandled,	/* BFI_MC_FLASH */
	bfa_isr_unhandled,	/* BFI_MC_CEE */
	bfa_fcport_isr,		/* BFI_MC_FCPORT */
	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
	bfa_isr_unhandled,	/* BFI_MC_LL */
	bfa_uf_isr,		/* BFI_MC_UF */
	bfa_fcxp_isr,		/* BFI_MC_FCXP */
	bfa_lps_isr,		/* BFI_MC_LPS */
	bfa_rport_isr,		/* BFI_MC_RPORT */
	bfa_itn_isr,		/* BFI_MC_ITN */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
	bfa_ioim_isr,		/* BFI_MC_IOIM */
	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
	bfa_tskim_isr,		/* BFI_MC_TSKIM */
	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
	bfa_isr_unhandled,	/* BFI_MC_IPFC */
	bfa_isr_unhandled,	/* BFI_MC_PORT */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
};

/*
 * Message handlers for mailbox command classes
 */
static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
	NULL,
	NULL,		/* BFI_MC_IOC */
	NULL,		/* BFI_MC_DIAG */
	NULL,		/* BFI_MC_FLASH */
	NULL,		/* BFI_MC_CEE */
	NULL,		/* BFI_MC_PORT */
	bfa_iocfc_isr,	/* BFI_MC_IOCFC */
	NULL,
};

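/*
 * port module attach - wire the port module to the IOC and claim its
 * carved-out kva/dma memory.
 */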
static void
bfa_com_port_attach(struct bfa_s *bfa)
{
	struct bfa_port_s	*port = &bfa->modules.port;
	struct bfa_mem_dma_s	*port_dma = BFA_MEM_PORT_DMA(bfa);

	bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
	bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp);
}

/*
 * ablk module attach
 */
static void
bfa_com_ablk_attach(struct bfa_s *bfa)
{
	struct bfa_ablk_s	*ablk = &bfa->modules.ablk;
	struct bfa_mem_dma_s	*ablk_dma = BFA_MEM_ABLK_DMA(bfa);

	bfa_ablk_attach(ablk, &bfa->ioc);
	bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp);
}

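/*
 * cee module attach
 */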
static void
bfa_com_cee_attach(struct bfa_s *bfa)
{
	struct bfa_cee_s	*cee = &bfa->modules.cee;
	struct bfa_mem_dma_s	*cee_dma = BFA_MEM_CEE_DMA(bfa);

	cee->trcmod = bfa->trcmod;
	bfa_cee_attach(cee, &bfa->ioc, bfa);
	bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp);
}

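/*
 * sfp module attach
 */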
static void
bfa_com_sfp_attach(struct bfa_s *bfa)
{
	struct bfa_sfp_s	*sfp = BFA_SFP_MOD(bfa);
	struct bfa_mem_dma_s	*sfp_dma = BFA_MEM_SFP_DMA(bfa);

	bfa_sfp_attach(sfp, &bfa->ioc, bfa, bfa->trcmod);
	bfa_sfp_memclaim(sfp, sfp_dma->kva_curp, sfp_dma->dma_curp);
}

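/*
 * flash module attach; mincfg indicates a minimal-configuration
 * (min_cfg) driver load.
 */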
static void
bfa_com_flash_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
{
	struct bfa_flash_s	*flash = BFA_FLASH(bfa);
	struct bfa_mem_dma_s	*flash_dma = BFA_MEM_FLASH_DMA(bfa);

	bfa_flash_attach(flash, &bfa->ioc, bfa, bfa->trcmod, mincfg);
	bfa_flash_memclaim(flash, flash_dma->kva_curp,
			   flash_dma->dma_curp, mincfg);
}

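/*
 * diag module attach
 */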
static void
bfa_com_diag_attach(struct bfa_s *bfa)
{
	struct bfa_diag_s	*diag = BFA_DIAG_MOD(bfa);
	struct bfa_mem_dma_s	*diag_dma = BFA_MEM_DIAG_DMA(bfa);

	bfa_diag_attach(diag, &bfa->ioc, bfa, bfa_fcport_beacon, bfa->trcmod);
	bfa_diag_memclaim(diag, diag_dma->kva_curp, diag_dma->dma_curp);
}

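/*
 * phy module attach
 */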
static void
bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
{
	struct bfa_phy_s	*phy = BFA_PHY(bfa);
	struct bfa_mem_dma_s	*phy_dma = BFA_MEM_PHY_DMA(bfa);

	bfa_phy_attach(phy, &bfa->ioc, bfa, bfa->trcmod, mincfg);
	bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg);
}

/*
 * BFA IOC FC related definitions
 */

/*
 * IOC local definitions
 */
#define BFA_IOCFC_TOV		5000	/* msecs */

enum {
	BFA_IOCFC_ACT_NONE	= 0,
	BFA_IOCFC_ACT_INIT	= 1,
	BFA_IOCFC_ACT_STOP	= 2,
	BFA_IOCFC_ACT_DISABLE	= 3,
	BFA_IOCFC_ACT_ENABLE	= 4,
};

#define DEF_CFG_NUM_FABRICS		1
#define DEF_CFG_NUM_LPORTS		256
#define DEF_CFG_NUM_CQS			4
#define DEF_CFG_NUM_IOIM_REQS		(BFA_IOIM_MAX)
#define DEF_CFG_NUM_TSKIM_REQS		128
#define DEF_CFG_NUM_FCXP_REQS		64
#define DEF_CFG_NUM_UF_BUFS		64
#define DEF_CFG_NUM_RPORTS		1024
#define DEF_CFG_NUM_ITNIMS		(DEF_CFG_NUM_RPORTS)
#define DEF_CFG_NUM_TINS		256

#define DEF_CFG_NUM_SGPGS		2048
#define DEF_CFG_NUM_REQQ_ELEMS		256
#define DEF_CFG_NUM_RSPQ_ELEMS		64
#define DEF_CFG_NUM_SBOOT_TGTS		16
#define DEF_CFG_NUM_SBOOT_LUNS		16

/*
 * forward declaration for IOC FC functions
 */
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;

/*
 * BFA Interrupt handling functions
 */
static void
bfa_reqq_resume(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq, *qe, *qen;
	struct bfa_reqq_wait_s *wqe;

	waitq = bfa_reqq(bfa, qid);
	list_for_each_safe(qe, qen, waitq) {
		/*
		 * Callback only as long as there is room in request queue
		 */
		if (bfa_reqq_full(bfa, qid))
			break;

		list_del(qe);
		wqe = (struct bfa_reqq_wait_s *) qe;
		wqe->qresume(wqe->cbarg);
	}
}

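/*
 * Drain one response queue: walk the ring from the consumer index (ci)
 * to the producer index (pi), dispatch each message to its class
 * handler, then publish the new ci back to hardware.
 */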
static inline void
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32	pi, ci;
	struct list_head *waitq;

	bfa_isr_rspq_ack(bfa, qid);

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);

	while (ci != pi) {
		m = bfa_rspq_elem(bfa, qid, ci);
		WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);

		bfa_isrs[m->mhdr.msg_class] (bfa, m);
		CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
	}

	/*
	 * update CI
	 */
	bfa_rspq_ci(bfa, qid) = pi;
	writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
	mmiowb();

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}

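/*
 * Acknowledge a request queue interrupt and resume any requests
 * waiting for queue space.
 */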
static inline void
bfa_isr_reqq(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq;

	bfa_isr_reqq_ack(bfa, qid);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}

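/*
 * Interrupt handler for the configuration where all events are routed
 * to a single vector: service RME queues, then CPE queues, then any
 * remaining (error) bits.
 */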
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	u32	intr, qintr;
	int	queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return;

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_rspq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return;

	bfa_msix_lpu_err(bfa, intr);
}

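/*
 * Legacy INTx interrupt handler; returns BFA_FALSE if no interrupt
 * status bit was set for this adapter (the line may be shared).
 */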
bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return BFA_FALSE;

	qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
	if (qintr)
		writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_rspq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}

void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 umsk;
	int pci_func = bfa_ioc_pcifn(&bfa->ioc);

	bfa_trc(bfa, pci_func);

	bfa_msix_ctrl_install(bfa);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		umsk = __HFN_INT_ERR_MASK_CT2;
		umsk |= pci_func == 0 ?
			__HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
	} else {
		umsk = __HFN_INT_ERR_MASK;
		umsk |= pci_func == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
	}

	writel(umsk, bfa->iocfc.bfa_regs.intr_status);
	writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
	bfa->iocfc.intr_mask = ~umsk;
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}

void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa_isr_mode_set(bfa, BFA_FALSE);
	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
	bfa_msix_uninstall(bfa);
}

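/*
 * MSIX handler for a CPE (request) queue vector.
 */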
void
bfa_msix_reqq(struct bfa_s *bfa, int vec)
{
	bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
}

void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	WARN_ON(1);
	bfa_trc_stop(bfa->trcmod);
}

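/*
 * MSIX handler for an RME (response) queue vector.
 */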
void
bfa_msix_rspq(struct bfa_s *bfa, int vec)
{
	bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
}

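/*
 * MSIX handler for mailbox (LPU) and error interrupts.
 */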
void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;
	bfa_boolean_t lpu_isr, halt_isr, pss_isr;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
		pss_isr  = intr & __HFN_INT_ERR_PSS_CT2;
		lpu_isr  = intr & (__HFN_INT_MBOX_LPU0_CT2 |
				   __HFN_INT_MBOX_LPU1_CT2);
		intr    &= __HFN_INT_ERR_MASK_CT2;
	} else {
		halt_isr = intr & __HFN_INT_LL_HALT;
		pss_isr  = intr & __HFN_INT_ERR_PSS;
		lpu_isr  = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
		intr    &= __HFN_INT_ERR_MASK;
	}

	if (lpu_isr)
		bfa_ioc_mbox_isr(&bfa->ioc);

	if (intr) {
		if (halt_isr) {
			/*
			 * If LL_HALT bit is set then FW Init Halt LL Port
			 * Register needs to be cleared as well so Interrupt
			 * Status Register will be cleared.
			 */
			curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
		}

		if (pss_isr) {
			/*
			 * ERR_PSS bit needs to be cleared as well in case
			 * interrupts are shared so the driver's interrupt
			 * handler is still called even though it is already
			 * masked out.
			 */
			curr_value = readl(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			writel(curr_value,
				bfa->ioc.ioc_regs.pss_err_status_reg);
		}

		writel(intr, bfa->iocfc.bfa_regs.intr_status);
		bfa_ioc_error_isr(&bfa->ioc);
	}
}

/*
 * BFA IOC FC related functions
 */

/*
 * BFA IOC private functions
 */

/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s	*cfg = &iocfc->cfg;
	int		i;

	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->single_msix_vec = 0;
	if (bfa->msix.nvecs == 1)
		cfg_info->single_msix_vec = 1;
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;
	cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs);
	cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);

	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing if it is driver init path
	 * and not ioc disable/enable path.
	 */
	if (!iocfc->cfgdone)
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	iocfc->cfgdone = BFA_FALSE;

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_fn_lpu(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}

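/*
 * Initialize IOCFC state and select the ASIC-specific hardware
 * interface handlers (hwct for CT/CT2 ASICs, hwcb otherwise).
 */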
static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;

	bfa->bfad = bfad;
	iocfc->bfa = bfa;
	iocfc->action = BFA_IOCFC_ACT_NONE;

	iocfc->cfg = *cfg;

	/*
	 * Initialize chip specific handlers.
	 */
	if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
	} else {
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_reqq_ack = NULL;
		iocfc->hwif.hw_rspq_ack = NULL;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
	}

	if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
		iocfc->hwif.hw_isr_mode_set = NULL;
		iocfc->hwif.hw_rspq_ack = NULL;
	}

	iocfc->hwif.hw_reginit(bfa);
	bfa->msix.nvecs = 0;
}

static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
{
	u8	*dm_kva = NULL;
	u64	dm_pa = 0;
	int	i, per_reqq_sz, per_rspq_sz, dbgsz;
	struct bfa_iocfc_s  *iocfc = &bfa->iocfc;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_dma_s *reqq_dma, *rspq_dma;

	/* First allocate dma memory for IOC */
	bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma),
			  bfa_mem_dma_phys(ioc_dma));

	/* Claim DMA-able memory for the request/response queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		reqq_dma = BFA_MEM_REQQ_DMA(bfa, i);
		iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma);
		iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma);
		memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz);

		rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i);
		iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma);
		iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma);
		memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz);
	}

	/* Claim IOCFC dma memory - for shadow CI/PI */
	dm_kva = bfa_mem_dma_virt(iocfc_dma);
	dm_pa  = bfa_mem_dma_phys(iocfc_dma);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/* Claim IOCFC dma memory - for the config info page */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/* Claim IOCFC dma memory - for the config response */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			      BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			     BFA_CACHELINE_SZ);

	/* Claim IOCFC kva memory */
	dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
	if (dbgsz > 0) {
		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
		bfa_mem_kva_curp(iocfc) += dbgsz;
	}
}

/*
 * Start BFA submodules.
 */
static void
bfa_iocfc_start_submod(struct bfa_s *bfa)
{
	int		i;

	bfa->queue_process = BFA_TRUE;
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		bfa_isr_rspq_ack(bfa, i);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->start(bfa);
}

/*
 * Disable BFA submodules.
 */
static void
bfa_iocfc_disable_submod(struct bfa_s *bfa)
{
	int		i;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->iocdisable(bfa);
}

static void
bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
	struct bfa_s	*bfa = bfa_arg;

	if (complete) {
		if (bfa->iocfc.cfgdone)
			bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
		else
			bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
	} else {
		if (bfa->iocfc.cfgdone)
			bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
	}
}

static void
bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s	*bfa = bfa_arg;
	struct bfad_s	*bfad = bfa->bfad;

	if (compl)
		complete(&bfad->comp);
	else
		bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
}

static void
bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s	*bfa = bfa_arg;
	struct bfad_s	*bfad = bfa->bfad;

	if (compl)
		complete(&bfad->enable_comp);
}

static void
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s	*bfa = bfa_arg;
	struct bfad_s	*bfad = bfa->bfad;

	if (compl)
		complete(&bfad->disable_comp);
}

/*
 * Configure queue registers from the firmware response.
 */
static void
bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
{
	int	i;
	struct bfa_iocfc_regs_s	*r = &bfa->iocfc.bfa_regs;
	void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);

	for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
		bfa->iocfc.hw_qid[i] = qreg->hw_qid[i];
		r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
		r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
		r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
		r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
		r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
		r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
	}
}

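/*
 * Re-size each submodule's resources to the counts actually granted
 * by firmware in its configuration response.
 */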
static void
bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
{
	bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
	bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
	bfa_rport_res_recfg(bfa, fwcfg->num_rports);
	bfa_fcp_res_recfg(bfa, fwcfg->num_ioim_reqs);
	bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
}

/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_iocfc_cfgrsp(struct bfa_s *bfa)
{
	struct bfa_iocfc_s		*iocfc	= &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s	*cfgrsp	= iocfc->cfgrsp;
	struct bfa_iocfc_fwcfg_s	*fwcfg	= &cfgrsp->fwcfg;

	/* num_cqs is a single byte; the remaining counts are big-endian */
	fwcfg->num_ioim_reqs  = be16_to_cpu(fwcfg->num_ioim_reqs);
	fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
	fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
	fwcfg->num_fcxp_reqs  = be16_to_cpu(fwcfg->num_fcxp_reqs);
	fwcfg->num_uf_bufs    = be16_to_cpu(fwcfg->num_uf_bufs);
	fwcfg->num_rports     = be16_to_cpu(fwcfg->num_rports);

	iocfc->cfgdone = BFA_TRUE;

	/*
	 * configure queue register offsets as learnt from firmware
	 */
	bfa_iocfc_qreg(bfa, &cfgrsp->qreg);

	/*
	 * Re-configure resources as learnt from firmware
	 */
	bfa_iocfc_res_recfg(bfa, fwcfg);

	/*
	 * Install MSIX queue handlers
	 */
	bfa_msix_queue_install(bfa);

	/*
	 * Configuration is complete - initialize/start submodules
	 */
	bfa_fcport_init(bfa);

	if (iocfc->action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
	else {
		if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
			bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
				     bfa_iocfc_enable_cb, bfa);
		bfa_iocfc_start_submod(bfa);
	}
}

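/*
 * Reset the producer/consumer indices of all request and response
 * queues.
 */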
void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
	int		q;

	for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
		bfa_reqq_ci(bfa, q) = 0;
		bfa_reqq_pi(bfa, q) = 0;
		bfa_rspq_ci(bfa, q) = 0;
		bfa_rspq_pi(bfa, q) = 0;
	}
}

/* Fabric Assigned Address specific functions */

/*
 * Check whether IOC is ready before sending command down
 */
static bfa_status_t
bfa_faa_validate_request(struct bfa_s *bfa)
{
	enum bfa_ioc_type_e	ioc_type = bfa_get_type(bfa);
	u32	card_type = bfa->ioc.attr->card_type;

	if (bfa_ioc_is_operational(&bfa->ioc)) {
		if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
			return BFA_STATUS_FEATURE_NOT_SUPPORTED;
	} else {
		if (!bfa_ioc_is_acq_addr(&bfa->ioc))
			return BFA_STATUS_IOC_NON_OP;
	}

	return BFA_STATUS_OK;
}

bfa_status_t
bfa_faa_enable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, void *cbarg)
{
	struct bfi_faa_en_dis_s faa_enable_req;
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
	bfa_status_t		status;

	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	if (iocfc->faa_args.faa_state == BFA_FAA_ENABLED)
		return BFA_STATUS_FAA_ENABLED;

	if (bfa_fcport_is_trunk_enabled(bfa))
		return BFA_STATUS_ERROR_TRUNK_ENABLED;

	bfa_fcport_cfg_faa(bfa, BFA_FAA_ENABLED);
	iocfc->faa_args.busy = BFA_TRUE;

	memset(&faa_enable_req, 0, sizeof(struct bfi_faa_en_dis_s));
	bfi_h2i_set(faa_enable_req.mh, BFI_MC_IOCFC,
		    BFI_IOCFC_H2I_FAA_ENABLE_REQ, bfa_fn_lpu(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_enable_req,
			  sizeof(struct bfi_faa_en_dis_s));

	return BFA_STATUS_OK;
}

bfa_status_t
bfa_faa_disable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, void *cbarg)
{
	struct bfi_faa_en_dis_s faa_disable_req;
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
	bfa_status_t		status;

	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	if (iocfc->faa_args.faa_state == BFA_FAA_DISABLED)
		return BFA_STATUS_FAA_DISABLED;

	bfa_fcport_cfg_faa(bfa, BFA_FAA_DISABLED);
	iocfc->faa_args.busy = BFA_TRUE;

	memset(&faa_disable_req, 0, sizeof(struct bfi_faa_en_dis_s));
	bfi_h2i_set(faa_disable_req.mh, BFI_MC_IOCFC,
		    BFI_IOCFC_H2I_FAA_DISABLE_REQ, bfa_fn_lpu(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_disable_req,
			  sizeof(struct bfi_faa_en_dis_s));

	return BFA_STATUS_OK;
}

bfa_status_t
bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
	      bfa_cb_iocfc_t cbfn, void *cbarg)
{
	struct bfi_faa_query_s	faa_attr_req;
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
	bfa_status_t		status;

	iocfc->faa_args.faa_attr = attr;
	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	iocfc->faa_args.busy = BFA_TRUE;
	memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));
	bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC,
		    BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req,
			  sizeof(struct bfi_faa_query_s));

	return BFA_STATUS_OK;
}

/*
 * FAA enable response
 */
static void
bfa_faa_enable_reply(struct bfa_iocfc_s *iocfc,
		     struct bfi_faa_en_dis_rsp_s *rsp)
{
	void		*cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
	bfa_status_t	status = rsp->status;

	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);

	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
	iocfc->faa_args.busy = BFA_FALSE;
}

/*
 * FAA disable response
 */
static void
bfa_faa_disable_reply(struct bfa_iocfc_s *iocfc,
		      struct bfi_faa_en_dis_rsp_s *rsp)
{
	void		*cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
	bfa_status_t	status = rsp->status;

	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);

	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
	iocfc->faa_args.busy = BFA_FALSE;
}

/*
 * FAA query response
 */
static void
bfa_faa_query_reply(struct bfa_iocfc_s *iocfc,
		    bfi_faa_query_rsp_t *rsp)
{
	void	*cbarg = iocfc->faa_args.faa_cb.faa_cbarg;

	if (iocfc->faa_args.faa_attr) {
		iocfc->faa_args.faa_attr->faa = rsp->faa;
		iocfc->faa_args.faa_attr->faa_state = rsp->faa_status;
		iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source;
	}

	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);

	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK);
	iocfc->faa_args.busy = BFA_FALSE;
}

/*
 * IOC enable request is complete
 */
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
	struct bfa_s	*bfa = bfa_arg;

	if (status == BFA_STATUS_FAA_ACQ_ADDR) {
		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
			     bfa_iocfc_init_cb, bfa);
		return;
	}

	if (status != BFA_STATUS_OK) {
		bfa_isr_disable(bfa);
		if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
			bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
				     bfa_iocfc_init_cb, bfa);
		else if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
			bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
				     bfa_iocfc_enable_cb, bfa);
		return;
	}

	bfa_iocfc_send_cfg(bfa);
}

/*
 * IOC disable request is complete
 */
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
		bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
			     bfa);
	else {
		WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
		bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
			     bfa);
	}
}

/*
 * Notify sub-modules of hardware failure.
 */
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa->queue_process = BFA_FALSE;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
			     bfa);
}

/*
 * Actions on chip-reset completion.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}

/*
 * Query IOC memory requirement information.
 */
void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		  struct bfa_s *bfa)
{
	int	q, per_reqq_sz, per_rspq_sz;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa);
	u32	dm_len = 0;

	/* dma memory setup for IOC */
	bfa_mem_dma_setup(meminfo, ioc_dma,
		BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ));

	/* dma memory setup for REQ/RSP queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (q = 0; q < cfg->fwcfg.num_cqs; q++) {
		bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q),
				  per_reqq_sz);
		bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q),
				  per_rspq_sz);
	}

	/* IOCFC dma memory - calculate Shadow CI/PI size */
	for (q = 0; q < cfg->fwcfg.num_cqs; q++)
		dm_len += (2 * BFA_CACHELINE_SZ);

	/* IOCFC dma memory - calculate config info / rsp size */
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			      BFA_CACHELINE_SZ);

	/* dma memory setup for IOCFC */
	bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);

	/* kva memory setup for IOCFC */
	bfa_mem_kva_setup(meminfo, iocfc_kva,
			  ((bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0));
}

/*
 * Attach the IOCFC: register IOC callbacks, attach the IOC, and claim
 * the memory carved out for the IOCFC.
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_pcidev_s *pcidev)
{
	int		i;
	struct bfa_ioc_s *ioc = &bfa->ioc;

	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

	ioc->trcmod = bfa->trcmod;
	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);

	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
	bfa_iocfc_mem_claim(bfa, cfg);
	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);

	INIT_LIST_HEAD(&bfa->comp_q);
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
}

/*
 * Initialize the IOCFC and enable the IOC.
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
	bfa_ioc_enable(&bfa->ioc);
}

/*
 * IOC start called from bfa_start(). Called to start IOC operations
 * at driver instantiation for this instance.
 */
void
bfa_iocfc_start(struct bfa_s *bfa)
{
	if (bfa->iocfc.cfgdone)
		bfa_iocfc_start_submod(bfa);
}

/*
 * IOC stop called from bfa_stop(). Called only when driver is unloaded
 * for this instance.
 */
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_STOP;

	bfa->queue_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}

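/*
 * Dispatch IOCFC-class mailbox messages to their handlers.
 */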
void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
	struct bfa_s		*bfa = bfaarg;
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
	union bfi_iocfc_i2h_msg_u	*msg;

	msg = (union bfi_iocfc_i2h_msg_u *) m;
	bfa_trc(bfa, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_IOCFC_I2H_CFG_REPLY:
		bfa_iocfc_cfgrsp(bfa);
		break;
	case BFI_IOCFC_I2H_UPDATEQ_RSP:
		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
		break;
	case BFI_IOCFC_I2H_FAA_ENABLE_RSP:
		bfa_faa_enable_reply(iocfc,
			(struct bfi_faa_en_dis_rsp_s *)msg);
		break;
	case BFI_IOCFC_I2H_FAA_DISABLE_RSP:
		bfa_faa_disable_reply(iocfc,
			(struct bfi_faa_en_dis_rsp_s *)msg);
		break;
	case BFI_IOCFC_I2H_FAA_QUERY_RSP:
		bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
		break;
	default:
		WARN_ON(1);
	}
}

void
bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
{
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;

	attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;

	attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
				be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
				be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);

	attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
			be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
			be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);

	attr->config = iocfc->cfg;
}

bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
	struct bfa_iocfc_s		*iocfc = &bfa->iocfc;
	struct bfi_iocfc_set_intr_req_s *m;

	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
	iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
	iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);

	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_OK;

	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
	if (!m)
		return BFA_STATUS_DEVBUSY;

	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
		    bfa_fn_lpu(bfa));
	m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
	m->delay = iocfc->cfginfo->intr_attr.delay;
	m->latency = iocfc->cfginfo->intr_attr.latency;

	bfa_trc(bfa, attr->delay);
	bfa_trc(bfa, attr->latency);

	bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh);
	return BFA_STATUS_OK;
}

void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa)
{
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;

	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa);
}

/*
 * Enable IOC after it is disabled.
 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Enable");
	bfa->iocfc.action = BFA_IOCFC_ACT_ENABLE;
	bfa_ioc_enable(&bfa->ioc);
}

void
bfa_iocfc_disable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Disable");
	bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;

	bfa->queue_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}

bfa_boolean_t
bfa_iocfc_is_operational(struct bfa_s *bfa)
{
	return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
}

/*
 * Return boot target port wwns -- read from boot information in flash.
 */
void
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	int i;

	if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
		bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
		*nwwns = cfgrsp->pbc_cfg.nbluns;
		for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
			wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;

		return;
	}

	*nwwns = cfgrsp->bootwwns.nwwns;
	memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
}

int
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
	return cfgrsp->pbc_cfg.nvports;
}

/*
 * Use this function to query the memory requirement of the BFA library.
 * This function needs to be called before bfa_attach() to get the
 * memory required of the BFA layer for a given driver configuration.
 *
 * This call will fail if the cap is out of range compared to pre-defined
 * values within the BFA library
 *
 * @param[in] cfg -	pointer to bfa_ioc_cfg_t. Driver layer should indicate
 *			its configuration in this structure.
 *			The default values for struct bfa_iocfc_cfg_s can be
 *			fetched using bfa_cfg_get_default() API.
 *
 *			If cap's boundary check fails, the library will use
 *			the default bfa_cap_t values (and log a warning msg).
 *
 * @param[out] meminfo - pointer to bfa_meminfo_t. This content
 *			indicates the memory type (see bfa_mem_type_t) and
 *			amount of memory required.
 *
 *			Driver should allocate the memory, populate the
 *			starting address for each block and provide the same
 *			structure as input parameter to bfa_attach() call.
 *
 * @param[in] bfa -	pointer to the bfa structure, used while fetching the
 *			dma, kva memory information of the bfa sub-modules.
 *
 * @return void
 *
 * Special Considerations: @note
 */
void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		    struct bfa_s *bfa)
{
	int		i;
	struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
	struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
	struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
	struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
	struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
	struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
	struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));

	/* Initialize the DMA & KVA meminfo queues */
	INIT_LIST_HEAD(&meminfo->dma_info.qe);
	INIT_LIST_HEAD(&meminfo->kva_info.qe);

	bfa_iocfc_meminfo(cfg, meminfo, bfa);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->meminfo(cfg, meminfo, bfa);

	/* dma info setup */
	bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo());
	bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo());
	bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo());
	bfa_mem_dma_setup(meminfo, sfp_dma, bfa_sfp_meminfo());
	bfa_mem_dma_setup(meminfo, flash_dma,
			  bfa_flash_meminfo(cfg->drvcfg.min_cfg));
	bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo());
	bfa_mem_dma_setup(meminfo, phy_dma,
			  bfa_phy_meminfo(cfg->drvcfg.min_cfg));
}

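/*
 * A minimal usage sketch of the meminfo/attach handshake, as seen from
 * the OS driver. alloc_chunks() is a hypothetical helper that walks
 * meminfo->dma_info.qe / kva_info.qe and fills in each element's
 * kva/dma fields; it is not part of this file:
 *
 *	struct bfa_iocfc_cfg_s cfg;
 *	struct bfa_meminfo_s   meminfo;
 *
 *	bfa_cfg_get_default(&cfg);
 *	bfa_cfg_get_meminfo(&cfg, &meminfo, bfa);
 *	alloc_chunks(&meminfo);
 *	bfa_attach(bfa, bfad, &cfg, &meminfo, pcidev);
 */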
/*
 * Use this function to attach the driver instance to the BFA
 * library. This function will not trigger any HW initialization
 * process (which will be done in bfa_init() call)
 *
 * This call will fail if the cap is out of range compared to
 * pre-defined values within the BFA library
 *
 * @param[out]	bfa	Pointer to bfa_t.
 *
 * @param[in]	bfad	Opaque handle back to the driver's IOC structure
 *
 * @param[in]	cfg	Pointer to bfa_ioc_cfg_t. Should be same structure
 *			that was used in bfa_cfg_get_meminfo().
 *
 * @param[in]	meminfo	Pointer to bfa_meminfo_t. The driver should
 *			use the bfa_cfg_get_meminfo() call to
 *			find the memory blocks required, allocate the
 *			required memory and provide the starting addresses.
 *
 * @param[in]	pcidev	pointer to struct bfa_pcidev_s
 *
 * @return
 * void
 *
 * Special Considerations:
 *
 * @note
 *
 */
void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	   struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int	i;
	struct bfa_mem_dma_s *dma_info, *dma_elem;
	struct bfa_mem_kva_s *kva_info, *kva_elem;
	struct list_head *dm_qe, *km_qe;

	bfa->fcs = BFA_FALSE;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	/* Initialize memory pointers for iterative allocation */
	dma_info = &meminfo->dma_info;
	dma_info->kva_curp = dma_info->kva;
	dma_info->dma_curp = dma_info->dma;

	kva_info = &meminfo->kva_info;
	kva_info->kva_curp = kva_info->kva;

	list_for_each(dm_qe, &dma_info->qe) {
		dma_elem = (struct bfa_mem_dma_s *) dm_qe;
		dma_elem->kva_curp = dma_elem->kva;
		dma_elem->dma_curp = dma_elem->dma;
	}

	list_for_each(km_qe, &kva_info->qe) {
		kva_elem = (struct bfa_mem_kva_s *) km_qe;
		kva_elem->kva_curp = kva_elem->kva;
	}

	bfa_iocfc_attach(bfa, bfad, cfg, pcidev);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->attach(bfa, bfad, cfg, pcidev);

	bfa_com_port_attach(bfa);
	bfa_com_ablk_attach(bfa);
	bfa_com_cee_attach(bfa);
	bfa_com_sfp_attach(bfa);
	bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg);
	bfa_com_diag_attach(bfa);
	bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg);
}

/*
 * Use this function to delete a BFA IOC. IOC should be stopped (by
 * calling bfa_stop()) before this function call.
 *
 * @param[in] bfa - pointer to bfa_t.
 *
 * @return
 * void
 *
 * Special Considerations:
 *
 * @note
 */
void
bfa_detach(struct bfa_s *bfa)
{
	int	i;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->detach(bfa);
	bfa_ioc_detach(&bfa->ioc);
}

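/*
 * Splice all queued completions off bfa->comp_q; the caller then runs
 * them with bfa_comp_process() or discards them with bfa_comp_free().
 */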
void
bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
{
	INIT_LIST_HEAD(comp_q);
	list_splice_tail_init(&bfa->comp_q, comp_q);
}

void
bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head	*qe;
	struct list_head	*qen;
	struct bfa_cb_qe_s	*hcb_qe;

	list_for_each_safe(qe, qen, comp_q) {
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
	}
}

void
bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head	*qe;
	struct bfa_cb_qe_s	*hcb_qe;

	while (!list_empty(comp_q)) {
		bfa_q_deq(comp_q, &qe);
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
	}
}

/*
 * Return the list of PCI vendor/device id lists supported by this
 * BFA instance.
 */
void
bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
{
	static struct bfa_pciid_s __pciids[] = {
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
	};

	*npciids = sizeof(__pciids) / sizeof(__pciids[0]);
	*pciids = __pciids;
}

/*
 * Use this function to query the default struct bfa_iocfc_cfg_s value
 * (compiled into the BFA layer). The OS driver can then turn back and
 * overwrite entries that have been configured by the user.
 *
 * @param[in] cfg - pointer to bfa_ioc_cfg_t
 *
 * @return
 * void
 *
 * Special Considerations:
 * note
 */
void
bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
{
	cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
	cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
	cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
	cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
	cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
	cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
	cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
	cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
	cfg->fwcfg.num_fwtio_reqs = 0;

	cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
	cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
	cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
	cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
	cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
	cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
	cfg->drvcfg.ioc_recover = BFA_FALSE;
	cfg->drvcfg.delay_comp = BFA_FALSE;
}

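/*
 * Populate cfg with the minimal supported resource configuration;
 * also marks drvcfg.min_cfg so submodules size themselves accordingly.
 */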
void
bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
{
	bfa_cfg_get_default(cfg);
	cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
	cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
	cfg->fwcfg.num_rports = BFA_RPORT_MIN;
	cfg->fwcfg.num_fwtio_reqs = 0;

	cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
	cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
	cfg->drvcfg.min_cfg = BFA_TRUE;
}