/* bnx2x_cmn.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#ifndef BNX2X_CMN_H
#define BNX2X_CMN_H

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>

#include "bnx2x.h"

/* This is used as a replacement for an MCP if it's not present */
extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */

extern int num_queues;
/************************ Macros ********************************/

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			kfree((void *)x); \
			x = NULL; \
		} \
	} while (0)

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset((void *)x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = kzalloc(size, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
	} while (0)
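
/*
 * Illustrative usage sketch (not part of the driver): the allocation helpers
 * above jump to an alloc_mem_err label on failure, so the enclosing routine
 * is expected to provide one. The field names "foo"/"bar"/"foo_mapping"
 * below are hypothetical placeholders:
 *
 *	BNX2X_PCI_ALLOC(bp->foo, &bp->foo_mapping, sizeof(*bp->foo));
 *	BNX2X_ALLOC(bp->bar, sizeof(*bp->bar));
 *	return 0;
 *
 * alloc_mem_err:
 *	BNX2X_PCI_FREE(bp->foo, bp->foo_mapping, sizeof(*bp->foo));
 *	BNX2X_FREE(bp->bar);
 *	return -ENOMEM;
 *
 * Both free helpers check for NULL before releasing, so a partially
 * completed allocation sequence can be unwound unconditionally.
 */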

/*********************** Interfaces ****************************
 *  Functions that need to be implemented by each driver version
 */

/* Init */

/**
 * bnx2x_send_unload_req - request unload mode from the MCP.
 *
 * @bp:			driver handle
 * @unload_mode:	requested function's unload mode
 *
 * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
 */
u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);

/**
 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 *
 * @bp:		driver handle
 */
void bnx2x_send_unload_done(struct bnx2x *bp);

/**
 * bnx2x_config_rss_pf - configure RSS parameters.
 *
 * @bp:			driver handle
 * @ind_table:		indirection table to configure
 * @config_hash:	re-configure RSS hash keys configuration
 */
int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash);

/**
 * bnx2x__init_func_obj - init function object
 *
 * @bp:			driver handle
 *
 * Initializes the Function Object with the appropriate
 * parameters which include a function slow path driver
 * interface.
 */
void bnx2x__init_func_obj(struct bnx2x *bp);

/**
 * bnx2x_setup_queue - setup eth queue.
 *
 * @bp:		driver handle
 * @fp:		pointer to the fastpath structure
 * @leading:	boolean
 *
 */
int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		      bool leading);

/**
 * bnx2x_setup_leading - bring up a leading eth queue.
 *
 * @bp:		driver handle
 */
int bnx2x_setup_leading(struct bnx2x *bp);

/**
 * bnx2x_fw_command - send the MCP a request
 *
 * @bp:		driver handle
 * @command:	request
 * @param:	request's parameter
 *
 * block until there is a reply
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
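
/*
 * Usage sketch (illustrative only): a caller typically issues an MCP request
 * and then acts on the firmware's reply, e.g.
 *
 *	load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
 *
 * The command value is assumed to come from the HSI definitions pulled in
 * through bnx2x.h; this is not a prescribed calling convention.
 */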

/**
 * bnx2x_initial_phy_init - initialize link parameters structure variables.
 *
 * @bp:		driver handle
 * @load_mode:	current mode
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);

/**
 * bnx2x_link_set - configure hw according to link parameters structure.
 *
 * @bp:		driver handle
 */
void bnx2x_link_set(struct bnx2x *bp);

/**
 * bnx2x_link_test - query link status.
 *
 * @bp:		driver handle
 * @is_serdes:	bool
 *
 * Returns 0 if link is UP.
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);

/**
 * bnx2x_drv_pulse - write driver pulse to shmem
 *
 * @bp:		driver handle
 *
 * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox
 * in the shmem.
 */
void bnx2x_drv_pulse(struct bnx2x *bp);

/**
 * bnx2x_igu_ack_sb - update IGU with current SB value
 *
 * @bp:		driver handle
 * @igu_sb_id:	SB id
 * @segment:	SB segment
 * @index:	SB index
 * @op:		SB operation
 * @update:	is HW update required
 */
void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
		      u16 index, u8 op, u8 update);

/**
 * bnx2x__link_status_update - handles link status change.
 *
 * @bp:		driver handle
 */
void bnx2x__link_status_update(struct bnx2x *bp);

/**
 * bnx2x_link_report - report link status to upper layer.
 *
 * @bp:		driver handle
 */
void bnx2x_link_report(struct bnx2x *bp);

/* Non-atomic version of bnx2x_link_report() */
void __bnx2x_link_report(struct bnx2x *bp);

/**
 * bnx2x_get_mf_speed - calculate MF speed.
 *
 * @bp:		driver handle
 *
 * Takes into account current linespeed and MF configuration.
 */
u16 bnx2x_get_mf_speed(struct bnx2x *bp);

/**
 * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler
 *
 * @irq:		irq number
 * @dev_instance:	private instance
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);

/**
 * bnx2x_interrupt - non MSI-X interrupt handler
 *
 * @irq:		irq number
 * @dev_instance:	private instance
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
#ifdef BCM_CNIC

/**
 * bnx2x_cnic_notify - send command to cnic driver
 *
 * @bp:		driver handle
 * @cmd:	command
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);

/**
 * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information
 *
 * @bp:		driver handle
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
#endif

/**
 * bnx2x_int_enable - enable HW interrupts.
 *
 * @bp:		driver handle
 */
void bnx2x_int_enable(struct bnx2x *bp);

/**
 * bnx2x_int_disable_sync - disable interrupts.
 *
 * @bp:		driver handle
 * @disable_hw:	true, disable HW interrupts.
 *
 * This function ensures that no ISRs or SP DPCs (sp_task) are
 * still running when it returns.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);

/**
 * bnx2x_nic_init - init driver internals.
 *
 * @bp:		driver handle
 * @load_code:	COMMON, PORT or FUNCTION
 *
 * Initializes:
 *  - rings
 *  - status blocks
 *  - etc.
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);

/**
 * bnx2x_alloc_mem - allocate driver's memory.
 *
 * @bp:		driver handle
 */
int bnx2x_alloc_mem(struct bnx2x *bp);

/**
 * bnx2x_free_mem - release driver's memory.
 *
 * @bp:		driver handle
 */
void bnx2x_free_mem(struct bnx2x *bp);

/**
 * bnx2x_set_num_queues - set number of queues according to mode.
 *
 * @bp:		driver handle
 */
void bnx2x_set_num_queues(struct bnx2x *bp);

/**
 * bnx2x_chip_cleanup - cleanup chip internals.
 *
 * @bp:			driver handle
 * @unload_mode:	COMMON, PORT, FUNCTION
 *
 * - Cleanup MAC configuration.
 * - Closes clients.
 * - etc.
 */
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);

/**
 * bnx2x_acquire_hw_lock - acquire HW lock.
 *
 * @bp:		driver handle
 * @resource:	resource bit which was locked
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_hw_lock - release HW lock.
 *
 * @bp:		driver handle
 * @resource:	resource bit which was locked
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_set_eth_mac - configure eth MAC address in the HW
 *
 * @bp:		driver handle
 * @set:	set or clear
 *
 * Configures according to the value in netdev->dev_addr.
 */
int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);

/**
 * bnx2x_set_rx_mode - set MAC filtering configurations.
 *
 * @dev:	netdevice
 *
 * called with netif_tx_lock from dev_mcast.c
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh()
 */
void bnx2x_set_rx_mode(struct net_device *dev);

/**
 * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
 *
 * @bp:		driver handle
 *
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh().
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp);

/**
 * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
 *
 * @bp:			driver handle
 * @cl_id:		client id
 * @rx_mode_flags:	rx mode configuration
 * @rx_accept_flags:	rx accept configuration
 * @tx_accept_flags:	tx accept configuration (tx switch)
 * @ramrod_flags:	ramrod configuration
 */
void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
			 unsigned long rx_mode_flags,
			 unsigned long rx_accept_flags,
			 unsigned long tx_accept_flags,
			 unsigned long ramrod_flags);

/* Parity errors related */
void bnx2x_inc_load_cnt(struct bnx2x *bp);
u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp);
bool bnx2x_reset_is_done(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);

/**
 * bnx2x_sp_event - handle ramrods completion.
 *
 * @fp:		fastpath handle for the event
 * @rr_cqe:	eth_rx_cqe
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);

/**
 * bnx2x_ilt_set_info - prepare ILT configurations.
 *
 * @bp:		driver handle
 */
void bnx2x_ilt_set_info(struct bnx2x *bp);

/**
 * bnx2x_dcbx_init - initialize dcbx protocol.
 *
 * @bp:		driver handle
 */
void bnx2x_dcbx_init(struct bnx2x *bp);

/**
 * bnx2x_set_power_state - set power state to the requested value.
 *
 * @bp:		driver handle
 * @state:	required state D0 or D3hot
 *
 * Currently only D0 and D3hot are supported.
 */
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

/**
 * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW.
 *
 * @bp:		driver handle
 * @value:	new value
 */
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);

/* Error handling */
void bnx2x_panic_dump(struct bnx2x *bp);

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);

/* dev_close main block */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);

/* dev_open main block */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode);

/* hard_xmit callback */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);

/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);

/* reload helper */
int bnx2x_reload_if_running(struct net_device *dev);

int bnx2x_change_mac_addr(struct net_device *dev, void *p);

/* NAPI poll Rx part */
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);

void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			  u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);

/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x_fastpath *fp);

/* suspend/resume callbacks */
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
int bnx2x_resume(struct pci_dev *pdev);

/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);

void bnx2x_free_fp_mem(struct bnx2x *bp);
int bnx2x_alloc_fp_mem(struct bnx2x *bp);

void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);

/**
 * bnx2x_enable_msix - set msix configuration.
 *
 * @bp:		driver handle
 *
 * fills msix_table, requests vectors, updates num_queues
 * according to number of available vectors.
 */
int bnx2x_enable_msix(struct bnx2x *bp);

/**
 * bnx2x_enable_msi - request msi mode from OS, updates internals accordingly
 *
 * @bp:		driver handle
 */
int bnx2x_enable_msi(struct bnx2x *bp);

/**
 * bnx2x_poll - NAPI callback
 *
 * @napi:	napi structure
 * @budget:
 *
 */
int bnx2x_poll(struct napi_struct *napi, int budget);

/**
 * bnx2x_alloc_mem_bp - allocate memories outside the main driver structure
 *
 * @bp:		driver handle
 */
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_free_mem_bp - release memories outside the main driver structure
 *
 * @bp:		driver handle
 */
void bnx2x_free_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_change_mtu - change mtu netdev callback
 *
 * @dev:	net device
 * @new_mtu:	requested mtu
 *
 */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);

u32 bnx2x_fix_features(struct net_device *dev, u32 features);
int bnx2x_set_features(struct net_device *dev, u32 features);

/**
 * bnx2x_tx_timeout - tx timeout netdev callback
 *
 * @dev:	net device
 */
void bnx2x_tx_timeout(struct net_device *dev);

/*********************** Inlines **********************************/
/*********************** Fast path ********************************/
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	barrier(); /* status block is written to by the chip */
	fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}

static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp,
			struct bnx2x_fastpath *fp, u16 bd_prod,
			u16 rx_comp_prod, u16 rx_sge_prod, u32 start)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	u32 i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(rx_prods)/4; i++)
		REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
					u8 segment, u16 index, u8 op,
					u8 update, u32 igu_addr)
{
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr);
	REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
					  u8 idu_sb_id, bool is_Pf)
{
	u32 data, ctl, cnt = 100;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
	u32 sb_bit = 1 << (idu_sb_id%32);
	u32 func_encode = func |
			((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT);
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;

	/* Not supported in BC mode */
	if (CHIP_INT_MODE_IS_BC(bp))
		return;

	data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
			<< IGU_REGULAR_CLEANUP_TYPE_SHIFT)	|
		IGU_REGULAR_CLEANUP_SET				|
		IGU_REGULAR_BCLEANUP;

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT		|
	      func_encode << IGU_CTRL_REG_FID_SHIFT		|
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
			 data, igu_addr_data);
	REG_WR(bp, igu_addr_data, data);
	mmiowb();
	barrier();
	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
			 ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();

	/* wait for clean up to finish */
	while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
		msleep(20);

	if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
		DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: "
			  "idu_sb_id %d offset %d bit %d (cnt %d)\n",
			  idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
	}
}

static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
				   u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
				u16 index, u8 op, u8 update)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
	else {
		u8 segment;

		if (CHIP_INT_MODE_IS_BC(bp))
			segment = storm;
		else if (igu_sb_id != bp->igu_dsb_id)
			segment = IGU_SEG_ACCESS_DEF;
		else if (storm == ATTENTION_ID)
			segment = IGU_SEG_ACCESS_ATTN;
		else
			segment = IGU_SEG_ACCESS_DEF;
		bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
	}
}

static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	barrier();
	return result;
}

static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
	u32 result = REG_RD(bp, igu_addr);

	DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n",
	   result, igu_addr);

	barrier();
	return result;
}

static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
	barrier();
	if (bp->common.int_block == INT_BLOCK_HC)
		return bnx2x_hc_ack_int(bp);
	else
		return bnx2x_igu_ack_int(bp);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return fp->tx_pkt_prod != fp->tx_pkt_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	return hw_cons != fp->tx_pkt_cons;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}

/**
 * bnx2x_tx_disable - disables tx from stack point of view
 *
 * @bp:		driver handle
 */
static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
	netif_tx_disable(bp->dev);
	netif_carrier_off(bp->dev);
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
}

static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
}

static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;
	} else if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
}

static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	return  num_queues ?
		 min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
		 min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
}
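
/*
 * Illustrative note (not from the original source): num_queues above is the
 * value supplied via the driver's num_queues module parameter; when it is
 * left at 0, the queue count falls back to the number of online CPUs, and
 * in both cases it is capped by BNX2X_MAX_QUEUES(bp).
 */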

static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
			idx--;
		}
	}
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT) * sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
				      u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_sync_single_for_device(&bp->pdev->dev,
				   dma_unmap_addr(cons_rx_buf, mapping),
				   RX_COPY_THRESH, DMA_FROM_DEVICE);

	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	prod_rx_buf->skb = cons_rx_buf->skb;
	*prod_bd = *cons_bd;
}

/************************* Init ******************************************/

/**
 * bnx2x_func_start - init function
 *
 * @bp:		driver handle
 *
 * Must be called before sending CLIENT_SETUP for the first client.
 */
static inline int bnx2x_func_start(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {0};
	struct bnx2x_func_start_params *start_params =
		&func_params.params.start;

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_START;

	/* Function parameters */
	start_params->mf_mode = bp->mf_mode;
	start_params->sd_vlan_tag = bp->mf_ov;
	start_params->network_cos_mode = OVERRIDE_COS;

	return bnx2x_func_state_change(bp, &func_params);
}

/**
 * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
 *
 * @fw_hi:	pointer to upper part
 * @fw_mid:	pointer to middle part
 * @fw_lo:	pointer to lower part
 * @mac:	pointer to MAC address
 */
static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo,
					 u8 *mac)
{
	((u8 *)fw_hi)[0]  = mac[1];
	((u8 *)fw_hi)[1]  = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lo)[0]  = mac[5];
	((u8 *)fw_lo)[1]  = mac[4];
}
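
/*
 * Illustrative note (not from the original source): the helper above only
 * swaps bytes within each 16-bit word. For a hypothetical MAC address
 * aa:bb:cc:dd:ee:ff, the bytes of *fw_hi end up as { 0xbb, 0xaa },
 * *fw_mid as { 0xdd, 0xcc } and *fw_lo as { 0xff, 0xee }, i.e. each word
 * carries its pair of MAC bytes in reversed order.
 */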

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	if (fp->disable_tpa)
		return;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		struct sk_buff *skb = first_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
		first_buf->skb = NULL;
	}
}

static inline void bnx2x_init_tx_ring_one(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_TX_RINGS; i++) {
		struct eth_tx_next_bd *tx_next_bd =
			&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

		tx_next_bd->addr_hi =
			cpu_to_le32(U64_HI(fp->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		tx_next_bd->addr_lo =
			cpu_to_le32(U64_LO(fp->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
	}

	SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
	fp->tx_db.data.zero_fill1 = 0;
	fp->tx_db.data.prod = 0;

	fp->tx_pkt_prod = 0;
	fp->tx_pkt_cons = 0;
	fp->tx_bd_prod = 0;
	fp->tx_bd_cons = 0;
	fp->tx_pkt = 0;
}

static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i)
		bnx2x_init_tx_ring_one(&bp->fp[i]);
}

static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_RINGS; i++) {
		struct eth_rx_bd *rx_bd;

		rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
		rx_bd->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		rx_bd->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
	}
}

static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		struct eth_rx_cqe_next_page *nextpg;

		nextpg = (struct eth_rx_cqe_next_page *)
			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
		nextpg->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		nextpg->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
	}
}

/* Returns the number of actually allocated BDs */
static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
				     int rx_ring_size)
{
	struct bnx2x *bp = fp->bp;
	u16 ring_prod, cqe_ring_prod;
	int i;

	fp->rx_comp_cons = 0;
	cqe_ring_prod = ring_prod = 0;

	/* This routine is called only during init so
	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
	 */
	for (i = 0; i < rx_ring_size; i++) {
		if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			continue;
		}
		ring_prod = NEXT_RX_IDX(ring_prod);
		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
		WARN_ON(ring_prod <= (i - fp->eth_q_stats.rx_skb_alloc_failed));
	}

	if (fp->eth_q_stats.rx_skb_alloc_failed)
		BNX2X_ERR("was only able to allocate "
			  "%d rx skbs on queue[%d]\n",
			  (i - fp->eth_q_stats.rx_skb_alloc_failed), fp->index);

	fp->rx_bd_prod = ring_prod;
	/* Limit the CQE producer by the CQE ring size */
	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
				 cqe_ring_prod);
	fp->rx_pkt = fp->rx_calls = 0;

	return i - fp->eth_q_stats.rx_skb_alloc_failed;
}

/* Statistics IDs are global per chip/path, while Client IDs for E1x are per
 * port.
 */
static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
{
	if (!CHIP_IS_E1x(fp->bp))
		return fp->cl_id;
	else
		return fp->cl_id + BP_PORT(fp->bp) * FP_SB_MAX_E1x;
}

static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
					       bnx2x_obj_type obj_type)
{
	struct bnx2x *bp = fp->bp;

	/* Configure classification DBs */
	bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid,
			   BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
			   bnx2x_sp_mapping(bp, mac_rdata),
			   BNX2X_FILTER_MAC_PENDING,
			   &bp->sp_state, obj_type,
			   &bp->macs_pool);
}

/**
 * bnx2x_get_path_func_num - get number of active functions
 *
 * @bp:		driver handle
 *
 * Calculates the number of active (not hidden) functions on the
 * current path.
 */
static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
{
	u8 func_num = 0, i;

	/* 57710 has only one function per-port */
	if (CHIP_IS_E1(bp))
		return 1;

	/* Calculate a number of functions enabled on the current
	 * PATH/PORT.
	 */
	if (CHIP_REV_IS_SLOW(bp)) {
		if (IS_MF(bp))
			func_num = 4;
		else
			func_num = 2;
	} else {
		for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
			u32 func_config =
				MF_CFG_RD(bp,
					  func_mf_config[BP_PORT(bp) + 2 * i].
					  config);
			func_num +=
				((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
		}
	}

	WARN_ON(!func_num);

	return func_num;
}

static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
{
	/* RX_MODE controlling object */
	bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);

	/* multicast configuration controlling object */
	bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
			     BP_FUNC(bp), BP_FUNC(bp),
			     bnx2x_sp(bp, mcast_rdata),
			     bnx2x_sp_mapping(bp, mcast_rdata),
			     BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
			     BNX2X_OBJ_TYPE_RX);

	/* Setup CAM credit pools */
	bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
				   bnx2x_get_path_func_num(bp));

	/* RSS configuration object */
	bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
				  bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
				  bnx2x_sp(bp, rss_rdata),
				  bnx2x_sp_mapping(bp, rss_rdata),
				  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
				  BNX2X_OBJ_TYPE_RX);
}

static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
{
	if (CHIP_IS_E1x(fp->bp))
		return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
	else
		return fp->cl_id;
}

static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;

	if (!CHIP_IS_E1x(bp))
		return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
	else
		return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
}

#ifdef BCM_CNIC
static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
	return bp->cnic_base_cl_id + cl_idx +
		(bp->pf_num >> 1) * NONE_ETH_CONTEXT_USE;
}

static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
{
	/* the 'first' id is allocated for the cnic */
	return bp->base_fw_ndsb;
}

static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
{
	return bp->igu_base_sb;
}

static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
	unsigned long q_type = 0;

	bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
						     BNX2X_FCOE_ETH_CL_ID_IDX);
	/* The current BNX2X_FCOE_ETH_CID definition implies not more than
	 * 16 ETH clients per function when CNIC is enabled!
	 *
	 * Fix it ASAP!!!
	 */
	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
	bnx2x_fcoe(bp, bp) = bp;
	bnx2x_fcoe(bp, index) = FCOE_IDX;
	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
	bnx2x_fcoe(bp, tx_cons_sb) = BNX2X_FCOE_L2_TX_INDEX;
	/* qZone id equals to FW (per path) client id */
	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
	/* init shortcut */
	bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
		bnx2x_rx_ustorm_prods_offset(fp);

	/* Configure Queue State object */
	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, fp->cid, BP_FUNC(bp),
			     bnx2x_sp(bp, q_rdata),
			     bnx2x_sp_mapping(bp, q_rdata), q_type);

	DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d "
			   "igu_sb %d\n",
	   fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
}
#endif

static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp)
{
	int cnt = 1000;

	while (bnx2x_has_tx_work_unload(fp)) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for queue[%d]: "
				  "fp->tx_pkt_prod(%d) != fp->tx_pkt_cons(%d)\n",
				  fp->index, fp->tx_pkt_prod, fp->tx_pkt_cons);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
			return -EBUSY;
#else
			break;
#endif
		}
		cnt--;
		usleep_range(1000, 1000);
	}

	return 0;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp);

static inline void __storm_memset_struct(struct bnx2x *bp,
					 u32 addr, size_t size, u32 *data)
{
	int i;

	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), data[i]);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_cmng(struct bnx2x *bp,
				     struct cmng_struct_per_port *cmng,
				     u8 port)
{
	size_t size = sizeof(struct cmng_struct_per_port);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);

	__storm_memset_struct(bp, addr, size, (u32 *)cmng);
}

/**
 * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
 *
 * @bp:		driver handle
 * @mask:	bits that need to be cleared
 */
static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
{
	int tout = 5000; /* Wait for 5 secs tops */

	while (tout--) {
		smp_mb();
		netif_addr_lock_bh(bp->dev);
		if (!(bp->sp_state & mask)) {
			netif_addr_unlock_bh(bp->dev);
			return true;
		}
		netif_addr_unlock_bh(bp->dev);

		usleep_range(1000, 1000);
	}

	smp_mb();

	netif_addr_lock_bh(bp->dev);
	if (bp->sp_state & mask) {
		BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, "
			  "mask 0x%lx\n", bp->sp_state, mask);
		netif_addr_unlock_bh(bp->dev);
		return false;
	}
	netif_addr_unlock_bh(bp->dev);

	return true;
}

/**
 * bnx2x_set_ctx_validation - set CDU context validation values
 *
 * @bp:		driver handle
 * @cxt:	context of the connection on the host memory
 * @cid:	SW CID of the connection to be configured
 */
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid);

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec);
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);

/**
 * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration.
 *
 * @bp:		driver handle
 * @mf_cfg:	MF configuration
 *
 */
static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
{
	u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
			      FUNC_MF_CFG_MAX_BW_SHIFT;
	if (!max_cfg) {
		BNX2X_ERR("Illegal configuration detected for Max BW - "
			  "using 100 instead\n");
		max_cfg = 100;
	}
	return max_cfg;
}

#endif /* BNX2X_CMN_H */