/*
* This file is part of the Chelsio T4 Ethernet driver for Linux .
*
* Copyright ( c ) 2003 - 2010 Chelsio Communications , Inc . All rights reserved .
*
* This software is available to you under a choice of one of two
* licenses . You may choose to be licensed under the terms of the GNU
* General Public License ( GPL ) Version 2 , available from the file
* COPYING in the main directory of this source tree , or the
* OpenIB . org BSD license below :
*
* Redistribution and use in source and binary forms , with or
* without modification , are permitted provided that the following
* conditions are met :
*
* - Redistributions of source code must retain the above
* copyright notice , this list of conditions and the following
* disclaimer .
*
* - Redistributions in binary form must reproduce the above
* copyright notice , this list of conditions and the following
* disclaimer in the documentation and / or other materials
* provided with the distribution .
*
* THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND ,
* EXPRESS OR IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY , FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT . IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER LIABILITY , WHETHER IN AN
* ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING FROM , OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE .
*/
# ifndef __CXGB4_H__
# define __CXGB4_H__
# include <linux/bitops.h>
# include <linux/cache.h>
# include <linux/interrupt.h>
# include <linux/list.h>
# include <linux/netdevice.h>
# include <linux/pci.h>
# include <linux/spinlock.h>
# include <linux/timer.h>
# include <asm/io.h>
# include "cxgb4_uld.h"
# include "t4_hw.h"
# define FW_VERSION_MAJOR 1
# define FW_VERSION_MINOR 1
# define FW_VERSION_MICRO 0
/* Adapter-wide size constants. */
enum {
	MAX_NPORTS = 4,     /* max # of ports */
	SERNUM_LEN = 24,    /* Serial # length */
	EC_LEN     = 16,    /* E/C length */
	ID_LEN     = 16,    /* ID length */
};
enum {
MEM_EDC0 ,
MEM_EDC1 ,
MEM_MC
} ;
enum dev_master {
MASTER_CANT ,
MASTER_MAY ,
MASTER_MUST
} ;
enum dev_state {
DEV_STATE_UNINIT ,
DEV_STATE_INIT ,
DEV_STATE_ERR
} ;
/* Pause/flow-control flags used in link_config.requested_fc and .fc. */
enum {
	PAUSE_RX      = 1 << 0,	/* honor received pause frames */
	PAUSE_TX      = 1 << 1,	/* send pause frames */
	PAUSE_AUTONEG = 1 << 2	/* autonegotiate flow control */
};
/*
 * Per-port MAC statistics, filled in from hardware counters by
 * t4_get_port_stats().  Member order must not change.
 */
struct port_stats {
	u64 tx_octets;            /* total # of octets in good frames */
	u64 tx_frames;            /* all good frames */
	u64 tx_bcast_frames;      /* all broadcast frames */
	u64 tx_mcast_frames;      /* all multicast frames */
	u64 tx_ucast_frames;      /* all unicast frames */
	u64 tx_error_frames;      /* all error frames */

	u64 tx_frames_64;         /* # of Tx frames in a particular range */
	u64 tx_frames_65_127;     /* 65-127 bytes */
	u64 tx_frames_128_255;    /* 128-255 bytes */
	u64 tx_frames_256_511;    /* 256-511 bytes */
	u64 tx_frames_512_1023;   /* 512-1023 bytes */
	u64 tx_frames_1024_1518;  /* 1024-1518 bytes */
	u64 tx_frames_1519_max;   /* 1519 bytes and larger */

	u64 tx_drop;              /* # of dropped Tx frames */
	u64 tx_pause;             /* # of transmitted pause frames */
	u64 tx_ppp0;              /* # of transmitted PPP prio 0 frames */
	u64 tx_ppp1;              /* # of transmitted PPP prio 1 frames */
	u64 tx_ppp2;              /* # of transmitted PPP prio 2 frames */
	u64 tx_ppp3;              /* # of transmitted PPP prio 3 frames */
	u64 tx_ppp4;              /* # of transmitted PPP prio 4 frames */
	u64 tx_ppp5;              /* # of transmitted PPP prio 5 frames */
	u64 tx_ppp6;              /* # of transmitted PPP prio 6 frames */
	u64 tx_ppp7;              /* # of transmitted PPP prio 7 frames */

	u64 rx_octets;            /* total # of octets in good frames */
	u64 rx_frames;            /* all good frames */
	u64 rx_bcast_frames;      /* all broadcast frames */
	u64 rx_mcast_frames;      /* all multicast frames */
	u64 rx_ucast_frames;      /* all unicast frames */
	u64 rx_too_long;          /* # of frames exceeding MTU */
	u64 rx_jabber;            /* # of jabber frames */
	u64 rx_fcs_err;           /* # of received frames with bad FCS */
	u64 rx_len_err;           /* # of received frames with length error */
	u64 rx_symbol_err;        /* symbol errors */
	u64 rx_runt;              /* # of short frames */

	u64 rx_frames_64;         /* # of Rx frames in a particular range */
	u64 rx_frames_65_127;     /* 65-127 bytes */
	u64 rx_frames_128_255;    /* 128-255 bytes */
	u64 rx_frames_256_511;    /* 256-511 bytes */
	u64 rx_frames_512_1023;   /* 512-1023 bytes */
	u64 rx_frames_1024_1518;  /* 1024-1518 bytes */
	u64 rx_frames_1519_max;   /* 1519 bytes and larger */

	u64 rx_pause;             /* # of received pause frames */
	u64 rx_ppp0;              /* # of received PPP prio 0 frames */
	u64 rx_ppp1;              /* # of received PPP prio 1 frames */
	u64 rx_ppp2;              /* # of received PPP prio 2 frames */
	u64 rx_ppp3;              /* # of received PPP prio 3 frames */
	u64 rx_ppp4;              /* # of received PPP prio 4 frames */
	u64 rx_ppp5;              /* # of received PPP prio 5 frames */
	u64 rx_ppp6;              /* # of received PPP prio 6 frames */
	u64 rx_ppp7;              /* # of received PPP prio 7 frames */

	u64 rx_ovflow0;           /* drops due to buffer-group 0 overflows */
	u64 rx_ovflow1;           /* drops due to buffer-group 1 overflows */
	u64 rx_ovflow2;           /* drops due to buffer-group 2 overflows */
	u64 rx_ovflow3;           /* drops due to buffer-group 3 overflows */
	u64 rx_trunc0;            /* buffer-group 0 truncated packets */
	u64 rx_trunc1;            /* buffer-group 1 truncated packets */
	u64 rx_trunc2;            /* buffer-group 2 truncated packets */
	u64 rx_trunc3;            /* buffer-group 3 truncated packets */
};
/*
 * Loopback-port statistics, filled in by t4_get_lb_stats().
 * Fields mirror the corresponding members of struct port_stats.
 */
struct lb_port_stats {
	u64 octets;          /* total # of octets */
	u64 frames;          /* total # of frames */
	u64 bcast_frames;    /* broadcast frames */
	u64 mcast_frames;    /* multicast frames */
	u64 ucast_frames;    /* unicast frames */
	u64 error_frames;    /* error frames */

	u64 frames_64;       /* frame-size histogram */
	u64 frames_65_127;
	u64 frames_128_255;
	u64 frames_256_511;
	u64 frames_512_1023;
	u64 frames_1024_1518;
	u64 frames_1519_max;

	u64 drop;            /* dropped frames */
	u64 ovflow0;         /* drops due to buffer-group 0 overflows */
	u64 ovflow1;
	u64 ovflow2;
	u64 ovflow3;
	u64 trunc0;          /* buffer-group 0 truncated packets */
	u64 trunc1;
	u64 trunc2;
	u64 trunc3;
};
/* TCP MIB-style counters kept by TP, read via t4_tp_get_tcp_stats(). */
struct tp_tcp_stats {
	u32 tcpOutRsts;      /* # of TCP RST segments sent */
	u64 tcpInSegs;       /* # of TCP segments received */
	u64 tcpOutSegs;      /* # of TCP segments sent */
	u64 tcpRetransSegs;  /* # of TCP segments retransmitted */
};
/*
 * TP error statistics, read via t4_tp_get_err_stats().
 * The 4-element arrays are presumably indexed by channel — TODO confirm.
 */
struct tp_err_stats {
	u32 macInErrs[4];       /* MAC input errors */
	u32 hdrInErrs[4];       /* header input errors */
	u32 tcpInErrs[4];       /* TCP input errors */
	u32 tnlCongDrops[4];    /* tunnel congestion drops */
	u32 ofldChanDrops[4];   /* offload channel drops */
	u32 tnlTxDrops[4];      /* tunnel Tx drops */
	u32 ofldVlanDrops[4];   /* offload VLAN drops */
	u32 tcp6InErrs[4];      /* TCP/IPv6 input errors */
	u32 ofldNoNeigh;        /* offload drops: no neighbour */
	u32 ofldCongDefer;      /* offload congestion deferrals */
};
struct tp_params {
unsigned int ntxchan ; /* # of Tx channels */
unsigned int tre ; /* log2 of core clocks per TP tick */
} ;
/* Parameters read from the adapter's Vital Product Data. */
struct vpd_params {
	unsigned int cclk;           /* core clock; presumably kHz (see
				      * core_ticks_per_usec()) — TODO confirm */
	u8 ec[EC_LEN + 1];           /* engineering change level, NUL-terminated */
	u8 sn[SERNUM_LEN + 1];       /* serial number, NUL-terminated */
	u8 id[ID_LEN + 1];           /* product ID, NUL-terminated */
};
struct pci_params {
unsigned char speed ;
unsigned char width ;
} ;
struct adapter_params {
struct tp_params tp ;
struct vpd_params vpd ;
struct pci_params pci ;
2010-06-18 10:05:27 +00:00
unsigned int sf_size ; /* serial flash size in bytes */
unsigned int sf_nsec ; /* # of flash sectors */
unsigned int sf_fw_start ; /* start of FW image in flash */
2010-04-01 15:28:25 +00:00
unsigned int fw_vers ;
unsigned int tp_vers ;
u8 api_vers [ 7 ] ;
unsigned short mtus [ NMTUS ] ;
unsigned short a_wnd [ NCCTRL_WIN ] ;
unsigned short b_wnd [ NCCTRL_WIN ] ;
unsigned char nports ; /* # of ethernet ports */
unsigned char portvec ;
unsigned char rev ; /* chip revision */
unsigned char offload ;
unsigned int ofldq_wr_cred ;
} ;
/* Parameters for a hardware trace filter (see t4_set_trace_filter()). */
struct trace_params {
	u32 data[TRACE_LEN / 4];     /* match data */
	u32 mask[TRACE_LEN / 4];     /* match mask */
	unsigned short snap_len;     /* # of bytes to capture per match */
	unsigned short min_len;      /* minimum frame length to consider */
	unsigned char skip_ofst;     /* offset at which skipping starts */
	unsigned char skip_len;      /* # of bytes to skip */
	unsigned char invert;        /* invert the match sense */
	unsigned char port;          /* port to trace */
};
struct link_config {
unsigned short supported ; /* link capabilities */
unsigned short advertising ; /* advertised capabilities */
unsigned short requested_speed ; /* speed user has requested */
unsigned short speed ; /* actual link speed */
unsigned char requested_fc ; /* flow control user has requested */
unsigned char fc ; /* actual link flow control */
unsigned char autoneg ; /* autonegotiating? */
unsigned char link_ok ; /* link up? */
} ;
# define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
/* Per-adapter limits on queue sets and queues. */
enum {
	MAX_ETH_QSETS = 32,           /* # of Ethernet Tx/Rx queue sets */
	MAX_OFLD_QSETS = 16,          /* # of offload Tx/Rx queue sets */
	MAX_CTRL_QUEUES = NCHAN,      /* # of control Tx queues */
	MAX_RDMA_QUEUES = NCHAN,      /* # of streaming RDMA Rx queues */
};
enum {
MAX_EGRQ = 128 , /* max # of egress queues, including FLs */
MAX_INGQ = 64 /* max # of interrupt-capable ingress queues */
} ;
struct adapter ;
struct vlan_group ;
struct sge_rspq ;
struct port_info {
struct adapter * adapter ;
struct vlan_group * vlan_grp ;
u16 viid ;
s16 xact_addr_filt ; /* index of exact MAC address filter */
u16 rss_size ; /* size of VI's RSS table slice */
s8 mdio_addr ;
u8 port_type ;
u8 mod_type ;
u8 port_id ;
u8 tx_chan ;
u8 lport ; /* associated offload logical port */
u8 rx_offload ; /* CSO, etc */
u8 nqsets ; /* # of qsets */
u8 first_qset ; /* index of first qset */
2010-07-11 12:01:18 +00:00
u8 rss_mode ;
2010-04-01 15:28:25 +00:00
struct link_config link_cfg ;
2010-07-11 12:01:17 +00:00
u16 * rss ;
2010-04-01 15:28:25 +00:00
} ;
/* port_info.rx_offload flags */
enum {
	RX_CSO = 1 << 0,	/* Rx checksum offload enabled */
};
struct dentry ;
struct work_struct ;
enum {				/* adapter flags (bit 3 is unused) */
	FULL_INIT_DONE = (1 << 0),	/* adapter fully initialized */
	USING_MSI      = (1 << 1),	/* MSI interrupts in use */
	USING_MSIX     = (1 << 2),	/* MSI-X interrupts in use */
	FW_OK          = (1 << 4),	/* firmware contacted successfully */
};
struct rx_sw_desc ;
struct sge_fl {                     /* SGE free-buffer queue state */
	unsigned int avail;         /* # of available Rx buffers */
	unsigned int pend_cred;     /* new buffers since last FL DB ring */
	unsigned int cidx;          /* consumer index */
	unsigned int pidx;          /* producer index */
	unsigned long alloc_failed; /* # of times buffer allocation failed */
	unsigned long large_alloc_failed; /* # of large buffer alloc failures */
	unsigned long starving;     /* # of times FL was found starving */

	/* RO fields */
	unsigned int cntxt_id;      /* SGE context id for the free list */
	unsigned int size;          /* capacity of free list */
	struct rx_sw_desc *sdesc;   /* address of SW Rx descriptor ring */
	__be64 *desc;               /* address of HW Rx descriptor ring */
	dma_addr_t addr;            /* bus address of HW ring start */
};
/* A packet gather list: page fragments that together make up one packet. */
struct pkt_gl {
	skb_frag_t frags[MAX_SKB_FRAGS];  /* the packet's fragments */
	void *va;                         /* virtual address of first byte */
	unsigned int nfrags;              /* # of fragments */
	unsigned int tot_len;             /* total length of fragments */
};
/* Handler called for each entry of an SGE response queue. */
typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
			      const struct pkt_gl *gl);
struct sge_rspq {                   /* state for an SGE response queue */
	struct napi_struct napi;    /* NAPI context for this queue */
	const __be64 *cur_desc;     /* current descriptor in queue */
	unsigned int cidx;          /* consumer index */
	u8 gen;                     /* current generation bit */
	u8 intr_params;             /* interrupt holdoff parameters */
	u8 next_intr_params;        /* holdoff params for next interrupt */
	u8 pktcnt_idx;              /* interrupt packet threshold */
	u8 uld;                     /* ULD handling this queue */
	u8 idx;                     /* queue index within its group */
	int offset;                 /* offset into current Rx buffer */
	u16 cntxt_id;               /* SGE context id for the response q */
	u16 abs_id;                 /* absolute SGE id for the response q */
	__be64 *desc;               /* address of HW response ring */
	dma_addr_t phys_addr;       /* physical address of the ring */
	unsigned int iqe_len;       /* entry size */
	unsigned int size;          /* capacity of response queue */
	struct adapter *adap;       /* owning adapter */
	struct net_device *netdev;  /* associated net device */
	rspq_handler_t handler;     /* per-entry response handler */
};
struct sge_eth_stats { /* Ethernet queue statistics */
unsigned long pkts ; /* # of ethernet packets */
unsigned long lro_pkts ; /* # of LRO super packets */
unsigned long lro_merged ; /* # of wire packets merged by LRO */
unsigned long rx_cso ; /* # of Rx checksum offloads */
unsigned long vlan_ex ; /* # of Rx VLAN extractions */
unsigned long rx_drops ; /* # of packets dropped due to no mem */
} ;
struct sge_eth_rxq {                /* SW Ethernet Rx queue */
	struct sge_rspq rspq;       /* response queue */
	struct sge_fl fl;           /* free-buffer list feeding the queue */
	struct sge_eth_stats stats; /* queue statistics */
} ____cacheline_aligned_in_smp;
struct sge_ofld_stats { /* offload queue statistics */
unsigned long pkts ; /* # of packets */
unsigned long imm ; /* # of immediate-data packets */
unsigned long an ; /* # of asynchronous notifications */
unsigned long nomem ; /* # of responses deferred due to no mem */
} ;
struct sge_ofld_rxq {               /* SW offload Rx queue */
	struct sge_rspq rspq;       /* response queue */
	struct sge_fl fl;           /* free-buffer list feeding the queue */
	struct sge_ofld_stats stats; /* queue statistics */
} ____cacheline_aligned_in_smp;
/* A HW Tx descriptor: 8 flits of 8 bytes each (64 bytes total). */
struct tx_desc {
	__be64 flit[8];
};
struct tx_sw_desc ;
/* Generic SGE Tx queue state, embedded in the specific Tx queue types below. */
struct sge_txq {
	unsigned int in_use;        /* # of in-use Tx descriptors */
	unsigned int size;          /* # of descriptors */
	unsigned int cidx;          /* SW consumer index */
	unsigned int pidx;          /* producer index */
	unsigned long stops;        /* # of times q has been stopped */
	unsigned long restarts;     /* # of queue restarts */
	unsigned int cntxt_id;      /* SGE context id for the Tx q */
	struct tx_desc *desc;       /* address of HW Tx descriptor ring */
	struct tx_sw_desc *sdesc;   /* address of SW Tx descriptor ring */
	struct sge_qstat *stat;     /* queue status entry */
	dma_addr_t phys_addr;       /* physical address of the ring */
};
struct sge_eth_txq {                /* state for an SGE Ethernet Tx queue */
	struct sge_txq q;           /* generic Tx queue state */
	struct netdev_queue *txq;   /* associated netdev TX queue */
	unsigned long tso;          /* # of TSO requests */
	unsigned long tx_cso;       /* # of Tx checksum offloads */
	unsigned long vlan_ins;     /* # of Tx VLAN insertions */
	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;
struct sge_ofld_txq {               /* state for an SGE offload Tx queue */
	struct sge_txq q;           /* generic Tx queue state */
	struct adapter *adap;       /* owning adapter */
	struct sk_buff_head sendq;  /* list of backpressured packets */
	struct tasklet_struct qresume_tsk; /* restarts the queue */
	u8 full;                    /* the Tx ring is full */
	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;
struct sge_ctrl_txq {               /* state for an SGE control Tx queue */
	struct sge_txq q;           /* generic Tx queue state */
	struct adapter *adap;       /* owning adapter */
	struct sk_buff_head sendq;  /* list of backpressured packets */
	struct tasklet_struct qresume_tsk; /* restarts the queue */
	u8 full;                    /* the Tx ring is full */
} ____cacheline_aligned_in_smp;
/* Top-level SGE state: all queues of the adapter plus bookkeeping. */
struct sge {
	struct sge_eth_txq ethtxq[MAX_ETH_QSETS];    /* Ethernet Tx queues */
	struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS]; /* offload Tx queues */
	struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];  /* control Tx queues */

	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];    /* Ethernet Rx queues */
	struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS]; /* offload Rx queues */
	struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES]; /* RDMA Rx queues */
	struct sge_rspq fw_evtq ____cacheline_aligned_in_smp; /* FW event queue */

	struct sge_rspq intrq ____cacheline_aligned_in_smp; /* non-MSI-X intr q */
	spinlock_t intrq_lock;

	u16 max_ethqsets;           /* # of available Ethernet queue sets */
	u16 ethqsets;               /* # of active Ethernet queue sets */
	u16 ethtxq_rover;           /* Tx queue to clean up next */
	u16 ofldqsets;              /* # of active offload queue sets */
	u16 rdmaqs;                 /* # of available RDMA Rx queues */
	u16 ofld_rxq[MAX_OFLD_QSETS]; /* offload Rx queue ids */
	u16 rdma_rxq[NCHAN];        /* RDMA Rx queue ids */
	u16 timer_val[SGE_NTIMERS]; /* interrupt holdoff timer values */
	u8 counter_val[SGE_NCOUNTERS]; /* interrupt packet counter values */
	unsigned int starve_thres;  /* threshold for declaring an FL starving */
	u8 idma_state[2];           /* IDMA monitor state — TODO confirm */
	void *egr_map[MAX_EGRQ];    /* qid->queue egress queue map */
	struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
	DECLARE_BITMAP(starving_fl, MAX_EGRQ); /* FLs awaiting buffers */
	DECLARE_BITMAP(txq_maperr, MAX_EGRQ);  /* Tx queues with mapping errors */
	struct timer_list rx_timer; /* refills starving FLs */
	struct timer_list tx_timer; /* checks Tx queues */
};
# define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
# define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
# define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
struct l2t_data ;
/* Per-adapter driver state. */
struct adapter {
	void __iomem *regs;                  /* mapped PCI register space */
	struct pci_dev *pdev;                /* associated PCI device */
	struct device *pdev_dev;             /* &pdev->dev, for dev_* logging */
	unsigned long registered_device_map; /* bitmap of registered netdevs */

	unsigned int fn;                     /* PCI function — TODO confirm */
	unsigned int flags;                  /* adapter flags (FULL_INIT_DONE, ...) */

	const char *name;                    /* adapter name */
	int msg_enable;                      /* netif message level bitmap */

	struct adapter_params params;        /* adapter parameters */
	struct cxgb4_virt_res vres;          /* virtualized HW resources */
	unsigned int swintr;                 /* SW interrupt state */
	unsigned int wol;                    /* Wake-on-LAN settings */

	struct {
		unsigned short vec;          /* MSI-X vector */
		char desc[14];               /* irq description for request_irq */
	} msix_info[MAX_INGQ + 1];

	struct sge sge;                      /* all SGE queue state */

	struct net_device *port[MAX_NPORTS]; /* netdevs of the adapter's ports */
	u8 chan_map[NCHAN];                  /* channel -> port map */

	struct l2t_data *l2t;                /* L2 table state */
	void *uld_handle[CXGB4_ULD_MAX];     /* per-ULD private data */
	struct list_head list_node;          /* linkage on global adapter list */

	struct tid_info tids;                /* TID table state */
	void **tid_release_head;             /* list of TIDs awaiting release */
	spinlock_t tid_release_lock;         /* protects tid_release_head */
	struct work_struct tid_release_task; /* deferred TID release work */
	bool tid_release_task_busy;          /* release task is scheduled/running */

	struct dentry *debugfs_root;         /* debugfs directory */

	spinlock_t stats_lock;               /* serializes statistics reads */
};
/**
 *	t4_read_reg - read a 32-bit adapter register
 *	@adap: the adapter
 *	@reg_addr: register offset within the mapped register space
 *
 *	Returns the 32-bit value of the given adapter register.
 */
static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
	return readl(adap->regs + reg_addr);
}
/**
 *	t4_write_reg - write a 32-bit adapter register
 *	@adap: the adapter
 *	@reg_addr: register offset within the mapped register space
 *	@val: the value to write
 *
 *	Writes @val to the given 32-bit adapter register.
 */
static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val)
{
	writel(val, adap->regs + reg_addr);
}
#ifndef readq
/*
 * Fallback 64-bit MMIO accessors for platforms without native readq/writeq.
 * Each 64-bit access is split into two 32-bit accesses, low word first.
 * NOTE(review): the two halves are not atomic with respect to the device.
 */
static inline u64 readq(const volatile void __iomem *addr)
{
	return readl(addr) + ((u64)readl(addr + 4) << 32);
}

static inline void writeq(u64 val, volatile void __iomem *addr)
{
	writel(val, addr);
	writel(val >> 32, addr + 4);
}
#endif
/**
 *	t4_read_reg64 - read a 64-bit adapter register
 *	@adap: the adapter
 *	@reg_addr: register offset within the mapped register space
 *
 *	Returns the 64-bit value of the given adapter register.
 */
static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr)
{
	return readq(adap->regs + reg_addr);
}
/**
 *	t4_write_reg64 - write a 64-bit adapter register
 *	@adap: the adapter
 *	@reg_addr: register offset within the mapped register space
 *	@val: the value to write
 *
 *	Writes @val to the given 64-bit adapter register.
 */
static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
{
	writeq(val, adap->regs + reg_addr);
}
/**
* netdev2pinfo - return the port_info structure associated with a net_device
* @ dev : the netdev
*
* Return the struct port_info associated with a net_device
*/
static inline struct port_info * netdev2pinfo ( const struct net_device * dev )
{
return netdev_priv ( dev ) ;
}
/**
* adap2pinfo - return the port_info of a port
* @ adap : the adapter
* @ idx : the port index
*
* Return the port_info structure for the port of the given index .
*/
static inline struct port_info * adap2pinfo ( struct adapter * adap , int idx )
{
return netdev_priv ( adap - > port [ idx ] ) ;
}
/**
* netdev2adap - return the adapter structure associated with a net_device
* @ dev : the netdev
*
* Return the struct adapter associated with a net_device
*/
static inline struct adapter * netdev2adap ( const struct net_device * dev )
{
return netdev2pinfo ( dev ) - > adapter ;
}
void t4_os_portmod_changed ( const struct adapter * adap , int port_id ) ;
void t4_os_link_changed ( struct adapter * adap , int port_id , int link_stat ) ;
void * t4_alloc_mem ( size_t size ) ;
void t4_free_mem ( void * addr ) ;
void t4_free_sge_resources ( struct adapter * adap ) ;
irq_handler_t t4_intr_handler ( struct adapter * adap ) ;
netdev_tx_t t4_eth_xmit ( struct sk_buff * skb , struct net_device * dev ) ;
int t4_ethrx_handler ( struct sge_rspq * q , const __be64 * rsp ,
const struct pkt_gl * gl ) ;
int t4_mgmt_tx ( struct adapter * adap , struct sk_buff * skb ) ;
int t4_ofld_send ( struct adapter * adap , struct sk_buff * skb ) ;
int t4_sge_alloc_rxq ( struct adapter * adap , struct sge_rspq * iq , bool fwevtq ,
struct net_device * dev , int intr_idx ,
struct sge_fl * fl , rspq_handler_t hnd ) ;
int t4_sge_alloc_eth_txq ( struct adapter * adap , struct sge_eth_txq * txq ,
struct net_device * dev , struct netdev_queue * netdevq ,
unsigned int iqid ) ;
int t4_sge_alloc_ctrl_txq ( struct adapter * adap , struct sge_ctrl_txq * txq ,
struct net_device * dev , unsigned int iqid ,
unsigned int cmplqid ) ;
int t4_sge_alloc_ofld_txq ( struct adapter * adap , struct sge_ofld_txq * txq ,
struct net_device * dev , unsigned int iqid ) ;
irqreturn_t t4_sge_intr_msix ( int irq , void * cookie ) ;
void t4_sge_init ( struct adapter * adap ) ;
void t4_sge_start ( struct adapter * adap ) ;
void t4_sge_stop ( struct adapter * adap ) ;
/* Iterate over the adapter's ports; iter takes values 0..nports-1. */
#define for_each_port(adapter, iter) \
	for (iter = 0; iter < (adapter)->params.nports; ++iter)
static inline unsigned int core_ticks_per_usec ( const struct adapter * adap )
{
return adap - > params . vpd . cclk / 1000 ;
}
static inline unsigned int us_to_core_ticks ( const struct adapter * adap ,
unsigned int us )
{
return ( us * adap - > params . vpd . cclk ) / 1000 ;
}
void t4_set_reg_field ( struct adapter * adap , unsigned int addr , u32 mask ,
u32 val ) ;
int t4_wr_mbox_meat ( struct adapter * adap , int mbox , const void * cmd , int size ,
void * rpl , bool sleep_ok ) ;
/*
 * Send a command to the firmware mailbox and wait for the reply; may sleep
 * while waiting (passes sleep_ok = true to t4_wr_mbox_meat()).
 */
static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
			     int size, void *rpl)
{
	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
}
/*
 * Non-sleeping variant of t4_wr_mbox(): sends a firmware mailbox command
 * with sleep_ok = false, for contexts that must not sleep.
 */
static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
				int size, void *rpl)
{
	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
}
void t4_intr_enable ( struct adapter * adapter ) ;
void t4_intr_disable ( struct adapter * adapter ) ;
void t4_intr_clear ( struct adapter * adapter ) ;
int t4_slow_intr_handler ( struct adapter * adapter ) ;
int t4_wait_dev_ready ( struct adapter * adap ) ;
int t4_link_start ( struct adapter * adap , unsigned int mbox , unsigned int port ,
struct link_config * lc ) ;
int t4_restart_aneg ( struct adapter * adap , unsigned int mbox , unsigned int port ) ;
int t4_seeprom_wp ( struct adapter * adapter , bool enable ) ;
int t4_load_fw ( struct adapter * adapter , const u8 * fw_data , unsigned int size ) ;
int t4_check_fw_version ( struct adapter * adapter ) ;
int t4_prep_adapter ( struct adapter * adapter ) ;
int t4_port_init ( struct adapter * adap , int mbox , int pf , int vf ) ;
void t4_fatal_err ( struct adapter * adapter ) ;
int t4_set_trace_filter ( struct adapter * adapter , const struct trace_params * tp ,
int filter_index , int enable ) ;
void t4_get_trace_filter ( struct adapter * adapter , struct trace_params * tp ,
int filter_index , int * enabled ) ;
int t4_config_rss_range ( struct adapter * adapter , int mbox , unsigned int viid ,
int start , int n , const u16 * rspq , unsigned int nrspq ) ;
int t4_config_glbl_rss ( struct adapter * adapter , int mbox , unsigned int mode ,
unsigned int flags ) ;
int t4_read_rss ( struct adapter * adapter , u16 * entries ) ;
int t4_mc_read ( struct adapter * adap , u32 addr , __be32 * data , u64 * parity ) ;
int t4_edc_read ( struct adapter * adap , int idx , u32 addr , __be32 * data ,
u64 * parity ) ;
void t4_get_port_stats ( struct adapter * adap , int idx , struct port_stats * p ) ;
void t4_get_lb_stats ( struct adapter * adap , int idx , struct lb_port_stats * p ) ;
void t4_read_mtu_tbl ( struct adapter * adap , u16 * mtus , u8 * mtu_log ) ;
void t4_tp_get_err_stats ( struct adapter * adap , struct tp_err_stats * st ) ;
void t4_tp_get_tcp_stats ( struct adapter * adap , struct tp_tcp_stats * v4 ,
struct tp_tcp_stats * v6 ) ;
void t4_load_mtus ( struct adapter * adap , const unsigned short * mtus ,
const unsigned short * alpha , const unsigned short * beta ) ;
void t4_wol_magic_enable ( struct adapter * adap , unsigned int port ,
const u8 * addr ) ;
int t4_wol_pat_enable ( struct adapter * adap , unsigned int port , unsigned int map ,
u64 mask0 , u64 mask1 , unsigned int crc , bool enable ) ;
int t4_fw_hello ( struct adapter * adap , unsigned int mbox , unsigned int evt_mbox ,
enum dev_master master , enum dev_state * state ) ;
int t4_fw_bye ( struct adapter * adap , unsigned int mbox ) ;
int t4_early_init ( struct adapter * adap , unsigned int mbox ) ;
int t4_fw_reset ( struct adapter * adap , unsigned int mbox , int reset ) ;
int t4_query_params ( struct adapter * adap , unsigned int mbox , unsigned int pf ,
unsigned int vf , unsigned int nparams , const u32 * params ,
u32 * val ) ;
int t4_set_params ( struct adapter * adap , unsigned int mbox , unsigned int pf ,
unsigned int vf , unsigned int nparams , const u32 * params ,
const u32 * val ) ;
int t4_cfg_pfvf ( struct adapter * adap , unsigned int mbox , unsigned int pf ,
unsigned int vf , unsigned int txq , unsigned int txq_eth_ctrl ,
unsigned int rxqi , unsigned int rxq , unsigned int tc ,
unsigned int vi , unsigned int cmask , unsigned int pmask ,
unsigned int nexact , unsigned int rcaps , unsigned int wxcaps ) ;
int t4_alloc_vi ( struct adapter * adap , unsigned int mbox , unsigned int port ,
unsigned int pf , unsigned int vf , unsigned int nmac , u8 * mac ,
unsigned int * rss_size ) ;
int t4_free_vi ( struct adapter * adap , unsigned int mbox , unsigned int pf ,
unsigned int vf , unsigned int viid ) ;
int t4_set_rxmode ( struct adapter * adap , unsigned int mbox , unsigned int viid ,
int mtu , int promisc , int all_multi , int bcast , int vlanex ,
bool sleep_ok ) ;
int t4_alloc_mac_filt ( struct adapter * adap , unsigned int mbox ,
unsigned int viid , bool free , unsigned int naddr ,
const u8 * * addr , u16 * idx , u64 * hash , bool sleep_ok ) ;
int t4_change_mac ( struct adapter * adap , unsigned int mbox , unsigned int viid ,
int idx , const u8 * addr , bool persist , bool add_smt ) ;
int t4_set_addr_hash ( struct adapter * adap , unsigned int mbox , unsigned int viid ,
bool ucast , u64 vec , bool sleep_ok ) ;
int t4_enable_vi ( struct adapter * adap , unsigned int mbox , unsigned int viid ,
bool rx_en , bool tx_en ) ;
int t4_identify_port ( struct adapter * adap , unsigned int mbox , unsigned int viid ,
unsigned int nblinks ) ;
int t4_mdio_rd ( struct adapter * adap , unsigned int mbox , unsigned int phy_addr ,
unsigned int mmd , unsigned int reg , u16 * valp ) ;
int t4_mdio_wr ( struct adapter * adap , unsigned int mbox , unsigned int phy_addr ,
unsigned int mmd , unsigned int reg , u16 val ) ;
int t4_iq_start_stop ( struct adapter * adap , unsigned int mbox , bool start ,
unsigned int pf , unsigned int vf , unsigned int iqid ,
unsigned int fl0id , unsigned int fl1id ) ;
int t4_iq_free ( struct adapter * adap , unsigned int mbox , unsigned int pf ,
unsigned int vf , unsigned int iqtype , unsigned int iqid ,
unsigned int fl0id , unsigned int fl1id ) ;
int t4_eth_eq_free ( struct adapter * adap , unsigned int mbox , unsigned int pf ,
unsigned int vf , unsigned int eqid ) ;
int t4_ctrl_eq_free ( struct adapter * adap , unsigned int mbox , unsigned int pf ,
unsigned int vf , unsigned int eqid ) ;
int t4_ofld_eq_free ( struct adapter * adap , unsigned int mbox , unsigned int pf ,
unsigned int vf , unsigned int eqid ) ;
int t4_handle_fw_rpl ( struct adapter * adap , const __be64 * rpl ) ;
# endif /* __CXGB4_H__ */