/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <scsi/fc/fc_fcoe.h>

#include "ixgbe.h"
#include "ixgbe_common.h"

char ixgbe_driver_name[] = "ixgbe";

static const char ixgbe_driver_string[] =
                              "Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "2.0.34-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
	[board_82599] = &ixgbe_82599_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
	 board_82599 },

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                            void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
	                ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
	                ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

/*
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
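/*
 * Each 32-bit IVAR register packs four 8-bit vector-allocation entries,
 * so the index math below first locates the register and then the byte
 * lane for the given queue/cause: on 82598 Tx causes sit 64 entries
 * above Rx, while on 82599 each register covers a pair of queues with
 * the Rx and Tx entries interleaved.
 */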
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
                           u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}

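/*
 * Writing a queue bit to EICS raises that interrupt cause from software;
 * ixgbe_clean_tx_irq() relies on this to get the vector re-fired when it
 * stops cleaning at its work limit.
 */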
static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
                                          u64 qmask)
{
	u32 mask;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
	} else {
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
	}
}

static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                             struct ixgbe_tx_buffer
                                             *tx_buffer_info)
{
	tx_buffer_info->dma = 0;
	if (tx_buffer_info->skb) {
		skb_dma_unmap(&adapter->pdev->dev, tx_buffer_info->skb,
		              DMA_TO_DEVICE);
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *tx_ring,
                                       unsigned int eop)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* Detect a transmit hang in hardware; this serializes the
	 * check with the clearing of time_stamp and movement of eop */
	adapter->detect_tx_hung = false;
	if (tx_ring->tx_buffer_info[eop].time_stamp &&
	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
	    !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
		/* detected Tx unit hang */
		union ixgbe_adv_tx_desc *tx_desc;
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
		DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
			"  Tx Queue             <%d>\n"
			"  TDH, TDT             <%x>, <%x>\n"
			"  next_to_use          <%x>\n"
			"  next_to_clean        <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp           <%lx>\n"
			"  jiffies              <%lx>\n",
			tx_ring->queue_index,
			IXGBE_READ_REG(hw, tx_ring->head),
			IXGBE_READ_REG(hw, tx_ring->tail),
			tx_ring->next_to_use, eop,
			tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
		return true;
	}

	return false;
}

#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
                         (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
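/*
 * Example: one descriptor carries at most IXGBE_MAX_DATA_PER_TXD (16KB)
 * of data, so TXD_USE_COUNT(20000) = (20000 >> 14) + 1 = 2 descriptors.
 */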

static void ixgbe_tx_timeout(struct net_device *netdev);

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *tx_ring)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct net_device *netdev = adapter->netdev;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->work_limit)) {
		bool cleaned = false;
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;
				unsigned int hlen = skb_headlen(skb);

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
#ifdef IXGBE_FCOE
				/* adjust for FCoE Sequence Offload */
				if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
				    && (skb->protocol == htons(ETH_P_FCOE)) &&
				    skb_is_gso(skb)) {
					hlen = skb_transport_offset(skb) +
					       sizeof(struct fc_frame_header) +
					       sizeof(struct fcoe_crc_eof);
					segs = DIV_ROUND_UP(skb->len - hlen,
					        skb_shinfo(skb)->gso_size);
				}
#endif /* IXGBE_FCOE */
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * hlen) + skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbe_unmap_and_free_tx_resource(adapter,
			                                 tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(netdev) &&
	             (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
			/* schedule immediate reset if we believe we hung */
			DPRINTK(PROBE, INFO,
			        "tx hang %d detected, resetting adapter\n",
			        adapter->tx_timeout_count + 1);
			ixgbe_tx_timeout(adapter->netdev);
		}
	}

	/* re-arm the interrupt */
	if (count >= tx_ring->work_limit)
		ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));

	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->stats.packets += total_packets;
	tx_ring->stats.bytes += total_bytes;
	adapter->net_stats.tx_bytes += total_bytes;
	adapter->net_stats.tx_packets += total_packets;
	return (count < tx_ring->work_limit);
}

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring)
{
	u32 rxctrl;
	int cpu = get_cpu();
	int q = rx_ring - adapter->rx_ring;

	if (rx_ring->cpu != cpu) {
		rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
			rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
			rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
			           IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
		}
		rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
		rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
		            IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
		rx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *tx_ring)
{
	u32 txctrl;
	int cpu = get_cpu();
	int q = tx_ring - adapter->tx_ring;

	if (tx_ring->cpu != cpu) {
		txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
			txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
			txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
			           IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
		}
		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
		tx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	/* always use CB2 mode, difference is masked in the CB driver */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].cpu = -1;
		ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].cpu = -1;
		ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
	}
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_IXGBE_DCA */

/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue) the packet came from
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
                              struct sk_buff *skb, u8 status,
                              struct ixgbe_ring *ring,
                              union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct napi_struct *napi = &q_vector->napi;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	skb_record_rx_queue(skb, ring->queue_index);
	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
		if (adapter->vlgrp && is_vlan && (tag != 0))
			vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
		else
			napi_gro_receive(napi, skb);
	} else {
		if (adapter->vlgrp && is_vlan && (tag != 0))
			vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
		else
			netif_rx(skb);
	}
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @rx_desc: rx descriptor holding the hardware status/error bits
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
                                     union ixgbe_adv_rx_desc *rx_desc,
                                     struct sk_buff *skb)
{
	u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);

	skb->ip_summed = CHECKSUM_NONE;

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

		/*
		 * 82599 errata, UDP frames with a 0 checksum can be marked as
		 * checksum errors.
		 */
		if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
		    (adapter->hw.mac.type == ixgbe_mac_82599EB))
			return;

		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}

static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
                                         struct ixgbe_ring *rx_ring, u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *rx_ring,
                                   int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	unsigned int i;

	i = rx_ring->next_to_use;
	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

		if (!bi->page_dma &&
		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
			if (!bi->page) {
				bi->page = alloc_page(GFP_ATOMIC);
				if (!bi->page) {
					adapter->alloc_rx_page_failed++;
					goto no_buffers;
				}
				bi->page_offset = 0;
			} else {
				/* use a half page if we're re-using */
				bi->page_offset ^= (PAGE_SIZE / 2);
			}
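			/*
			 * Each order-0 page backs two half-page buffers; the
			 * PAGE_SIZE/2 XOR above flips to the other half, so a
			 * recycled page can service two descriptors.
			 */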
			bi->page_dma = pci_map_page(pdev, bi->page,
			                            bi->page_offset,
			                            (PAGE_SIZE / 2),
			                            PCI_DMA_FROMDEVICE);
		}

		if (!bi->skb) {
			struct sk_buff *skb;
			skb = netdev_alloc_skb(adapter->netdev,
			                       (rx_ring->rx_buf_len +
			                        NET_IP_ALIGN));

			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/*
			 * Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			bi->skb = skb;
			bi->dma = pci_map_single(pdev, skb->data,
			                         rx_ring->rx_buf_len,
			                         PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}

static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}

static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
{
	return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
	        IXGBE_RXDADV_RSCCNT_MASK) >>
	        IXGBE_RXDADV_RSCCNT_SHIFT;
}

/**
 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
 * @skb: pointer to the last skb in the rsc queue
 *
 * This function changes a queue full of hw rsc buffers into a completed
 * packet.  It uses the ->prev pointers to find the first packet and then
 * turns it into the frag list owner.
 **/
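/*
 * The ->prev/->next links are built up in ixgbe_clean_rx_irq() as non-EOP
 * RSC descriptors arrive; here the chain is walked back to its head and
 * the trailing skbs become that head's frag_list.
 */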
static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
{
	unsigned int frag_list_size = 0;

	while (skb->prev) {
		struct sk_buff *prev = skb->prev;
		frag_list_size += skb->len;
		skb->prev = NULL;
		skb = prev;
	}

	skb_shinfo(skb)->frag_list = skb->next;
	skb->next = NULL;
	skb->len += frag_list_size;
	skb->data_len += frag_list_size;
	skb->truesize += frag_list_size;
	return skb;
}

static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i, rsc_count = 0;
	u32 len, staterr;
	u16 hdr_info;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
	int ddp_bytes = 0;
#endif /* IXGBE_FCOE */

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hdr_info & IXGBE_RXDADV_SPH)
				adapter->rx_hdr_split++;
			if (len > IXGBE_RX_HDR_SIZE)
				len = IXGBE_RX_HDR_SIZE;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		} else {
			len = le16_to_cpu(rx_desc->wb.upper.length);
		}

		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
			                 rx_ring->rx_buf_len,
			                 PCI_DMA_FROMDEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		if (upper_len) {
			pci_unmap_page(pdev, rx_buffer_info->page_dma,
			               PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			                   rx_buffer_info->page,
			                   rx_buffer_info->page_offset,
			                   upper_len);

			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
			    (page_count(rx_buffer_info->page) != 1))
				rx_buffer_info->page = NULL;
			else
				get_page(rx_buffer_info->page);

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		if (adapter->flags & IXGBE_FLAG2_RSC_CAPABLE)
			rsc_count = ixgbe_get_rsc_count(rx_desc);

		if (rsc_count) {
			u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
			             IXGBE_RXDADV_NEXTP_SHIFT;
			next_buffer = &rx_ring->rx_buffer_info[nextp];
			rx_ring->rsc_count += (rsc_count - 1);
		} else {
			next_buffer = &rx_ring->rx_buffer_info[i];
		}

		if (staterr & IXGBE_RXD_STAT_EOP) {
			if (skb->prev)
				skb = ixgbe_transform_rsc_queue(skb);
			rx_ring->stats.packets++;
			rx_ring->stats.bytes += skb->len;
		} else {
			if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
				rx_buffer_info->skb = next_buffer->skb;
				rx_buffer_info->dma = next_buffer->dma;
				next_buffer->skb = skb;
				next_buffer->dma = 0;
			} else {
				skb->next = next_buffer->skb;
				skb->next->prev = skb;
			}
			adapter->non_eop_descs++;
			goto next_desc;
		}

		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbe_rx_checksum(adapter, rx_desc, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, adapter->netdev);
#ifdef IXGBE_FCOE
		/* if ddp, not passing to ULD unless for FCP_RSP or error */
		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
			if (!ddp_bytes)
				goto next_desc;
		}
#endif /* IXGBE_FCOE */
		ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

#ifdef IXGBE_FCOE
	/* include DDPed FCoE data */
	if (ddp_bytes > 0) {
		unsigned int mss;

		mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
			sizeof(struct fc_frame_header) -
			sizeof(struct fcoe_crc_eof);
		if (mss > 512)
			mss &= ~511;
		total_rx_bytes += ddp_bytes;
		total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
	}
#endif /* IXGBE_FCOE */

	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;

	return cleaned;
}

static int ixgbe_clean_rxonly(struct napi_struct *, int);

/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector;
	int i, j, q_vectors, v_idx, r_idx;
	u32 mask;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = adapter->q_vector[v_idx];
		/* XXX for_each_bit(...) */
		r_idx = find_first_bit(q_vector->rxr_idx,
		                       adapter->num_rx_queues);

		for (i = 0; i < q_vector->rxr_count; i++) {
			j = adapter->rx_ring[r_idx].reg_idx;
			ixgbe_set_ivar(adapter, 0, j, v_idx);
			r_idx = find_next_bit(q_vector->rxr_idx,
			                      adapter->num_rx_queues,
			                      r_idx + 1);
		}
		r_idx = find_first_bit(q_vector->txr_idx,
		                       adapter->num_tx_queues);

		for (i = 0; i < q_vector->txr_count; i++) {
			j = adapter->tx_ring[r_idx].reg_idx;
			ixgbe_set_ivar(adapter, 1, j, v_idx);
			r_idx = find_next_bit(q_vector->txr_idx,
			                      adapter->num_tx_queues,
			                      r_idx + 1);
		}

		/* if this is a tx only vector halve the interrupt rate */
		if (q_vector->txr_count && !q_vector->rxr_count)
			q_vector->eitr = (adapter->eitr_param >> 1);
		else if (q_vector->rxr_count)
			/* rx only */
			q_vector->eitr = adapter->eitr_param;

		ixgbe_write_eitr(q_vector);
	}

	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
		               v_idx);
	else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
		ixgbe_set_ivar(adapter, -1, 1, v_idx);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current latency class (enum latency_range)
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 *      Returns a new ITR latency class based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 *      This functionality is controlled by the InterruptThrottleRate module
 *      parameter (see ixgbe_param.c)
 **/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
                           u32 eitr, u8 itr_setting,
                           int packets, int bytes)
{
	unsigned int retval = itr_setting;
	u32 timepassed_us;
	u64 bytes_perint;

	if (packets == 0)
		goto update_itr_done;

	/* simple throttlerate management
	 *    0-20MB/s   lowest (100000 ints/s)
	 *   20-100MB/s   low   (20000 ints/s)
	 *  100-1249MB/s  bulk  (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = 1000000/eitr;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */
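	/* e.g. eitr = 8000 ints/s gives a 125 us timeslice; 2500 bytes in
	 * that slice is 20 bytes/usec (~20MB/s), right at the lowest/low
	 * boundary */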

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > adapter->eitr_low)
			retval = low_latency;
		break;
	case low_latency:
		if (bytes_perint > adapter->eitr_high)
			retval = bulk_latency;
		else if (bytes_perint <= adapter->eitr_low)
			retval = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= adapter->eitr_high)
			retval = low_latency;
		break;
	}

update_itr_done:
	return retval;
}

/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */
void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* must write high and low 16 bits to reset counter */
		itr_reg |= (itr_reg << 16);
	} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		/*
		 * set the WDIS bit to not clear the timer bits and cause an
		 * immediate assertion of the interrupt
		 */
		itr_reg |= IXGBE_EITR_CNT_WDIS;
	}
	IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
}

static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u32 new_itr;
	u8 current_itr, ret_itr;
	int i, r_idx;
	struct ixgbe_ring *rx_ring, *tx_ring;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
		                           q_vector->tx_itr,
		                           tx_ring->total_packets,
		                           tx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
		                    q_vector->tx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
		                      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
		                           q_vector->rx_itr,
		                           rx_ring->total_packets,
		                           rx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
		                    q_vector->rx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
	default:
		new_itr = 8000;
		break;
	}

	if (new_itr != q_vector->eitr) {
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);

		/* save the algorithm value here, not the smoothed one */
		q_vector->eitr = new_itr;

		ixgbe_write_eitr(q_vector);
	}

	return;
}

static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n");
		/* write to clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
	}
}
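
/*
 * GPI SDP1/SDP2 are general-purpose interrupt pins; on the SFP+ designs
 * handled below, SDP1 typically signals a multispeed-fiber link event
 * and SDP2 an SFP+ module change, hence the two work items scheduled
 * here.
 */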
static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (eicr & IXGBE_EICR_GPI_SDP1) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
		schedule_work(&adapter->multispeed_fiber_task);
	} else if (eicr & IXGBE_EICR_GPI_SDP2) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
		schedule_work(&adapter->sfp_config_module_task);
	} else {
		/* Interrupt isn't for us... */
		return;
	}
}

static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->lsc_int++;
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		schedule_work(&adapter->watchdog_task);
	}
}

static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;

	/*
	 * Workaround for Silicon errata.  Use clear-by-write instead
	 * of clear-by-read.  Reading with EICS will return the
	 * interrupt causes without clearing them, which is later done
	 * with the write to EICR.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	if (hw->mac.type == ixgbe_mac_82598EB)
		ixgbe_check_fan_failure(adapter, eicr);

	if (hw->mac.type == ixgbe_mac_82599EB) {
		ixgbe_check_sfp_event(adapter, eicr);

		/* Handle Flow Director Full threshold interrupt */
		if (eicr & IXGBE_EICR_FLOW_DIR) {
			int i;
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
			/* Disable transmits before FDIR Re-initialization */
			netif_tx_stop_all_queues(netdev);
			for (i = 0; i < adapter->num_tx_queues; i++) {
				struct ixgbe_ring *tx_ring =
				                           &adapter->tx_ring[i];
				if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
				                       &tx_ring->reinit_state))
					schedule_work(&adapter->fdir_reinit_task);
			}
		}
	}
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);

	return IRQ_HANDLED;
}

static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
                                           u64 qmask)
{
	u32 mask;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
	} else {
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
	}
	/* skip the flush */
}

static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
                                            u64 qmask)
{
	u32 mask;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
	} else {
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
	}
	/* skip the flush */
}
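
/*
 * EIMS sets (unmasks) interrupt-enable bits and EIMC clears (masks) them;
 * on 82599 the queue vectors form a 64-bit mask split across the
 * _EX(0)/_EX(1) register pair, hence the two 32-bit writes above.
 */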

static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *tx_ring;
	int i, r_idx;

	if (!q_vector->txr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		tx_ring->total_bytes = 0;
		tx_ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
		                      r_idx + 1);
	}

	/* disable interrupts on this vector only */
	ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring;
	int r_idx;
	int i;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		rx_ring->total_bytes = 0;
		rx_ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	if (!q_vector->rxr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
	/* disable interrupts on this vector only */
	ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int r_idx;
	int i;

	if (!q_vector->txr_count && !q_vector->rxr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		ring = &(adapter->tx_ring[r_idx]);
		ring->total_bytes = 0;
		ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
		                      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		ring = &(adapter->rx_ring[r_idx]);
		ring->total_bytes = 0;
		ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	/* disable interrupts on this vector only */
	ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector!!!
 **/
static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
	                       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring = NULL;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		ixgbe_update_rx_dca(adapter, rx_ring);
#endif

	ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->itr_setting & 1)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable_queues(adapter,
			                        ((u64)1 << q_vector->v_idx));
	}

	return work_done;
}

/**
 * ixgbe_clean_rxtx_many - msix (aka one shot) rx/tx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean the Tx rings and more than one Rx queue
 * associated with a q_vector.
 **/
static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
	                       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring = NULL;
	int work_done = 0, i;
	long r_idx;
	bool tx_clean_complete = true;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		ring = &(adapter->tx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			ixgbe_update_tx_dca(adapter, ring);
#endif
		tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
		                      r_idx + 1);
	}

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	budget /= (q_vector->rxr_count ?: 1);
	budget = max(budget, 1);
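	/* e.g. a vector with four Rx rings and a 64-packet budget polls
	 * each ring with a budget of 16 */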

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		ring = &(adapter->rx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			ixgbe_update_rx_dca(adapter, ring);
#endif
		ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	ring = &(adapter->rx_ring[r_idx]);
	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->itr_setting & 1)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable_queues(adapter,
			                        ((u64)1 << q_vector->v_idx));
		return 0;
	}

	return work_done;
}
2009-06-04 16:00:27 +00:00
/**
* ixgbe_clean_txonly - msix ( aka one shot ) tx clean routine
* @ napi : napi struct with our devices info in it
* @ budget : amount of work driver is allowed to do this pass , in packets
*
* This function is optimized for cleaning one queue only on a single
* q_vector ! ! !
* */
static int ixgbe_clean_txonly ( struct napi_struct * napi , int budget )
{
struct ixgbe_q_vector * q_vector =
container_of ( napi , struct ixgbe_q_vector , napi ) ;
struct ixgbe_adapter * adapter = q_vector - > adapter ;
struct ixgbe_ring * tx_ring = NULL ;
int work_done = 0 ;
long r_idx ;
r_idx = find_first_bit ( q_vector - > txr_idx , adapter - > num_tx_queues ) ;
tx_ring = & ( adapter - > tx_ring [ r_idx ] ) ;
# ifdef CONFIG_IXGBE_DCA
if ( adapter - > flags & IXGBE_FLAG_DCA_ENABLED )
ixgbe_update_tx_dca ( adapter , tx_ring ) ;
# endif
if ( ! ixgbe_clean_tx_irq ( q_vector , tx_ring ) )
work_done = budget ;
/* If all Tx work done, exit the polling mode */
if ( work_done < budget ) {
napi_complete ( napi ) ;
if ( adapter - > itr_setting & 1 )
ixgbe_set_itr_msix ( q_vector ) ;
if ( ! test_bit ( __IXGBE_DOWN , & adapter - > state ) )
ixgbe_irq_enable_queues ( adapter , ( ( u64 ) 1 < < q_vector - > v_idx ) ) ;
}
return work_done ;
}
2008-03-03 15:03:45 -08:00
static inline void map_vector_to_rxq ( struct ixgbe_adapter * a , int v_idx ,
2008-09-11 20:04:46 -07:00
int r_idx )
2008-03-03 15:03:45 -08:00
{
2009-05-06 10:43:28 +00:00
struct ixgbe_q_vector * q_vector = a - > q_vector [ v_idx ] ;
set_bit ( r_idx , q_vector - > rxr_idx ) ;
q_vector - > rxr_count + + ;
2008-03-03 15:03:45 -08:00
}
static inline void map_vector_to_txq ( struct ixgbe_adapter * a , int v_idx ,
2009-05-06 10:43:28 +00:00
int t_idx )
2008-03-03 15:03:45 -08:00
{
2009-05-06 10:43:28 +00:00
struct ixgbe_q_vector * q_vector = a - > q_vector [ v_idx ] ;
set_bit ( t_idx , q_vector - > txr_idx ) ;
q_vector - > txr_count + + ;
2008-03-03 15:03:45 -08:00
}
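/*
 * Illustrative sketch (not part of the driver): each q_vector records its
 * rings in a bitmap plus a count, and the clean routines walk that bitmap
 * with find_first_bit()/find_next_bit().  A single-word analogue of the
 * same walk:
 */
static void example_walk_ring_map(unsigned long ring_map, int num_rings)
{
	int r;

	for (r = 0; r < num_rings; r++) {
		if (!(ring_map & (1UL << r)))
			continue;	/* ring r is not mapped to this vector */
		/* ...service ring r here... */
	}
}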
2007-09-15 14:07:45 -07:00
/**
2008-03-03 15:03:45 -08:00
 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 * @vectors: allotted vector count for descriptor rings
2007-09-15 14:07:45 -07:00
 *
2008-03-03 15:03:45 -08:00
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
2007-09-15 14:07:45 -07:00
 **/
2008-03-03 15:03:45 -08:00
static int ixgbe_map_rings_to_vectors ( struct ixgbe_adapter * adapter ,
2008-09-11 20:04:46 -07:00
int vectors )
2008-03-03 15:03:45 -08:00
{
int v_start = 0 ;
int rxr_idx = 0 , txr_idx = 0 ;
int rxr_remaining = adapter - > num_rx_queues ;
int txr_remaining = adapter - > num_tx_queues ;
int i , j ;
int rqpv , tqpv ;
int err = 0 ;
/* No mapping required if MSI-X is disabled. */
if ( ! ( adapter - > flags & IXGBE_FLAG_MSIX_ENABLED ) )
goto out ;
2007-09-15 14:07:45 -07:00
2008-03-03 15:03:45 -08:00
/*
* The ideal configuration . . .
* We have enough vectors to map one per queue .
*/
if ( vectors = = adapter - > num_rx_queues + adapter - > num_tx_queues ) {
for ( ; rxr_idx < rxr_remaining ; v_start + + , rxr_idx + + )
map_vector_to_rxq ( adapter , v_start , rxr_idx ) ;
2007-09-15 14:07:45 -07:00
2008-03-03 15:03:45 -08:00
for ( ; txr_idx < txr_remaining ; v_start + + , txr_idx + + )
map_vector_to_txq ( adapter , v_start , txr_idx ) ;
2007-09-15 14:07:45 -07:00
goto out ;
2008-03-03 15:03:45 -08:00
}
2007-09-15 14:07:45 -07:00
2008-03-03 15:03:45 -08:00
/*
* If we don ' t have enough vectors for a 1 - to - 1
* mapping , we ' ll have to group them so there are
* multiple queues per vector .
*/
/* Re-adjusting *qpv takes care of the remainder. */
for ( i = v_start ; i < vectors ; i + + ) {
rqpv = DIV_ROUND_UP ( rxr_remaining , vectors - i ) ;
for ( j = 0 ; j < rqpv ; j + + ) {
map_vector_to_rxq ( adapter , i , rxr_idx ) ;
rxr_idx + + ;
rxr_remaining - - ;
}
}
for ( i = v_start ; i < vectors ; i + + ) {
tqpv = DIV_ROUND_UP ( txr_remaining , vectors - i ) ;
for ( j = 0 ; j < tqpv ; j + + ) {
map_vector_to_txq ( adapter , i , txr_idx ) ;
txr_idx + + ;
txr_remaining - - ;
2007-09-15 14:07:45 -07:00
}
}
2008-03-03 15:03:45 -08:00
out :
return err ;
}
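/*
 * Illustrative sketch (not part of the driver): recomputing
 * DIV_ROUND_UP(remaining, vectors - i) on every vector, as the grouping
 * loop above does, spreads any remainder over the first vectors.  For
 * 10 queues on 4 vectors the shares come out 3, 3, 2, 2:
 */
static void example_spread_queues(int queues, int vectors)
{
	int i, per_vector;

	for (i = 0; i < vectors; i++) {
		/* same arithmetic as the kernel's DIV_ROUND_UP() */
		per_vector = (queues + (vectors - i) - 1) / (vectors - i);
		/* vector i services 'per_vector' of the remaining queues */
		queues -= per_vector;
	}
}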
/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbe_request_msix_irqs ( struct ixgbe_adapter * adapter )
{
struct net_device * netdev = adapter - > netdev ;
irqreturn_t ( * handler ) ( int , void * ) ;
int i , vector , q_vectors , err ;
2008-11-25 16:43:52 -08:00
int ri = 0 , ti = 0 ;
2008-03-03 15:03:45 -08:00
/* Decrement for Other and TCP Timer vectors */
q_vectors = adapter - > num_msix_vectors - NON_Q_VECTORS ;
/* Map the Tx/Rx rings to the vectors we were allotted. */
err = ixgbe_map_rings_to_vectors ( adapter , q_vectors ) ;
if ( err )
goto out ;
# define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
2008-09-11 20:04:46 -07:00
( ! ( _v ) - > txr_count ) ? & ixgbe_msix_clean_rx : \
& ixgbe_msix_clean_many )
2008-03-03 15:03:45 -08:00
for ( vector = 0 ; vector < q_vectors ; vector + + ) {
2009-05-06 10:43:28 +00:00
handler = SET_HANDLER ( adapter - > q_vector [ vector ] ) ;
2008-11-25 16:43:52 -08:00
if ( handler = = & ixgbe_msix_clean_rx ) {
sprintf ( adapter - > name [ vector ] , " %s-%s-%d " ,
netdev - > name , " rx " , ri + + ) ;
}
else if ( handler = = & ixgbe_msix_clean_tx ) {
sprintf ( adapter - > name [ vector ] , " %s-%s-%d " ,
netdev - > name , " tx " , ti + + ) ;
}
else
sprintf ( adapter - > name [ vector ] , " %s-%s-%d " ,
netdev - > name , " TxRx " , vector ) ;
2008-03-03 15:03:45 -08:00
err = request_irq ( adapter - > msix_entries [ vector ] . vector ,
2008-09-11 20:04:46 -07:00
handler , 0 , adapter - > name [ vector ] ,
2009-05-06 10:43:28 +00:00
adapter - > q_vector [ vector ] ) ;
2007-09-15 14:07:45 -07:00
if ( err ) {
DPRINTK ( PROBE , ERR ,
2008-09-11 20:04:46 -07:00
" request_irq failed for MSIX interrupt "
" Error: %d \n " , err ) ;
2008-03-03 15:03:45 -08:00
goto free_queue_irqs ;
2007-09-15 14:07:45 -07:00
}
}
2008-03-03 15:03:45 -08:00
sprintf ( adapter - > name [ vector ] , " %s:lsc " , netdev - > name ) ;
err = request_irq ( adapter - > msix_entries [ vector ] . vector ,
2008-09-11 20:04:46 -07:00
& ixgbe_msix_lsc , 0 , adapter - > name [ vector ] , netdev ) ;
2007-09-15 14:07:45 -07:00
if ( err ) {
DPRINTK ( PROBE , ERR ,
" request_irq for msix_lsc failed: %d \n " , err ) ;
2008-03-03 15:03:45 -08:00
goto free_queue_irqs ;
2007-09-15 14:07:45 -07:00
}
return 0 ;
2008-03-03 15:03:45 -08:00
free_queue_irqs :
for ( i = vector - 1 ; i > = 0 ; i - - )
free_irq ( adapter - > msix_entries [ - - vector ] . vector ,
2009-05-06 10:43:28 +00:00
adapter - > q_vector [ i ] ) ;
2008-03-03 15:03:45 -08:00
adapter - > flags & = ~ IXGBE_FLAG_MSIX_ENABLED ;
pci_disable_msix ( adapter - > pdev ) ;
2007-09-15 14:07:45 -07:00
kfree ( adapter - > msix_entries ) ;
adapter - > msix_entries = NULL ;
2008-03-03 15:03:45 -08:00
out :
2007-09-15 14:07:45 -07:00
return err ;
}
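/*
 * Illustrative sketch (not part of the driver): SET_HANDLER() above picks
 * a poll routine from the vector's ring counts.  As a plain function the
 * selection reads:
 */
typedef int example_poll_t(void);

static example_poll_t *example_pick_handler(int rxr_count, int txr_count,
                                            example_poll_t *tx_only,
                                            example_poll_t *rx_only,
                                            example_poll_t *many)
{
	if (!rxr_count)
		return tx_only;		/* vector carries only tx rings */
	if (!txr_count)
		return rx_only;		/* vector carries only rx rings */
	return many;			/* mixed rx and tx rings */
}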
2008-03-03 15:03:57 -08:00
static void ixgbe_set_itr ( struct ixgbe_adapter * adapter )
{
2009-05-06 10:43:28 +00:00
struct ixgbe_q_vector * q_vector = adapter - > q_vector [ 0 ] ;
2008-03-03 15:03:57 -08:00
u8 current_itr ;
u32 new_itr = q_vector - > eitr ;
struct ixgbe_ring * rx_ring = & adapter - > rx_ring [ 0 ] ;
struct ixgbe_ring * tx_ring = & adapter - > tx_ring [ 0 ] ;
2008-09-11 19:58:14 -07:00
q_vector - > tx_itr = ixgbe_update_itr ( adapter , new_itr ,
2008-09-11 20:04:46 -07:00
q_vector - > tx_itr ,
tx_ring - > total_packets ,
tx_ring - > total_bytes ) ;
2008-09-11 19:58:14 -07:00
q_vector - > rx_itr = ixgbe_update_itr ( adapter , new_itr ,
2008-09-11 20:04:46 -07:00
q_vector - > rx_itr ,
rx_ring - > total_packets ,
rx_ring - > total_bytes ) ;
2008-03-03 15:03:57 -08:00
2008-09-11 19:58:14 -07:00
current_itr = max ( q_vector - > rx_itr , q_vector - > tx_itr ) ;
2008-03-03 15:03:57 -08:00
switch ( current_itr ) {
/* counts and packets in update_itr are dependent on these numbers */
case lowest_latency :
new_itr = 100000 ;
break ;
case low_latency :
new_itr = 20000 ; /* aka hwitr = ~200 */
break ;
case bulk_latency :
new_itr = 8000 ;
break ;
default :
break ;
}
if ( new_itr ! = q_vector - > eitr ) {
2009-06-04 16:00:09 +00:00
/* do an exponential smoothing */
new_itr = ( ( q_vector - > eitr * 90 ) / 100 ) + ( ( new_itr * 10 ) / 100 ) ;
2009-03-13 22:13:28 +00:00
/* save the algorithm value here, not the smoothed one */
q_vector - > eitr = new_itr ;
2009-06-04 16:00:09 +00:00
ixgbe_write_eitr ( q_vector ) ;
2008-03-03 15:03:57 -08:00
}
return ;
}
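/*
 * Illustrative sketch (not part of the driver): the 90/10 blend above is
 * an exponential moving average, so the programmed interrupt rate ramps
 * toward the target instead of jumping.  Stepping from 8000 toward 20000
 * yields 9200, 10280, 11252, ...
 */
static unsigned int example_smooth_itr(unsigned int old_itr,
                                       unsigned int target_itr)
{
	/* keep 90% of the old value, blend in 10% of the new one */
	return (old_itr * 90) / 100 + (target_itr * 10) / 100;
}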
2008-11-19 14:17:02 -08:00
/**
* ixgbe_irq_enable - Enable default interrupt generation settings
* @ adapter : board private structure
* */
static inline void ixgbe_irq_enable ( struct ixgbe_adapter * adapter )
{
u32 mask ;
2009-04-27 22:42:54 +00:00
mask = ( IXGBE_EIMS_ENABLE_MASK & ~ IXGBE_EIMS_RTX_QUEUE ) ;
2008-11-20 16:44:00 -08:00
if ( adapter - > flags & IXGBE_FLAG_FAN_FAIL_CAPABLE )
mask | = IXGBE_EIMS_GPI_SDP1 ;
2009-02-27 15:45:05 +00:00
if ( adapter - > hw . mac . type = = ixgbe_mac_82599EB ) {
2009-03-13 22:14:30 +00:00
mask | = IXGBE_EIMS_ECC ;
2009-02-27 15:45:05 +00:00
mask | = IXGBE_EIMS_GPI_SDP1 ;
mask | = IXGBE_EIMS_GPI_SDP2 ;
}
2009-06-04 16:01:43 +00:00
if ( adapter - > flags & IXGBE_FLAG_FDIR_HASH_CAPABLE | |
adapter - > flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE )
mask | = IXGBE_EIMS_FLOW_DIR ;
2009-02-27 15:45:05 +00:00
2008-11-19 14:17:02 -08:00
IXGBE_WRITE_REG ( & adapter - > hw , IXGBE_EIMS , mask ) ;
2009-04-27 22:42:54 +00:00
ixgbe_irq_enable_queues ( adapter , ~ 0 ) ;
2008-11-19 14:17:02 -08:00
IXGBE_WRITE_FLUSH ( & adapter - > hw ) ;
}
2008-03-03 15:03:45 -08:00
2007-09-15 14:07:45 -07:00
/**
2008-03-03 15:03:45 -08:00
* ixgbe_intr - legacy mode Interrupt Handler
2007-09-15 14:07:45 -07:00
* @ irq : interrupt number
* @ data : pointer to a network interface device structure
* */
static irqreturn_t ixgbe_intr ( int irq , void * data )
{
struct net_device * netdev = data ;
struct ixgbe_adapter * adapter = netdev_priv ( netdev ) ;
struct ixgbe_hw * hw = & adapter - > hw ;
2009-05-06 10:43:28 +00:00
struct ixgbe_q_vector * q_vector = adapter - > q_vector [ 0 ] ;
2007-09-15 14:07:45 -07:00
u32 eicr ;
2009-02-21 15:42:56 -08:00
/*
* Workaround for silicon errata . Mask the interrupts
* before the read of EICR .
*/
IXGBE_WRITE_REG ( hw , IXGBE_EIMC , IXGBE_IRQ_CLEAR_MASK ) ;
2008-03-03 15:03:45 -08:00
/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
 * therefore no explicit interrupt disable is necessary */
eicr = IXGBE_READ_REG ( hw , IXGBE_EICR ) ;
2008-09-11 19:56:14 -07:00
if ( ! eicr ) {
/* shared interrupt alert!
* make sure interrupts are enabled because the read will
* have disabled interrupts due to EIAM */
ixgbe_irq_enable ( adapter ) ;
2007-09-15 14:07:45 -07:00
return IRQ_NONE ; /* Not our interrupt */
2008-09-11 19:56:14 -07:00
}
2007-09-15 14:07:45 -07:00
2008-09-11 19:55:32 -07:00
if ( eicr & IXGBE_EICR_LSC )
ixgbe_check_lsc ( adapter ) ;
2008-03-03 15:03:45 -08:00
2009-02-27 15:45:05 +00:00
if ( hw - > mac . type = = ixgbe_mac_82599EB )
ixgbe_check_sfp_event ( adapter , eicr ) ;
2008-10-31 00:46:40 -07:00
ixgbe_check_fan_failure ( adapter , eicr ) ;
2009-05-06 10:43:28 +00:00
if ( napi_schedule_prep ( & ( q_vector - > napi ) ) ) {
2008-03-03 15:03:57 -08:00
adapter - > tx_ring [ 0 ] . total_packets = 0 ;
adapter - > tx_ring [ 0 ] . total_bytes = 0 ;
adapter - > rx_ring [ 0 ] . total_packets = 0 ;
adapter - > rx_ring [ 0 ] . total_bytes = 0 ;
2008-03-03 15:03:45 -08:00
/* would disable interrupts here but EIAM disabled it */
2009-05-06 10:43:28 +00:00
__napi_schedule ( & ( q_vector - > napi ) ) ;
2007-09-15 14:07:45 -07:00
}
return IRQ_HANDLED ;
}
2008-03-03 15:03:45 -08:00
static inline void ixgbe_reset_q_vectors ( struct ixgbe_adapter * adapter )
{
int i , q_vectors = adapter - > num_msix_vectors - NON_Q_VECTORS ;
for ( i = 0 ; i < q_vectors ; i + + ) {
2009-05-06 10:43:28 +00:00
struct ixgbe_q_vector * q_vector = adapter - > q_vector [ i ] ;
2008-03-03 15:03:45 -08:00
bitmap_zero ( q_vector - > rxr_idx , MAX_RX_QUEUES ) ;
bitmap_zero ( q_vector - > txr_idx , MAX_TX_QUEUES ) ;
q_vector - > rxr_count = 0 ;
q_vector - > txr_count = 0 ;
}
}
2007-09-15 14:07:45 -07:00
/**
* ixgbe_request_irq - initialize interrupts
* @ adapter : board private structure
*
* Attempts to configure interrupts using the best available
* capabilities of the hardware and kernel .
* */
2008-03-03 15:03:45 -08:00
static int ixgbe_request_irq ( struct ixgbe_adapter * adapter )
2007-09-15 14:07:45 -07:00
{
struct net_device * netdev = adapter - > netdev ;
2008-03-03 15:03:45 -08:00
int err ;
2007-09-15 14:07:45 -07:00
2008-03-03 15:03:45 -08:00
if ( adapter - > flags & IXGBE_FLAG_MSIX_ENABLED ) {
err = ixgbe_request_msix_irqs ( adapter ) ;
} else if ( adapter - > flags & IXGBE_FLAG_MSI_ENABLED ) {
err = request_irq ( adapter - > pdev - > irq , & ixgbe_intr , 0 ,
2008-09-11 20:04:46 -07:00
netdev - > name , netdev ) ;
2008-03-03 15:03:45 -08:00
} else {
err = request_irq ( adapter - > pdev - > irq , & ixgbe_intr , IRQF_SHARED ,
2008-09-11 20:04:46 -07:00
netdev - > name , netdev ) ;
2007-09-15 14:07:45 -07:00
}
if ( err )
DPRINTK ( PROBE , ERR , " request_irq failed, Error %d \n " , err ) ;
return err ;
}
static void ixgbe_free_irq ( struct ixgbe_adapter * adapter )
{
struct net_device * netdev = adapter - > netdev ;
if ( adapter - > flags & IXGBE_FLAG_MSIX_ENABLED ) {
2008-03-03 15:03:45 -08:00
int i , q_vectors ;
2007-09-15 14:07:45 -07:00
2008-03-03 15:03:45 -08:00
q_vectors = adapter - > num_msix_vectors ;
i = q_vectors - 1 ;
2007-09-15 14:07:45 -07:00
free_irq ( adapter - > msix_entries [ i ] . vector , netdev ) ;
2008-03-03 15:03:45 -08:00
i - - ;
for ( ; i > = 0 ; i - - ) {
free_irq ( adapter - > msix_entries [ i ] . vector ,
2009-05-06 10:43:28 +00:00
adapter - > q_vector [ i ] ) ;
2008-03-03 15:03:45 -08:00
}
ixgbe_reset_q_vectors ( adapter ) ;
} else {
free_irq ( adapter - > pdev - > irq , netdev ) ;
2007-09-15 14:07:45 -07:00
}
}
2009-03-19 01:24:04 +00:00
/**
* ixgbe_irq_disable - Mask off interrupt generation on the NIC
* @ adapter : board private structure
* */
static inline void ixgbe_irq_disable ( struct ixgbe_adapter * adapter )
{
2009-04-27 22:42:54 +00:00
if ( adapter - > hw . mac . type = = ixgbe_mac_82598EB ) {
IXGBE_WRITE_REG ( & adapter - > hw , IXGBE_EIMC , ~ 0 ) ;
} else {
IXGBE_WRITE_REG ( & adapter - > hw , IXGBE_EIMC , 0xFFFF0000 ) ;
IXGBE_WRITE_REG ( & adapter - > hw , IXGBE_EIMC_EX ( 0 ) , ~ 0 ) ;
2009-03-19 01:24:04 +00:00
IXGBE_WRITE_REG ( & adapter - > hw , IXGBE_EIMC_EX ( 1 ) , ~ 0 ) ;
}
IXGBE_WRITE_FLUSH ( & adapter - > hw ) ;
if ( adapter - > flags & IXGBE_FLAG_MSIX_ENABLED ) {
int i ;
for ( i = 0 ; i < adapter - > num_msix_vectors ; i + + )
synchronize_irq ( adapter - > msix_entries [ i ] . vector ) ;
} else {
synchronize_irq ( adapter - > pdev - > irq ) ;
}
}
2007-09-15 14:07:45 -07:00
/**
* ixgbe_configure_msi_and_legacy - Initialize PIN ( INTA . . . ) and MSI interrupts
*
* */
static void ixgbe_configure_msi_and_legacy ( struct ixgbe_adapter * adapter )
{
struct ixgbe_hw * hw = & adapter - > hw ;
2008-03-03 15:03:45 -08:00
IXGBE_WRITE_REG ( hw , IXGBE_EITR ( 0 ) ,
2008-09-11 19:58:14 -07:00
EITR_INTS_PER_SEC_TO_REG ( adapter - > eitr_param ) ) ;
2007-09-15 14:07:45 -07:00
2009-02-27 15:45:05 +00:00
ixgbe_set_ivar ( adapter , 0 , 0 , 0 ) ;
ixgbe_set_ivar ( adapter , 1 , 0 , 0 ) ;
2008-03-03 15:03:45 -08:00
map_vector_to_rxq ( adapter , 0 , 0 ) ;
map_vector_to_txq ( adapter , 0 , 0 ) ;
DPRINTK ( HW , INFO , " Legacy interrupt IVAR setup done \n " ) ;
2007-09-15 14:07:45 -07:00
}
/**
2008-08-26 04:27:08 -07:00
* ixgbe_configure_tx - Configure 8259 x Transmit Unit after Reset
2007-09-15 14:07:45 -07:00
* @ adapter : board private structure
*
* Configure the Tx unit of the MAC after a reset .
* */
static void ixgbe_configure_tx ( struct ixgbe_adapter * adapter )
{
2009-02-06 21:47:24 -08:00
u64 tdba ;
2007-09-15 14:07:45 -07:00
struct ixgbe_hw * hw = & adapter - > hw ;
2008-03-03 15:03:45 -08:00
u32 i , j , tdlen , txctrl ;
2007-09-15 14:07:45 -07:00
/* Setup the HW Tx Head and Tail descriptor pointers */
for ( i = 0 ; i < adapter - > num_tx_queues ; i + + ) {
2008-08-26 04:27:13 -07:00
struct ixgbe_ring * ring = & adapter - > tx_ring [ i ] ;
j = ring - > reg_idx ;
tdba = ring - > dma ;
tdlen = ring - > count * sizeof ( union ixgbe_adv_tx_desc ) ;
2008-03-03 15:03:45 -08:00
IXGBE_WRITE_REG ( hw , IXGBE_TDBAL ( j ) ,
2009-04-06 19:01:15 -07:00
( tdba & DMA_BIT_MASK ( 32 ) ) ) ;
2008-03-03 15:03:45 -08:00
IXGBE_WRITE_REG ( hw , IXGBE_TDBAH ( j ) , ( tdba > > 32 ) ) ;
IXGBE_WRITE_REG ( hw , IXGBE_TDLEN ( j ) , tdlen ) ;
IXGBE_WRITE_REG ( hw , IXGBE_TDH ( j ) , 0 ) ;
IXGBE_WRITE_REG ( hw , IXGBE_TDT ( j ) , 0 ) ;
adapter - > tx_ring [ i ] . head = IXGBE_TDH ( j ) ;
adapter - > tx_ring [ i ] . tail = IXGBE_TDT ( j ) ;
/* Disable Tx Head Writeback RO bit, since this hoses
 * bookkeeping if things aren't delivered in order.
 */
2008-08-26 04:27:13 -07:00
txctrl = IXGBE_READ_REG ( hw , IXGBE_DCA_TXCTRL ( j ) ) ;
2008-03-03 15:03:45 -08:00
txctrl & = ~ IXGBE_DCA_TXCTRL_TX_WB_RO_EN ;
2008-08-26 04:27:13 -07:00
IXGBE_WRITE_REG ( hw , IXGBE_DCA_TXCTRL ( j ) , txctrl ) ;
2007-09-15 14:07:45 -07:00
}
2009-02-27 15:45:05 +00:00
if ( hw - > mac . type = = ixgbe_mac_82599EB ) {
/* We enable 8 traffic classes, DCB only */
if ( adapter - > flags & IXGBE_FLAG_DCB_ENABLED )
IXGBE_WRITE_REG ( hw , IXGBE_MTQC , ( IXGBE_MTQC_RT_ENA |
IXGBE_MTQC_8TC_8TQ ) ) ;
}
2007-09-15 14:07:45 -07:00
}
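/*
 * Illustrative sketch (not part of the driver): a ring's 64-bit DMA base
 * is programmed through a pair of 32-bit registers, which is what the
 * "tdba & DMA_BIT_MASK(32)" / "tdba >> 32" pair above does.  In outline
 * (wr32, lo_reg and hi_reg are stand-ins, not driver symbols):
 */
static void example_write_ring_base(u64 dma, void (*wr32)(int reg, u32 val),
                                    int lo_reg, int hi_reg)
{
	wr32(lo_reg, (u32)(dma & 0xffffffffULL));	/* low 32 bits */
	wr32(hi_reg, (u32)(dma >> 32));			/* high 32 bits */
}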
2009-02-27 15:45:05 +00:00
# define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2008-08-26 04:27:27 -07:00
static void ixgbe_configure_srrctl ( struct ixgbe_adapter * adapter , int index )
{
struct ixgbe_ring * rx_ring ;
u32 srrctl ;
2009-02-27 15:45:05 +00:00
int queue0 = 0 ;
2008-08-30 00:29:10 -07:00
unsigned long mask ;
2009-05-19 09:19:11 +00:00
struct ixgbe_ring_feature * feature = adapter - > ring_feature ;
2008-08-30 00:29:10 -07:00
2009-02-27 15:45:05 +00:00
if ( adapter - > hw . mac . type = = ixgbe_mac_82599EB ) {
2009-05-07 10:39:16 +00:00
if ( adapter - > flags & IXGBE_FLAG_DCB_ENABLED ) {
2009-05-19 09:19:11 +00:00
int dcb_i = feature [ RING_F_DCB ] . indices ;
2009-05-07 10:39:16 +00:00
if ( dcb_i = = 8 )
queue0 = index > > 4 ;
else if ( dcb_i = = 4 )
queue0 = index > > 5 ;
else
dev_err ( & adapter - > pdev - > dev , " Invalid DCB "
" configuration \n " ) ;
2009-05-17 12:33:52 +00:00
# ifdef IXGBE_FCOE
if ( adapter - > flags & IXGBE_FLAG_FCOE_ENABLED ) {
struct ixgbe_ring_feature * f ;
rx_ring = & adapter - > rx_ring [ queue0 ] ;
f = & adapter - > ring_feature [ RING_F_FCOE ] ;
if ( ( queue0 = = 0 ) & & ( index > rx_ring - > reg_idx ) )
queue0 = f - > mask + index -
rx_ring - > reg_idx - 1 ;
}
# endif /* IXGBE_FCOE */
2009-05-07 10:39:16 +00:00
} else {
queue0 = index ;
}
2008-08-26 04:27:27 -07:00
} else {
2009-05-19 09:19:11 +00:00
mask = ( unsigned long ) feature [ RING_F_RSS ] . mask ;
2008-08-30 00:29:10 -07:00
queue0 = index & mask ;
index = index & mask ;
2008-08-26 04:27:27 -07:00
}
2008-08-30 00:29:10 -07:00
2008-08-26 04:27:27 -07:00
rx_ring = & adapter - > rx_ring [ queue0 ] ;
srrctl = IXGBE_READ_REG ( & adapter - > hw , IXGBE_SRRCTL ( index ) ) ;
srrctl & = ~ IXGBE_SRRCTL_BSIZEHDR_MASK ;
srrctl & = ~ IXGBE_SRRCTL_BSIZEPKT_MASK ;
2009-05-07 10:38:56 +00:00
srrctl | = ( IXGBE_RX_HDR_SIZE < < IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT ) &
IXGBE_SRRCTL_BSIZEHDR_MASK ;
2008-08-26 04:27:27 -07:00
if ( adapter - > flags & IXGBE_FLAG_RX_PS_ENABLED ) {
2009-05-07 10:38:56 +00:00
# if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
srrctl | = IXGBE_MAX_RXBUFFER > > IXGBE_SRRCTL_BSIZEPKT_SHIFT ;
# else
srrctl | = ( PAGE_SIZE / 2 ) > > IXGBE_SRRCTL_BSIZEPKT_SHIFT ;
# endif
2008-08-26 04:27:27 -07:00
srrctl | = IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS ;
} else {
2009-05-07 10:38:56 +00:00
srrctl | = ALIGN ( rx_ring - > rx_buf_len , 1024 ) > >
IXGBE_SRRCTL_BSIZEPKT_SHIFT ;
2008-08-26 04:27:27 -07:00
srrctl | = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF ;
}
2009-02-27 15:45:05 +00:00
2008-08-26 04:27:27 -07:00
IXGBE_WRITE_REG ( & adapter - > hw , IXGBE_SRRCTL ( index ) , srrctl ) ;
}
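/*
 * Illustrative sketch (not part of the driver): SRRCTL's packet-buffer
 * size field counts 1 KB units, hence the ALIGN(rx_buf_len, 1024) and the
 * right shift above (IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10 in this era's
 * driver headers).  A 2048-byte buffer therefore encodes as 2:
 */
static unsigned int example_srrctl_bsizepkt(unsigned int buf_len)
{
	unsigned int aligned = (buf_len + 1023) & ~1023U;	/* ALIGN(buf_len, 1024) */

	return aligned >> 10;	/* field is in 1 KB granules */
}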
2007-09-15 14:07:45 -07:00
2009-05-19 09:19:11 +00:00
static u32 ixgbe_setup_mrqc ( struct ixgbe_adapter * adapter )
{
u32 mrqc = 0 ;
int mask ;
if ( ! ( adapter - > hw . mac . type = = ixgbe_mac_82599EB ) )
return mrqc ;
mask = adapter - > flags & ( IXGBE_FLAG_RSS_ENABLED
# ifdef CONFIG_IXGBE_DCB
| IXGBE_FLAG_DCB_ENABLED
# endif
) ;
switch ( mask ) {
case ( IXGBE_FLAG_RSS_ENABLED ) :
mrqc = IXGBE_MRQC_RSSEN ;
break ;
# ifdef CONFIG_IXGBE_DCB
case ( IXGBE_FLAG_DCB_ENABLED ) :
mrqc = IXGBE_MRQC_RT8TCEN ;
break ;
# endif /* CONFIG_IXGBE_DCB */
default :
break ;
}
return mrqc ;
}
2007-09-15 14:07:45 -07:00
/**
2008-08-26 04:27:08 -07:00
* ixgbe_configure_rx - Configure 8259 x Receive Unit after Reset
2007-09-15 14:07:45 -07:00
* @ adapter : board private structure
*
* Configure the Rx unit of the MAC after a reset .
* */
static void ixgbe_configure_rx ( struct ixgbe_adapter * adapter )
{
u64 rdba ;
struct ixgbe_hw * hw = & adapter - > hw ;
struct net_device * netdev = adapter - > netdev ;
int max_frame = netdev - > mtu + ETH_HLEN + ETH_FCS_LEN ;
2008-03-03 15:03:45 -08:00
int i , j ;
2007-09-15 14:07:45 -07:00
u32 rdlen , rxctrl , rxcsum ;
2008-08-26 04:27:16 -07:00
static const u32 seed [ 10 ] = { 0xE291D73D , 0x1805EC6C , 0x2A94B30D ,
0xA54F2BEC , 0xEA49AF7C , 0xE214AD3D , 0xB855AABE ,
0x6A3E67EA , 0x14364D17 , 0x3BED200D } ;
2007-09-15 14:07:45 -07:00
u32 fctrl , hlreg0 ;
2009-03-13 22:13:28 +00:00
u32 reta = 0 , mrqc = 0 ;
2008-08-26 04:27:27 -07:00
u32 rdrxctl ;
2009-04-27 22:42:37 +00:00
u32 rscctrl ;
2008-08-26 04:27:16 -07:00
int rx_buf_len ;
2007-09-15 14:07:45 -07:00
/* Decide whether to use packet split mode or not */
2008-09-11 19:58:43 -07:00
adapter - > flags | = IXGBE_FLAG_RX_PS_ENABLED ;
2007-09-15 14:07:45 -07:00
2009-05-13 13:11:06 +00:00
# ifdef IXGBE_FCOE
if ( adapter - > flags & IXGBE_FLAG_FCOE_ENABLED )
adapter - > flags & = ~ IXGBE_FLAG_RX_PS_ENABLED ;
# endif /* IXGBE_FCOE */
2007-09-15 14:07:45 -07:00
/* Set the RX buffer length according to the mode */
if ( adapter - > flags & IXGBE_FLAG_RX_PS_ENABLED ) {
2008-08-26 04:27:16 -07:00
rx_buf_len = IXGBE_RX_HDR_SIZE ;
2009-02-27 15:45:05 +00:00
if ( hw - > mac . type = = ixgbe_mac_82599EB ) {
/* PSRTYPE must be initialized in 82599 */
u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
IXGBE_PSRTYPE_UDPHDR |
IXGBE_PSRTYPE_IPV4HDR |
2009-05-07 10:39:35 +00:00
IXGBE_PSRTYPE_IPV6HDR |
IXGBE_PSRTYPE_L2HDR ;
2009-02-27 15:45:05 +00:00
IXGBE_WRITE_REG ( hw , IXGBE_PSRTYPE ( 0 ) , psrtype ) ;
}
2007-09-15 14:07:45 -07:00
} else {
2009-06-04 16:00:47 +00:00
if ( ! ( adapter - > flags & IXGBE_FLAG2_RSC_ENABLED ) & &
2009-04-27 22:42:37 +00:00
( netdev - > mtu < = ETH_DATA_LEN ) )
2008-08-26 04:27:16 -07:00
rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE ;
2007-09-15 14:07:45 -07:00
else
2008-08-26 04:27:16 -07:00
rx_buf_len = ALIGN ( max_frame , 1024 ) ;
2007-09-15 14:07:45 -07:00
}
fctrl = IXGBE_READ_REG ( & adapter - > hw , IXGBE_FCTRL ) ;
fctrl | = IXGBE_FCTRL_BAM ;
2008-03-03 15:03:45 -08:00
fctrl | = IXGBE_FCTRL_DPF ; /* discard pause frames when FC enabled */
2009-02-27 15:45:05 +00:00
fctrl | = IXGBE_FCTRL_PMCF ;
2007-09-15 14:07:45 -07:00
IXGBE_WRITE_REG ( & adapter - > hw , IXGBE_FCTRL , fctrl ) ;
hlreg0 = IXGBE_READ_REG ( hw , IXGBE_HLREG0 ) ;
if ( adapter - > netdev - > mtu < = ETH_DATA_LEN )
hlreg0 & = ~ IXGBE_HLREG0_JUMBOEN ;
else
hlreg0 | = IXGBE_HLREG0_JUMBOEN ;
2009-05-17 12:34:35 +00:00
# ifdef IXGBE_FCOE
if ( adapter - > flags & IXGBE_FLAG_FCOE_ENABLED )
hlreg0 | = IXGBE_HLREG0_JUMBOEN ;
# endif
2007-09-15 14:07:45 -07:00
IXGBE_WRITE_REG ( hw , IXGBE_HLREG0 , hlreg0 ) ;
rdlen = adapter - > rx_ring [ 0 ] . count * sizeof ( union ixgbe_adv_rx_desc ) ;
/* disable receives while setting up the descriptors */
rxctrl = IXGBE_READ_REG ( hw , IXGBE_RXCTRL ) ;
IXGBE_WRITE_REG ( hw , IXGBE_RXCTRL , rxctrl & ~ IXGBE_RXCTRL_RXEN ) ;
2009-05-19 09:19:11 +00:00
/*
* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring
*/
2007-09-15 14:07:45 -07:00
for ( i = 0 ; i < adapter - > num_rx_queues ; i + + ) {
rdba = adapter - > rx_ring [ i ] . dma ;
2008-08-26 04:27:16 -07:00
j = adapter - > rx_ring [ i ] . reg_idx ;
2009-04-06 19:01:15 -07:00
IXGBE_WRITE_REG ( hw , IXGBE_RDBAL ( j ) , ( rdba & DMA_BIT_MASK ( 32 ) ) ) ;
2008-08-26 04:27:16 -07:00
IXGBE_WRITE_REG ( hw , IXGBE_RDBAH ( j ) , ( rdba > > 32 ) ) ;
IXGBE_WRITE_REG ( hw , IXGBE_RDLEN ( j ) , rdlen ) ;
IXGBE_WRITE_REG ( hw , IXGBE_RDH ( j ) , 0 ) ;
IXGBE_WRITE_REG ( hw , IXGBE_RDT ( j ) , 0 ) ;
adapter - > rx_ring [ i ] . head = IXGBE_RDH ( j ) ;
adapter - > rx_ring [ i ] . tail = IXGBE_RDT ( j ) ;
adapter - > rx_ring [ i ] . rx_buf_len = rx_buf_len ;
2008-08-26 04:27:27 -07:00
2009-05-17 12:34:35 +00:00
# ifdef IXGBE_FCOE
if ( adapter - > flags & IXGBE_FLAG_FCOE_ENABLED ) {
struct ixgbe_ring_feature * f ;
f = & adapter - > ring_feature [ RING_F_FCOE ] ;
if ( ( rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE ) & &
( i > = f - > mask ) & & ( i < f - > mask + f - > indices ) )
adapter - > rx_ring [ i ] . rx_buf_len =
IXGBE_FCOE_JUMBO_FRAME_SIZE ;
}
# endif /* IXGBE_FCOE */
2008-08-26 04:27:27 -07:00
ixgbe_configure_srrctl ( adapter , j ) ;
2007-09-15 14:07:45 -07:00
}
2009-02-27 15:45:05 +00:00
if ( hw - > mac . type = = ixgbe_mac_82598EB ) {
/*
 * For VMDq support of different descriptor types or
 * buffer sizes through the use of multiple SRRCTL
 * registers, RDRXCTL.MVMEN must be set to 1
 *
 * also, the manual doesn't mention it clearly but DCA hints
 * will only use queue 0's tags unless this bit is set.  Side
 * effects of setting this bit are only that SRRCTL must be
 * fully programmed [0..15]
 */
2009-03-13 22:14:30 +00:00
rdrxctl = IXGBE_READ_REG ( hw , IXGBE_RDRXCTL ) ;
rdrxctl | = IXGBE_RDRXCTL_MVMEN ;
IXGBE_WRITE_REG ( hw , IXGBE_RDRXCTL , rdrxctl ) ;
2008-11-20 20:52:10 -08:00
}
2008-06-18 15:32:19 -07:00
2009-02-27 15:45:05 +00:00
/* Program MRQC for the distribution of queues */
2009-05-19 09:19:11 +00:00
mrqc = ixgbe_setup_mrqc ( adapter ) ;
2009-02-27 15:45:05 +00:00
2008-03-03 15:03:45 -08:00
if ( adapter - > flags & IXGBE_FLAG_RSS_ENABLED ) {
2007-09-15 14:07:45 -07:00
/* Fill out redirection table */
2008-03-03 15:03:45 -08:00
for ( i = 0 , j = 0 ; i < 128 ; i + + , j + + ) {
if ( j = = adapter - > ring_feature [ RING_F_RSS ] . indices )
j = 0 ;
/* reta = 4-byte sliding window of
 * 0x00..(indices-1)(indices-1)00..etc. */
reta = ( reta < < 8 ) | ( j * 0x11 ) ;
if ( ( i & 3 ) = = 3 )
IXGBE_WRITE_REG ( hw , IXGBE_RETA ( i > > 2 ) , reta ) ;
2007-09-15 14:07:45 -07:00
}
/* Fill out hash function seeds */
for ( i = 0 ; i < 10 ; i + + )
2008-08-26 04:27:16 -07:00
IXGBE_WRITE_REG ( hw , IXGBE_RSSRK ( i ) , seed [ i ] ) ;
2007-09-15 14:07:45 -07:00
2009-03-13 22:14:30 +00:00
if ( hw - > mac . type = = ixgbe_mac_82598EB )
mrqc | = IXGBE_MRQC_RSSEN ;
2007-09-15 14:07:45 -07:00
/* Perform hash on these packet types */
2009-03-13 22:14:30 +00:00
mrqc | = IXGBE_MRQC_RSS_FIELD_IPV4
| IXGBE_MRQC_RSS_FIELD_IPV4_TCP
| IXGBE_MRQC_RSS_FIELD_IPV4_UDP
| IXGBE_MRQC_RSS_FIELD_IPV6
| IXGBE_MRQC_RSS_FIELD_IPV6_TCP
| IXGBE_MRQC_RSS_FIELD_IPV6_UDP ;
2008-03-03 15:03:45 -08:00
}
2009-03-13 22:14:30 +00:00
IXGBE_WRITE_REG ( hw , IXGBE_MRQC , mrqc ) ;
2007-09-15 14:07:45 -07:00
2008-03-03 15:03:45 -08:00
rxcsum = IXGBE_READ_REG ( hw , IXGBE_RXCSUM ) ;
if ( adapter - > flags & IXGBE_FLAG_RSS_ENABLED | |
adapter - > flags & IXGBE_FLAG_RX_CSUM_ENABLED ) {
/* Disable indicating checksum in descriptor, enables
* RSS hash */
2007-09-15 14:07:45 -07:00
rxcsum | = IXGBE_RXCSUM_PCSD ;
}
2008-03-03 15:03:45 -08:00
if ( ! ( rxcsum & IXGBE_RXCSUM_PCSD ) ) {
/* Enable IPv4 payload checksum for UDP fragments
* if PCSD is not set */
rxcsum | = IXGBE_RXCSUM_IPPCSE ;
}
IXGBE_WRITE_REG ( hw , IXGBE_RXCSUM , rxcsum ) ;
2009-02-27 15:45:05 +00:00
if ( hw - > mac . type = = ixgbe_mac_82599EB ) {
rdrxctl = IXGBE_READ_REG ( hw , IXGBE_RDRXCTL ) ;
rdrxctl | = IXGBE_RDRXCTL_CRCSTRIP ;
2009-04-27 22:42:37 +00:00
rdrxctl & = ~ IXGBE_RDRXCTL_RSCFRSTSIZE ;
2009-02-27 15:45:05 +00:00
IXGBE_WRITE_REG ( hw , IXGBE_RDRXCTL , rdrxctl ) ;
}
2009-04-27 22:42:37 +00:00
2009-06-04 16:00:47 +00:00
if ( adapter - > flags & IXGBE_FLAG2_RSC_ENABLED ) {
2009-04-27 22:42:37 +00:00
/* Enable 82599 HW-RSC */
for ( i = 0 ; i < adapter - > num_rx_queues ; i + + ) {
j = adapter - > rx_ring [ i ] . reg_idx ;
rscctrl = IXGBE_READ_REG ( hw , IXGBE_RSCCTL ( j ) ) ;
rscctrl | = IXGBE_RSCCTL_RSCEN ;
/*
2009-05-17 20:57:47 +00:00
* we must limit the number of descriptors so that the
* total size of max desc * buf_len is not greater
* than 65535
2009-04-27 22:42:37 +00:00
*/
2009-05-17 20:57:47 +00:00
if ( adapter - > flags & IXGBE_FLAG_RX_PS_ENABLED ) {
# if (MAX_SKB_FRAGS > 16)
rscctrl | = IXGBE_RSCCTL_MAXDESC_16 ;
# elif (MAX_SKB_FRAGS > 8)
2009-04-27 22:42:37 +00:00
rscctrl | = IXGBE_RSCCTL_MAXDESC_8 ;
2009-05-17 20:57:47 +00:00
# elif (MAX_SKB_FRAGS > 4)
rscctrl | = IXGBE_RSCCTL_MAXDESC_4 ;
2009-04-27 22:42:37 +00:00
# else
2009-05-17 20:57:47 +00:00
rscctrl | = IXGBE_RSCCTL_MAXDESC_1 ;
2009-04-27 22:42:37 +00:00
# endif
2009-05-17 20:57:47 +00:00
} else {
if ( rx_buf_len < IXGBE_RXBUFFER_4096 )
rscctrl | = IXGBE_RSCCTL_MAXDESC_16 ;
else if ( rx_buf_len < IXGBE_RXBUFFER_8192 )
rscctrl | = IXGBE_RSCCTL_MAXDESC_8 ;
else
rscctrl | = IXGBE_RSCCTL_MAXDESC_4 ;
}
2009-04-27 22:42:37 +00:00
IXGBE_WRITE_REG ( hw , IXGBE_RSCCTL ( j ) , rscctrl ) ;
}
/* Disable RSC for ACK packets */
IXGBE_WRITE_REG ( hw , IXGBE_RSCDBU ,
( IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG ( hw , IXGBE_RSCDBU ) ) ) ;
}
2007-09-15 14:07:45 -07:00
}
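/*
 * Illustrative sketch (not part of the driver): the 128-entry RSS
 * redirection table is packed four byte-wide entries per 32-bit RETA
 * register, so the fill loop above shifts each entry into a sliding
 * window and writes once every fourth entry.  The packing in isolation
 * (write_reta is a stand-in for the register write):
 */
static void example_fill_reta(unsigned int indices,
                              void (*write_reta)(int reg, u32 val))
{
	u32 reta = 0;
	int i, j;

	for (i = 0, j = 0; i < 128; i++, j++) {
		if (j == (int)indices)
			j = 0;			/* wrap at the rx queue count */
		reta = (reta << 8) | j;		/* slide entry into the window */
		if ((i & 3) == 3)
			write_reta(i >> 2, reta);	/* 4 entries per register */
	}
}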
2009-01-19 16:54:36 -08:00
static void ixgbe_vlan_rx_add_vid ( struct net_device * netdev , u16 vid )
{
struct ixgbe_adapter * adapter = netdev_priv ( netdev ) ;
struct ixgbe_hw * hw = & adapter - > hw ;
/* add VID to filter table */
hw - > mac . ops . set_vfta ( & adapter - > hw , vid , 0 , true ) ;
}
static void ixgbe_vlan_rx_kill_vid ( struct net_device * netdev , u16 vid )
{
struct ixgbe_adapter * adapter = netdev_priv ( netdev ) ;
struct ixgbe_hw * hw = & adapter - > hw ;
if ( ! test_bit ( __IXGBE_DOWN , & adapter - > state ) )
ixgbe_irq_disable ( adapter ) ;
vlan_group_set_device ( adapter - > vlgrp , vid , NULL ) ;
if ( ! test_bit ( __IXGBE_DOWN , & adapter - > state ) )
ixgbe_irq_enable ( adapter ) ;
/* remove VID from filter table */
hw - > mac . ops . set_vfta ( & adapter - > hw , vid , 0 , false ) ;
}
2007-09-15 14:07:45 -07:00
static void ixgbe_vlan_rx_register ( struct net_device * netdev ,
2008-09-11 20:04:46 -07:00
struct vlan_group * grp )
2007-09-15 14:07:45 -07:00
{
struct ixgbe_adapter * adapter = netdev_priv ( netdev ) ;
u32 ctrl ;
2009-02-27 15:45:05 +00:00
int i , j ;
2007-09-15 14:07:45 -07:00
2008-02-01 15:58:41 -08:00
if ( ! test_bit ( __IXGBE_DOWN , & adapter - > state ) )
ixgbe_irq_disable ( adapter ) ;
2007-09-15 14:07:45 -07:00
adapter - > vlgrp = grp ;
2008-11-20 20:52:10 -08:00
/*
 * For a DCB driver, always enable VLAN tag stripping so we can
 * still receive traffic from a DCB-enabled host even if we're
 * not in DCB mode.
 */
ctrl = IXGBE_READ_REG ( & adapter - > hw , IXGBE_VLNCTRL ) ;
2009-02-27 15:45:05 +00:00
if ( adapter - > hw . mac . type = = ixgbe_mac_82598EB ) {
ctrl | = IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE ;
ctrl & = ~ IXGBE_VLNCTRL_CFIEN ;
IXGBE_WRITE_REG ( & adapter - > hw , IXGBE_VLNCTRL , ctrl ) ;
} else if ( adapter - > hw . mac . type = = ixgbe_mac_82599EB ) {
ctrl | = IXGBE_VLNCTRL_VFE ;
2007-09-15 14:07:45 -07:00
/* enable VLAN tag insert/strip */
ctrl & = ~ IXGBE_VLNCTRL_CFIEN ;
IXGBE_WRITE_REG ( & adapter - > hw , IXGBE_VLNCTRL , ctrl ) ;
2009-02-27 15:45:05 +00:00
for ( i = 0 ; i < adapter - > num_rx_queues ; i + + ) {
j = adapter - > rx_ring [ i ] . reg_idx ;
ctrl = IXGBE_READ_REG ( & adapter - > hw , IXGBE_RXDCTL ( j ) ) ;
ctrl | = IXGBE_RXDCTL_VME ;
IXGBE_WRITE_REG ( & adapter - > hw , IXGBE_RXDCTL ( j ) , ctrl ) ;
}
2007-09-15 14:07:45 -07:00
}
2009-02-27 15:45:05 +00:00
ixgbe_vlan_rx_add_vid ( netdev , 0 ) ;
2007-09-15 14:07:45 -07:00
2008-02-01 15:58:41 -08:00
if ( ! test_bit ( __IXGBE_DOWN , & adapter - > state ) )
ixgbe_irq_enable ( adapter ) ;
2007-09-15 14:07:45 -07:00
}
static void ixgbe_restore_vlan ( struct ixgbe_adapter * adapter )
{
ixgbe_vlan_rx_register ( adapter - > netdev , adapter - > vlgrp ) ;
if ( adapter - > vlgrp ) {
u16 vid ;
for ( vid = 0 ; vid < VLAN_GROUP_ARRAY_LEN ; vid + + ) {
if ( ! vlan_group_get_device ( adapter - > vlgrp , vid ) )
continue ;
ixgbe_vlan_rx_add_vid ( adapter - > netdev , vid ) ;
}
}
}
2008-08-26 04:27:02 -07:00
static u8 * ixgbe_addr_list_itr ( struct ixgbe_hw * hw , u8 * * mc_addr_ptr , u32 * vmdq )
{
struct dev_mc_list * mc_ptr ;
u8 * addr = * mc_addr_ptr ;
* vmdq = 0 ;
mc_ptr = container_of ( addr , struct dev_mc_list , dmi_addr [ 0 ] ) ;
if ( mc_ptr - > next )
* mc_addr_ptr = mc_ptr - > next - > dmi_addr ;
else
* mc_addr_ptr = NULL ;
return addr ;
}
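/*
 * Illustrative sketch (not part of the driver): the iterator above hands
 * back one multicast address per call and advances the caller's cursor to
 * the next list node, recovering the node from the address of its member
 * the way container_of() does.  The generic shape:
 */
struct example_mc_node {
	struct example_mc_node *next;
	u8 addr[6];
};

static u8 *example_mc_itr(u8 **cursor)
{
	u8 *addr = *cursor;
	struct example_mc_node *node;

	if (!addr)
		return NULL;
	node = (struct example_mc_node *)
	       ((char *)addr - offsetof(struct example_mc_node, addr));
	*cursor = node->next ? node->next->addr : NULL;	/* advance cursor */
	return addr;
}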
2007-09-15 14:07:45 -07:00
/**
2008-08-26 04:27:02 -07:00
* ixgbe_set_rx_mode - Unicast , Multicast and Promiscuous mode set
2007-09-15 14:07:45 -07:00
* @ netdev : network interface device structure
*
2008-08-26 04:27:02 -07:00
 * The set_rx_mode entry point is called whenever the unicast/multicast
 * address list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast and
 * promiscuous mode.
2007-09-15 14:07:45 -07:00
* */
2008-08-26 04:27:02 -07:00
static void ixgbe_set_rx_mode ( struct net_device * netdev )
2007-09-15 14:07:45 -07:00
{
struct ixgbe_adapter * adapter = netdev_priv ( netdev ) ;
struct ixgbe_hw * hw = & adapter - > hw ;
2008-08-26 18:30:04 -07:00
u32 fctrl , vlnctrl ;
2008-08-26 04:27:02 -07:00
u8 * addr_list = NULL ;
int addr_count = 0 ;
2007-09-15 14:07:45 -07:00
/* Check for Promiscuous and All Multicast modes */
fctrl = IXGBE_READ_REG ( hw , IXGBE_FCTRL ) ;
2008-08-26 18:30:04 -07:00
vlnctrl = IXGBE_READ_REG ( hw , IXGBE_VLNCTRL ) ;
2007-09-15 14:07:45 -07:00
if ( netdev - > flags & IFF_PROMISC ) {
2008-08-26 04:27:02 -07:00
hw - > addr_ctrl . user_set_promisc = 1 ;
2007-09-15 14:07:45 -07:00
fctrl | = ( IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE ) ;
2008-08-26 18:30:04 -07:00
vlnctrl & = ~ IXGBE_VLNCTRL_VFE ;
2007-09-15 14:07:45 -07:00
} else {
2008-07-16 20:15:45 -07:00
if ( netdev - > flags & IFF_ALLMULTI ) {
fctrl | = IXGBE_FCTRL_MPE ;
fctrl & = ~ IXGBE_FCTRL_UPE ;
} else {
fctrl & = ~ ( IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE ) ;
}
2008-08-26 18:30:04 -07:00
vlnctrl | = IXGBE_VLNCTRL_VFE ;
2008-08-26 04:27:02 -07:00
hw - > addr_ctrl . user_set_promisc = 0 ;
2007-09-15 14:07:45 -07:00
}
IXGBE_WRITE_REG ( hw , IXGBE_FCTRL , fctrl ) ;
2008-08-26 18:30:04 -07:00
IXGBE_WRITE_REG ( hw , IXGBE_VLNCTRL , vlnctrl ) ;
2007-09-15 14:07:45 -07:00
2008-08-26 04:27:02 -07:00
/* reprogram secondary unicast list */
2009-06-17 01:12:19 +00:00
hw - > mac . ops . update_uc_addr_list ( hw , & netdev - > uc . list ) ;
2007-09-15 14:07:45 -07:00
2008-08-26 04:27:02 -07:00
/* reprogram multicast list */
addr_count = netdev - > mc_count ;
if ( addr_count )
addr_list = netdev - > mc_list - > dmi_addr ;
2008-09-11 19:59:59 -07:00
hw - > mac . ops . update_mc_addr_list ( hw , addr_list , addr_count ,
ixgbe_addr_list_itr ) ;
2007-09-15 14:07:45 -07:00
}
2008-03-03 15:03:45 -08:00
static void ixgbe_napi_enable_all ( struct ixgbe_adapter * adapter )
{
int q_idx ;
struct ixgbe_q_vector * q_vector ;
int q_vectors = adapter - > num_msix_vectors - NON_Q_VECTORS ;
/* legacy and MSI only use one vector */
if ( ! ( adapter - > flags & IXGBE_FLAG_MSIX_ENABLED ) )
q_vectors = 1 ;
for ( q_idx = 0 ; q_idx < q_vectors ; q_idx + + ) {
2008-09-11 19:59:42 -07:00
struct napi_struct * napi ;
2009-05-06 10:43:28 +00:00
q_vector = adapter - > q_vector [ q_idx ] ;
2008-09-11 19:59:42 -07:00
napi = & q_vector - > napi ;
2009-06-04 16:00:27 +00:00
if ( adapter - > flags & IXGBE_FLAG_MSIX_ENABLED ) {
if ( ! q_vector - > rxr_count | | ! q_vector - > txr_count ) {
if ( q_vector - > txr_count = = 1 )
napi - > poll = & ixgbe_clean_txonly ;
else if ( q_vector - > rxr_count = = 1 )
napi - > poll = & ixgbe_clean_rxonly ;
}
}
2008-09-11 19:59:42 -07:00
napi_enable ( napi ) ;
2008-03-03 15:03:45 -08:00
}
}
static void ixgbe_napi_disable_all ( struct ixgbe_adapter * adapter )
{
int q_idx ;
struct ixgbe_q_vector * q_vector ;
int q_vectors = adapter - > num_msix_vectors - NON_Q_VECTORS ;
/* legacy and MSI only use one vector */
if ( ! ( adapter - > flags & IXGBE_FLAG_MSIX_ENABLED ) )
q_vectors = 1 ;
for ( q_idx = 0 ; q_idx < q_vectors ; q_idx + + ) {
2009-05-06 10:43:28 +00:00
q_vector = adapter - > q_vector [ q_idx ] ;
2008-03-03 15:03:45 -08:00
napi_disable ( & q_vector - > napi ) ;
}
}
2008-11-25 01:02:08 -08:00
# ifdef CONFIG_IXGBE_DCB
2008-11-20 20:52:10 -08:00
/*
* ixgbe_configure_dcb - Configure DCB hardware
* @ adapter : ixgbe adapter struct
*
* This is called by the driver on open to configure the DCB hardware .
* This is also called by the gennetlink interface when reconfiguring
* the DCB state .
*/
static void ixgbe_configure_dcb ( struct ixgbe_adapter * adapter )
{
struct ixgbe_hw * hw = & adapter - > hw ;
u32 txdctl , vlnctrl ;
int i , j ;
ixgbe_dcb_check_config ( & adapter - > dcb_cfg ) ;
ixgbe_dcb_calculate_tc_credits ( & adapter - > dcb_cfg , DCB_TX_CONFIG ) ;
ixgbe_dcb_calculate_tc_credits ( & adapter - > dcb_cfg , DCB_RX_CONFIG ) ;
/* reconfigure the hardware */
ixgbe_dcb_hw_config ( & adapter - > hw , & adapter - > dcb_cfg ) ;
for ( i = 0 ; i < adapter - > num_tx_queues ; i + + ) {
j = adapter - > tx_ring [ i ] . reg_idx ;
txdctl = IXGBE_READ_REG ( hw , IXGBE_TXDCTL ( j ) ) ;
/* PThresh workaround for Tx hang with DFP enabled. */
txdctl | = 32 ;
IXGBE_WRITE_REG ( hw , IXGBE_TXDCTL ( j ) , txdctl ) ;
}
/* Enable VLAN tag insert/strip */
vlnctrl = IXGBE_READ_REG ( hw , IXGBE_VLNCTRL ) ;
2009-02-27 15:45:05 +00:00
if ( hw - > mac . type = = ixgbe_mac_82598EB ) {
vlnctrl | = IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE ;
vlnctrl & = ~ IXGBE_VLNCTRL_CFIEN ;
IXGBE_WRITE_REG ( hw , IXGBE_VLNCTRL , vlnctrl ) ;
} else if ( hw - > mac . type = = ixgbe_mac_82599EB ) {
vlnctrl | = IXGBE_VLNCTRL_VFE ;
vlnctrl & = ~ IXGBE_VLNCTRL_CFIEN ;
IXGBE_WRITE_REG ( hw , IXGBE_VLNCTRL , vlnctrl ) ;
for ( i = 0 ; i < adapter - > num_rx_queues ; i + + ) {
j = adapter - > rx_ring [ i ] . reg_idx ;
vlnctrl = IXGBE_READ_REG ( hw , IXGBE_RXDCTL ( j ) ) ;
vlnctrl | = IXGBE_RXDCTL_VME ;
IXGBE_WRITE_REG ( hw , IXGBE_RXDCTL ( j ) , vlnctrl ) ;
}
}
2008-11-20 20:52:10 -08:00
hw - > mac . ops . set_vfta ( & adapter - > hw , 0 , 0 , true ) ;
}
# endif
2007-09-15 14:07:45 -07:00
static void ixgbe_configure ( struct ixgbe_adapter * adapter )
{
struct net_device * netdev = adapter - > netdev ;
2009-06-04 16:01:43 +00:00
struct ixgbe_hw * hw = & adapter - > hw ;
2007-09-15 14:07:45 -07:00
int i ;
2008-08-26 04:27:02 -07:00
ixgbe_set_rx_mode ( netdev ) ;
2007-09-15 14:07:45 -07:00
ixgbe_restore_vlan ( adapter ) ;
2008-11-25 01:02:08 -08:00
# ifdef CONFIG_IXGBE_DCB
2008-11-20 20:52:10 -08:00
if ( adapter - > flags & IXGBE_FLAG_DCB_ENABLED ) {
netif_set_gso_max_size ( netdev , 32768 ) ;
ixgbe_configure_dcb ( adapter ) ;
} else {
netif_set_gso_max_size ( netdev , 65536 ) ;
}
# else
netif_set_gso_max_size ( netdev , 65536 ) ;
# endif
2007-09-15 14:07:45 -07:00
2009-05-13 13:11:06 +00:00
# ifdef IXGBE_FCOE
if ( adapter - > flags & IXGBE_FLAG_FCOE_ENABLED )
ixgbe_configure_fcoe ( adapter ) ;
# endif /* IXGBE_FCOE */
2009-06-04 16:01:43 +00:00
if ( adapter - > flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ) {
for ( i = 0 ; i < adapter - > num_tx_queues ; i + + )
adapter - > tx_ring [ i ] . atr_sample_rate =
adapter - > atr_sample_rate ;
ixgbe_init_fdir_signature_82599 ( hw , adapter - > fdir_pballoc ) ;
} else if ( adapter - > flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE ) {
ixgbe_init_fdir_perfect_82599 ( hw , adapter - > fdir_pballoc ) ;
}
2007-09-15 14:07:45 -07:00
ixgbe_configure_tx ( adapter ) ;
ixgbe_configure_rx ( adapter ) ;
for ( i = 0 ; i < adapter - > num_rx_queues ; i + + )
ixgbe_alloc_rx_buffers ( adapter , & adapter - > rx_ring [ i ] ,
2008-09-11 20:04:46 -07:00
( adapter - > rx_ring [ i ] . count - 1 ) ) ;
2007-09-15 14:07:45 -07:00
}
2009-02-27 15:45:05 +00:00
static inline bool ixgbe_is_sfp ( struct ixgbe_hw * hw )
{
switch ( hw - > phy . type ) {
case ixgbe_phy_sfp_avago :
case ixgbe_phy_sfp_ftl :
case ixgbe_phy_sfp_intel :
case ixgbe_phy_sfp_unknown :
case ixgbe_phy_tw_tyco :
case ixgbe_phy_tw_unknown :
return true ;
default :
return false ;
}
}
2009-02-06 21:46:54 -08:00
/**
2009-02-27 15:45:05 +00:00
* ixgbe_sfp_link_config - set up SFP + link
* @ adapter : pointer to private adapter struct
* */
static void ixgbe_sfp_link_config ( struct ixgbe_adapter * adapter )
{
struct ixgbe_hw * hw = & adapter - > hw ;
if ( hw - > phy . multispeed_fiber ) {
/*
 * In multispeed fiber setups, the device may not have
 * had a physical connection when the driver loaded.
 * If that's the case, the initial link configuration
 * couldn't get the MAC into 10G or 1G mode, so we'll
 * never have a link status change interrupt fire.
 * We need to try and force an autonegotiation
 * session, then bring up link.
 */
hw - > mac . ops . setup_sfp ( hw ) ;
if ( ! ( adapter - > flags & IXGBE_FLAG_IN_SFP_LINK_TASK ) )
schedule_work ( & adapter - > multispeed_fiber_task ) ;
} else {
/*
* Direct Attach Cu and non - multispeed fiber modules
* still need to be configured properly prior to
* attempting link .
*/
if ( ! ( adapter - > flags & IXGBE_FLAG_IN_SFP_MOD_TASK ) )
schedule_work ( & adapter - > sfp_config_module_task ) ;
}
}
/**
* ixgbe_non_sfp_link_config - set up non - SFP + link
2009-02-06 21:46:54 -08:00
* @ hw : pointer to private hardware struct
*
* Returns 0 on success , negative on failure
* */
2009-02-27 15:45:05 +00:00
static int ixgbe_non_sfp_link_config ( struct ixgbe_hw * hw )
2009-02-06 21:46:54 -08:00
{
u32 autoneg ;
bool link_up = false ;
u32 ret = IXGBE_ERR_LINK_SETUP ;
if ( hw - > mac . ops . check_link )
ret = hw - > mac . ops . check_link ( hw , & autoneg , & link_up , false ) ;
if ( ret )
goto link_cfg_out ;
if ( hw - > mac . ops . get_link_capabilities )
ret = hw - > mac . ops . get_link_capabilities ( hw , & autoneg ,
& hw - > mac . autoneg ) ;
if ( ret )
goto link_cfg_out ;
if ( hw - > mac . ops . setup_link_speed )
ret = hw - > mac . ops . setup_link_speed ( hw , autoneg , true , link_up ) ;
link_cfg_out :
return ret ;
}
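/*
 * Illustrative sketch (not part of the driver): the mac.ops hooks used
 * above may be NULL on some parts, so each call is guarded and the error
 * code falls through when a hook is absent.  The general pattern:
 */
struct example_link_ops {
	int (*check_link)(void *hw);
};

static int example_call_optional(struct example_link_ops *ops, void *hw)
{
	int ret = -1;	/* default error if the hook is not provided */

	if (ops->check_link)
		ret = ops->check_link(hw);
	return ret;
}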
2009-02-27 15:45:05 +00:00
# define IXGBE_MAX_RX_DESC_POLL 10
static inline void ixgbe_rx_desc_queue_enable ( struct ixgbe_adapter * adapter ,
int rxr )
{
int j = adapter - > rx_ring [ rxr ] . reg_idx ;
int k ;
for ( k = 0 ; k < IXGBE_MAX_RX_DESC_POLL ; k + + ) {
if ( IXGBE_READ_REG ( & adapter - > hw ,
IXGBE_RXDCTL ( j ) ) & IXGBE_RXDCTL_ENABLE )
break ;
else
msleep ( 1 ) ;
}
if ( k > = IXGBE_MAX_RX_DESC_POLL ) {
DPRINTK ( DRV , ERR , " RXDCTL.ENABLE on Rx queue %d "
" not set within the polling period \n " , rxr ) ;
}
ixgbe_release_rx_desc ( & adapter - > hw , & adapter - > rx_ring [ rxr ] ,
( adapter - > rx_ring [ rxr ] . count - 1 ) ) ;
}
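/*
 * Illustrative sketch (not part of the driver): the enable-wait above is
 * the usual "poll a status bit a bounded number of times, sleeping
 * between reads" idiom.  In outline (read_reg is a stand-in):
 */
static int example_poll_bit(u32 (*read_reg)(void), u32 bit, int max_polls)
{
	int k;

	for (k = 0; k < max_polls; k++) {
		if (read_reg() & bit)
			return 0;	/* hardware acknowledged the enable */
		msleep(1);		/* give the hardware time to respond */
	}
	return -1;			/* bit never set within the window */
}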
2007-09-15 14:07:45 -07:00
static int ixgbe_up_complete ( struct ixgbe_adapter * adapter )
{
struct net_device * netdev = adapter - > netdev ;
struct ixgbe_hw * hw = & adapter - > hw ;
2008-03-03 15:03:45 -08:00
int i , j = 0 ;
2009-02-27 15:45:05 +00:00
int num_rx_rings = adapter - > num_rx_queues ;
2009-02-06 21:46:54 -08:00
int err ;
2007-09-15 14:07:45 -07:00
int max_frame = netdev - > mtu + ETH_HLEN + ETH_FCS_LEN ;
2008-03-03 15:03:45 -08:00
u32 txdctl , rxdctl , mhadd ;
2009-02-27 15:45:05 +00:00
u32 dmatxctl ;
2008-03-03 15:03:45 -08:00
u32 gpie ;
2007-09-15 14:07:45 -07:00
2008-02-01 15:59:04 -08:00
ixgbe_get_hw_control ( adapter ) ;
2008-03-03 15:03:45 -08:00
if ( ( adapter - > flags & IXGBE_FLAG_MSIX_ENABLED ) | |
( adapter - > flags & IXGBE_FLAG_MSI_ENABLED ) ) {
2007-09-15 14:07:45 -07:00
if ( adapter - > flags & IXGBE_FLAG_MSIX_ENABLED ) {
gpie = ( IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
2008-09-11 20:04:46 -07:00
IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD ) ;
2007-09-15 14:07:45 -07:00
} else {
/* MSI only */
2008-03-03 15:03:45 -08:00
gpie = 0 ;
2007-09-15 14:07:45 -07:00
}
2008-03-03 15:03:45 -08:00
/* XXX: to interrupt immediately for EICS writes, enable this */
/* gpie |= IXGBE_GPIE_EIMEN; */
IXGBE_WRITE_REG ( hw , IXGBE_GPIE , gpie ) ;
2007-09-15 14:07:45 -07:00
}
2008-03-03 15:03:45 -08:00
if ( ! ( adapter - > flags & IXGBE_FLAG_MSIX_ENABLED ) ) {
/* legacy interrupts, use EIAM to auto-mask when reading EICR,
* specifically only auto mask tx and rx interrupts */
IXGBE_WRITE_REG ( hw , IXGBE_EIAM , IXGBE_EICS_RTX_QUEUE ) ;
}
2007-09-15 14:07:45 -07:00
2008-10-31 00:46:40 -07:00
/* Enable fan failure interrupt if media type is copper */
if ( adapter - > flags & IXGBE_FLAG_FAN_FAIL_CAPABLE ) {
gpie = IXGBE_READ_REG ( hw , IXGBE_GPIE ) ;
gpie | = IXGBE_SDP1_GPIEN ;
IXGBE_WRITE_REG ( hw , IXGBE_GPIE , gpie ) ;
}
2009-02-27 15:45:05 +00:00
if ( hw - > mac . type = = ixgbe_mac_82599EB ) {
gpie = IXGBE_READ_REG ( hw , IXGBE_GPIE ) ;
gpie | = IXGBE_SDP1_GPIEN ;
gpie | = IXGBE_SDP2_GPIEN ;
IXGBE_WRITE_REG ( hw , IXGBE_GPIE , gpie ) ;
}
2009-05-17 12:34:35 +00:00
# ifdef IXGBE_FCOE
/* adjust max frame to be able to do baby jumbo for FCoE */
if ( ( adapter - > flags & IXGBE_FLAG_FCOE_ENABLED ) & &
( max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE ) )
max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE ;
# endif /* IXGBE_FCOE */
2008-03-03 15:03:45 -08:00
mhadd = IXGBE_READ_REG ( hw , IXGBE_MHADD ) ;
2007-09-15 14:07:45 -07:00
if ( max_frame ! = ( mhadd > > IXGBE_MHADD_MFS_SHIFT ) ) {
mhadd & = ~ IXGBE_MHADD_MFS_MASK ;
mhadd | = max_frame < < IXGBE_MHADD_MFS_SHIFT ;
IXGBE_WRITE_REG ( hw , IXGBE_MHADD , mhadd ) ;
}
for ( i = 0 ; i < adapter - > num_tx_queues ; i + + ) {
2008-03-03 15:03:45 -08:00
j = adapter - > tx_ring [ i ] . reg_idx ;
txdctl = IXGBE_READ_REG ( hw , IXGBE_TXDCTL ( j ) ) ;
2008-08-26 04:27:13 -07:00
/* enable WTHRESH=8 descriptors, to encourage burst writeback */
txdctl | = ( 8 < < 16 ) ;
2009-02-27 15:45:05 +00:00
IXGBE_WRITE_REG ( hw , IXGBE_TXDCTL ( j ) , txdctl ) ;
}
if ( hw - > mac . type = = ixgbe_mac_82599EB ) {
/* DMATXCTL.EN must be set after all Tx queue config is done */
dmatxctl = IXGBE_READ_REG ( hw , IXGBE_DMATXCTL ) ;
dmatxctl | = IXGBE_DMATXCTL_TE ;
IXGBE_WRITE_REG ( hw , IXGBE_DMATXCTL , dmatxctl ) ;
}
for ( i = 0 ; i < adapter - > num_tx_queues ; i + + ) {
j = adapter - > tx_ring [ i ] . reg_idx ;
txdctl = IXGBE_READ_REG ( hw , IXGBE_TXDCTL ( j ) ) ;
2007-09-15 14:07:45 -07:00
txdctl | = IXGBE_TXDCTL_ENABLE ;
2008-03-03 15:03:45 -08:00
IXGBE_WRITE_REG ( hw , IXGBE_TXDCTL ( j ) , txdctl ) ;
2007-09-15 14:07:45 -07:00
}
2009-02-27 15:45:05 +00:00
for ( i = 0 ; i < num_rx_rings ; i + + ) {
2008-03-03 15:03:45 -08:00
j = adapter - > rx_ring [ i ] . reg_idx ;
rxdctl = IXGBE_READ_REG ( hw , IXGBE_RXDCTL ( j ) ) ;
/* enable PTHRESH=32 descriptors (half the internal cache)
* and HTHRESH = 0 descriptors ( to minimize latency on fetch ) ,
* this also removes a pesky rx_no_buffer_count increment */
rxdctl | = 0x0020 ;
2007-09-15 14:07:45 -07:00
rxdctl | = IXGBE_RXDCTL_ENABLE ;
2008-03-03 15:03:45 -08:00
IXGBE_WRITE_REG ( hw , IXGBE_RXDCTL ( j ) , rxdctl ) ;
2009-02-27 15:45:05 +00:00
if ( hw - > mac . type = = ixgbe_mac_82599EB )
ixgbe_rx_desc_queue_enable ( adapter , i ) ;
2007-09-15 14:07:45 -07:00
}
/* enable all receives */
rxdctl = IXGBE_READ_REG ( hw , IXGBE_RXCTRL ) ;
2009-02-27 15:45:05 +00:00
if ( hw - > mac . type = = ixgbe_mac_82598EB )
rxdctl | = ( IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN ) ;
else
rxdctl | = IXGBE_RXCTRL_RXEN ;
hw - > mac . ops . enable_rx_dma ( hw , rxdctl ) ;
2007-09-15 14:07:45 -07:00
if ( adapter - > flags & IXGBE_FLAG_MSIX_ENABLED )
ixgbe_configure_msix ( adapter ) ;
else
ixgbe_configure_msi_and_legacy ( adapter ) ;
clear_bit ( __IXGBE_DOWN , & adapter - > state ) ;
2008-03-03 15:03:45 -08:00
ixgbe_napi_enable_all ( adapter ) ;
/* clear any pending interrupts, may auto mask */
IXGBE_READ_REG ( hw , IXGBE_EICR ) ;
2007-09-15 14:07:45 -07:00
ixgbe_irq_enable ( adapter ) ;
2009-05-07 10:39:54 +00:00
/*
* If this adapter has a fan , check to see if we had a failure
* before we enabled the interrupt .
*/
if ( adapter - > flags & IXGBE_FLAG_FAN_FAIL_CAPABLE ) {
u32 esdp = IXGBE_READ_REG ( hw , IXGBE_ESDP ) ;
if ( esdp & IXGBE_ESDP_SDP1 )
DPRINTK ( DRV , CRIT ,
" Fan has stopped, replace the adapter \n " ) ;
}
2009-02-27 15:45:05 +00:00
/*
 * For hot-pluggable SFP+ devices, a new SFP+ module may have
2009-07-02 12:50:31 +00:00
 * arrived before interrupts were enabled but after probe.  Such
 * devices wouldn't have their type identified yet.  We need to
 * kick off the SFP+ module setup first, then try to bring up link.
2009-02-27 15:45:05 +00:00
 * If we're not hot-pluggable SFP+, we just need to configure link
 * and bring it up.
 */
2009-07-02 12:50:31 +00:00
if ( hw - > phy . type = = ixgbe_phy_unknown ) {
err = hw - > phy . ops . identify ( hw ) ;
if ( err = = IXGBE_ERR_SFP_NOT_SUPPORTED ) {
2009-07-02 12:50:52 +00:00
/*
* Take the device down and schedule the sfp tasklet
* which will unregister_netdev and log it .
*/
2009-07-02 12:50:31 +00:00
ixgbe_down ( adapter ) ;
2009-07-02 12:50:52 +00:00
schedule_work ( & adapter - > sfp_config_module_task ) ;
2009-07-02 12:50:31 +00:00
return err ;
}
2009-02-27 15:45:05 +00:00
}
if ( ixgbe_is_sfp ( hw ) ) {
ixgbe_sfp_link_config ( adapter ) ;
} else {
err = ixgbe_non_sfp_link_config ( hw ) ;
if ( err )
DPRINTK ( PROBE , ERR , " link_config FAILED %d \n " , err ) ;
}
2009-02-06 21:46:54 -08:00
2009-06-04 16:01:43 +00:00
for ( i = 0 ; i < adapter - > num_tx_queues ; i + + )
set_bit ( __IXGBE_FDIR_INIT_DONE ,
& ( adapter - > tx_ring [ i ] . reinit_state ) ) ;
2009-01-19 16:55:03 -08:00
/* enable transmits */
netif_tx_start_all_queues ( netdev ) ;
2007-09-15 14:07:45 -07:00
/* bring the link up in the watchdog, this could race with our first
 * link up interrupt but shouldn't be a problem */
2008-09-11 19:55:32 -07:00
adapter - > flags | = IXGBE_FLAG_NEED_LINK_UPDATE ;
adapter - > link_check_timeout = jiffies ;
2007-09-15 14:07:45 -07:00
mod_timer ( & adapter - > watchdog_timer , jiffies ) ;
return 0 ;
}
2008-02-01 15:58:41 -08:00
void ixgbe_reinit_locked ( struct ixgbe_adapter * adapter )
{
WARN_ON ( in_interrupt ( ) ) ;
while ( test_and_set_bit ( __IXGBE_RESETTING , & adapter - > state ) )
msleep ( 1 ) ;
ixgbe_down ( adapter ) ;
ixgbe_up ( adapter ) ;
clear_bit ( __IXGBE_RESETTING , & adapter - > state ) ;
}
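/*
 * Illustrative sketch (not part of the driver): the reinit path above
 * serializes itself with an atomic flag rather than a mutex;
 * test_and_set_bit() returns the old value, so only one caller gets in
 * and the rest sleep until clear_bit() releases the flag:
 */
static void example_serialized_section(unsigned long *state)
{
	while (test_and_set_bit(0, state))	/* old value 1 => someone else owns it */
		msleep(1);
	/* ... critical section, e.g. the down/up sequence ... */
	clear_bit(0, state);			/* release; a waiter's loop now exits */
}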
2007-09-15 14:07:45 -07:00
int ixgbe_up ( struct ixgbe_adapter * adapter )
{
/* hardware has been reset, we need to reload some things */
ixgbe_configure ( adapter ) ;
return ixgbe_up_complete ( adapter ) ;
}
void ixgbe_reset ( struct ixgbe_adapter * adapter )
{
2008-09-11 19:59:59 -07:00
struct ixgbe_hw * hw = & adapter - > hw ;
2009-05-26 20:40:47 -07:00
int err ;
err = hw - > mac . ops . init_hw ( hw ) ;
2009-06-04 11:10:35 +00:00
switch ( err ) {
case 0 :
case IXGBE_ERR_SFP_NOT_PRESENT :
break ;
case IXGBE_ERR_MASTER_REQUESTS_PENDING :
dev_err ( & adapter - > pdev - > dev , " master disable timed out \n " ) ;
break ;
2009-06-04 16:02:24 +00:00
case IXGBE_ERR_EEPROM_VERSION :
/* We are running on a pre-production device, log a warning */
dev_warn ( & adapter - > pdev - > dev , " This device is a pre-production "
" adapter/LOM. Please be aware there may be issues "
" associated with your hardware. If you are "
" experiencing problems please contact your Intel or "
" hardware representative who provided you with this "
" hardware. \n " ) ;
break ;
2009-06-04 11:10:35 +00:00
default :
dev_err ( & adapter - > pdev - > dev , " Hardware Error: %d \n " , err ) ;
}
2007-09-15 14:07:45 -07:00
/* reprogram the RAR[0] in case user changed it. */
2008-09-11 19:59:59 -07:00
hw - > mac . ops . set_rar ( hw , 0 , hw - > mac . addr , 0 , IXGBE_RAH_AV ) ;
2007-09-15 14:07:45 -07:00
}
/**
* ixgbe_clean_rx_ring - Free Rx Buffers per Queue
* @ adapter : board private structure
* @ rx_ring : ring to free buffers from
* */
static void ixgbe_clean_rx_ring ( struct ixgbe_adapter * adapter ,
2008-09-11 20:04:46 -07:00
struct ixgbe_ring * rx_ring )
2007-09-15 14:07:45 -07:00
{
struct pci_dev * pdev = adapter - > pdev ;
unsigned long size ;
unsigned int i ;
/* Free all the Rx ring sk_buffs */
for ( i = 0 ; i < rx_ring - > count ; i + + ) {
struct ixgbe_rx_buffer * rx_buffer_info ;
rx_buffer_info = & rx_ring - > rx_buffer_info [ i ] ;
if ( rx_buffer_info - > dma ) {
pci_unmap_single ( pdev , rx_buffer_info - > dma ,
2008-09-11 20:04:46 -07:00
rx_ring - > rx_buf_len ,
PCI_DMA_FROMDEVICE ) ;
2007-09-15 14:07:45 -07:00
rx_buffer_info - > dma = 0 ;
}
if ( rx_buffer_info - > skb ) {
2009-04-27 22:42:37 +00:00
struct sk_buff * skb = rx_buffer_info - > skb ;
2007-09-15 14:07:45 -07:00
rx_buffer_info - > skb = NULL ;
2009-04-27 22:42:37 +00:00
do {
struct sk_buff * this = skb ;
skb = skb - > prev ;
dev_kfree_skb ( this ) ;
} while ( skb ) ;
2007-09-15 14:07:45 -07:00
}
if ( ! rx_buffer_info - > page )
continue ;
2009-06-30 11:44:56 +00:00
if ( rx_buffer_info - > page_dma ) {
pci_unmap_page ( pdev , rx_buffer_info - > page_dma ,
PAGE_SIZE / 2 , PCI_DMA_FROMDEVICE ) ;
rx_buffer_info - > page_dma = 0 ;
}
2007-09-15 14:07:45 -07:00
put_page ( rx_buffer_info - > page ) ;
rx_buffer_info - > page = NULL ;
2008-09-11 19:58:43 -07:00
rx_buffer_info - > page_offset = 0 ;
2007-09-15 14:07:45 -07:00
}
size = sizeof ( struct ixgbe_rx_buffer ) * rx_ring - > count ;
memset ( rx_ring - > rx_buffer_info , 0 , size ) ;
/* Zero out the descriptor ring */
memset ( rx_ring - > desc , 0 , rx_ring - > size ) ;
rx_ring - > next_to_clean = 0 ;
rx_ring - > next_to_use = 0 ;
2009-03-13 22:14:50 +00:00
if ( rx_ring - > head )
writel ( 0 , adapter - > hw . hw_addr + rx_ring - > head ) ;
if ( rx_ring - > tail )
writel ( 0 , adapter - > hw . hw_addr + rx_ring - > tail ) ;
2007-09-15 14:07:45 -07:00
}
/**
* ixgbe_clean_tx_ring - Free Tx Buffers
* @ adapter : board private structure
* @ tx_ring : ring to be cleaned
* */
static void ixgbe_clean_tx_ring ( struct ixgbe_adapter * adapter ,
2008-09-11 20:04:46 -07:00
struct ixgbe_ring * tx_ring )
2007-09-15 14:07:45 -07:00
{
struct ixgbe_tx_buffer * tx_buffer_info ;
unsigned long size ;
unsigned int i ;
/* Free all the Tx ring sk_buffs */
for ( i = 0 ; i < tx_ring - > count ; i + + ) {
tx_buffer_info = & tx_ring - > tx_buffer_info [ i ] ;
ixgbe_unmap_and_free_tx_resource ( adapter , tx_buffer_info ) ;
}
size = sizeof ( struct ixgbe_tx_buffer ) * tx_ring - > count ;
memset ( tx_ring - > tx_buffer_info , 0 , size ) ;
/* Zero out the descriptor ring */
memset ( tx_ring - > desc , 0 , tx_ring - > size ) ;
tx_ring - > next_to_use = 0 ;
tx_ring - > next_to_clean = 0 ;
2009-03-13 22:14:50 +00:00
if ( tx_ring - > head )
writel ( 0 , adapter - > hw . hw_addr + tx_ring - > head ) ;
if ( tx_ring - > tail )
writel ( 0 , adapter - > hw . hw_addr + tx_ring - > tail ) ;
2007-09-15 14:07:45 -07:00
}
/**
2008-03-03 15:03:45 -08:00
* ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
2007-09-15 14:07:45 -07:00
* @ adapter : board private structure
* */
2008-03-03 15:03:45 -08:00
static void ixgbe_clean_all_rx_rings ( struct ixgbe_adapter * adapter )
2007-09-15 14:07:45 -07:00
{
int i ;
2008-03-03 15:03:45 -08:00
for ( i = 0 ; i < adapter - > num_rx_queues ; i + + )
ixgbe_clean_rx_ring ( adapter , & adapter - > rx_ring [ i ] ) ;
2007-09-15 14:07:45 -07:00
}
/**
2008-03-03 15:03:45 -08:00
* ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
2007-09-15 14:07:45 -07:00
* @ adapter : board private structure
* */
2008-03-03 15:03:45 -08:00
static void ixgbe_clean_all_tx_rings ( struct ixgbe_adapter * adapter )
2007-09-15 14:07:45 -07:00
{
int i ;
2008-03-03 15:03:45 -08:00
for ( i = 0 ; i < adapter - > num_tx_queues ; i + + )
ixgbe_clean_tx_ring ( adapter , & adapter - > tx_ring [ i ] ) ;
2007-09-15 14:07:45 -07:00
}
void ixgbe_down(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxctrl;
	u32 txdctl;
	int i, j;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBE_DOWN, &adapter->state);

	/* disable receives */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

	netif_tx_disable(netdev);

	IXGBE_WRITE_FLUSH(hw);
	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbe_irq_disable(adapter);

	ixgbe_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	cancel_work_sync(&adapter->watchdog_task);

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		cancel_work_sync(&adapter->fdir_reinit_task);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
		                (txdctl & ~IXGBE_TXDCTL_ENABLE));
	}
	/* Disable the Tx DMA engine on 82599 */
	if (hw->mac.type == ixgbe_mac_82599EB)
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
		                (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
		                 ~IXGBE_DMATXCTL_TE));

	netif_carrier_off(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbe_reset(adapter);
	ixgbe_clean_all_tx_rings(adapter);
	ixgbe_clean_all_rx_rings(adapter);

#ifdef CONFIG_IXGBE_DCA
	/* since we reset the hardware DCA settings were cleared */
	ixgbe_setup_dca(adapter);
#endif
}
/**
 * ixgbe_poll - NAPI Rx polling callback
 * @napi: structure for representing this polling device
 * @budget: how many packets driver is allowed to clean
 *
 * This function is used for legacy and MSI, NAPI mode
 **/
static int ixgbe_poll(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
	                        container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	int tx_clean_complete, work_done = 0;

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		ixgbe_update_tx_dca(adapter, adapter->tx_ring);
		ixgbe_update_rx_dca(adapter, adapter->rx_ring);
	}
#endif

	tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
	ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);

	if (!tx_clean_complete)
		work_done = budget;

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->itr_setting & 1)
			ixgbe_set_itr(adapter);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
	}
	return work_done;
}
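/*
 * A note on the budget accounting above (added commentary, not driver
 * logic): reporting work_done == budget keeps this vector on the NAPI
 * poll list.  So if budget is 64 and Rx cleans 64 packets, or the Tx
 * cleanup did not finish, we stay in polling mode; only a partial Rx
 * clean combined with a completed Tx clean re-enables the queue
 * interrupts via ixgbe_irq_enable_queues().
 */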
/**
 * ixgbe_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbe_tx_timeout(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}

static void ixgbe_reset_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter;
	adapter = container_of(work, struct ixgbe_adapter, reset_task);

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	adapter->tx_timeout_count++;

	ixgbe_reinit_locked(adapter);
}
#ifdef CONFIG_IXGBE_DCB
static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	bool ret = false;
	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];

	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
		return ret;

	f->mask = 0x7 << 3;
	adapter->num_rx_queues = f->indices;
	adapter->num_tx_queues = f->indices;
	ret = true;

	return ret;
}
#endif
/**
 * ixgbe_set_rss_queues: Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	bool ret = false;
	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
		f->mask = 0xF;
		adapter->num_rx_queues = f->indices;
		adapter->num_tx_queues = f->indices;
		ret = true;
	} else {
		ret = false;
	}

	return ret;
}
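/*
 * Worked example (commentary only): ixgbe_sw_init() seeds
 * ring_feature[RING_F_RSS].indices with min(IXGBE_MAX_RSS_INDICES,
 * num_online_cpus()), so on a 4-CPU system with RSS enabled the code
 * above ends up with num_rx_queues = num_tx_queues = 4.
 */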
/**
 * ixgbe_set_fdir_queues: Allocate queues for Flow Director
 * @adapter: board private structure to initialize
 *
 * Flow Director is an advanced Rx filter, attempting to get Rx flows back
 * to the original CPU that initiated the Tx session.  This runs in addition
 * to RSS, so if a packet doesn't match an FDIR filter, we can still spread
 * the Rx load across CPUs using RSS.
 *
 **/
static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
{
	bool ret = false;
	struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];

	f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
	f_fdir->mask = 0;

	/* Flow Director must have RSS enabled */
	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
	    ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
	     (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
		adapter->num_tx_queues = f_fdir->indices;
		adapter->num_rx_queues = f_fdir->indices;
		ret = true;
	} else {
		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
		adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
	}
	return ret;
}
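/*
 * Commentary: ixgbe_sw_init() seeds RING_F_FDIR.indices with
 * IXGBE_MAX_FDIR_INDICES, so the min() above caps Flow Director at one
 * Tx/Rx queue pair per online CPU.  If RSS is off, both FDIR capability
 * flags are stripped and queue allocation falls through to a smaller
 * feature set.
 */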
#ifdef IXGBE_FCOE
/**
 * ixgbe_set_fcoe_queues: Allocate queues for Fibre Channel over Ethernet (FCoE)
 * @adapter: board private structure to initialize
 *
 * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
 * The ring feature mask is not used as a mask for FCoE because FCoE can take
 * any 8 rx queues out of the max number of rx queues; instead, it is used as
 * the index of the first rx queue used by FCoE.
 *
 **/
static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
{
	bool ret = false;
	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];

	f->indices = min((int)num_online_cpus(), f->indices);
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
#ifdef CONFIG_IXGBE_DCB
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			DPRINTK(PROBE, INFO, "FCOE enabled with DCB\n");
			ixgbe_set_dcb_queues(adapter);
		}
#endif
		if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
			DPRINTK(PROBE, INFO, "FCOE enabled with RSS\n");
			if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
			    (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
				ixgbe_set_fdir_queues(adapter);
			else
				ixgbe_set_rss_queues(adapter);
		}
		/* adding FCoE rx rings to the end */
		f->mask = adapter->num_rx_queues;
		adapter->num_rx_queues += f->indices;
		if (adapter->num_tx_queues == 0)
			adapter->num_tx_queues = f->indices;

		ret = true;
	}

	return ret;
}
#endif /* IXGBE_FCOE */
/*
 * ixgbe_set_num_queues: Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	if (ixgbe_set_fcoe_queues(adapter))
		goto done;

#endif /* IXGBE_FCOE */
#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_queues(adapter))
		goto done;

#endif
	if (ixgbe_set_fdir_queues(adapter))
		goto done;

	if (ixgbe_set_rss_queues(adapter))
		goto done;

	/* fallback to base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;

done:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
}
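/*
 * Commentary on the precedence above: FCoE (which itself layers on top of
 * DCB or RSS/Flow Director) is tried first, then DCB, then Flow Director,
 * then plain RSS, and finally the single-queue fallback.  The first
 * helper that returns true wins.
 */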
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
                                       int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 * 3) Other (Link Status Change, etc.)
	 * 4) TCP Timer (optional)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
		                      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = min(vectors,
		                   adapter->max_msix_q_vectors + NON_Q_VECTORS);
	}
}
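/*
 * Example of the retry loop above (commentary): ask pci_enable_msix()
 * for 16 vectors; if it returns 10, only 10 were available, so the loop
 * retries with 10 and succeeds.  A negative return aborts immediately,
 * and ending up below MIN_MSIX_COUNT drops us back to MSI or legacy
 * interrupts.
 */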
/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i;
	bool ret = false;

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = i;
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = i;
		ret = true;
	} else {
		ret = false;
	}

	return ret;
}
#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	int i;
	bool ret = false;
	int dcb_i = adapter->ring_feature[RING_F_DCB].indices;

	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			/* the number of queues is assumed to be symmetric */
			for (i = 0; i < dcb_i; i++) {
				adapter->rx_ring[i].reg_idx = i << 3;
				adapter->tx_ring[i].reg_idx = i << 2;
			}
			ret = true;
		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			if (dcb_i == 8) {
				/*
				 * Tx TC0 starts at: descriptor queue 0
				 * Tx TC1 starts at: descriptor queue 32
				 * Tx TC2 starts at: descriptor queue 64
				 * Tx TC3 starts at: descriptor queue 80
				 * Tx TC4 starts at: descriptor queue 96
				 * Tx TC5 starts at: descriptor queue 104
				 * Tx TC6 starts at: descriptor queue 112
				 * Tx TC7 starts at: descriptor queue 120
				 *
				 * Rx TC0-TC7 are offset by 16 queues each
				 */
				for (i = 0; i < 3; i++) {
					adapter->tx_ring[i].reg_idx = i << 5;
					adapter->rx_ring[i].reg_idx = i << 4;
				}
				for ( ; i < 5; i++) {
					adapter->tx_ring[i].reg_idx =
					                         ((i + 2) << 4);
					adapter->rx_ring[i].reg_idx = i << 4;
				}
				for ( ; i < dcb_i; i++) {
					adapter->tx_ring[i].reg_idx =
					                         ((i + 8) << 3);
					adapter->rx_ring[i].reg_idx = i << 4;
				}

				ret = true;
			} else if (dcb_i == 4) {
				/*
				 * Tx TC0 starts at: descriptor queue 0
				 * Tx TC1 starts at: descriptor queue 64
				 * Tx TC2 starts at: descriptor queue 96
				 * Tx TC3 starts at: descriptor queue 112
				 *
				 * Rx TC0-TC3 are offset by 32 queues each
				 */
				adapter->tx_ring[0].reg_idx = 0;
				adapter->tx_ring[1].reg_idx = 64;
				adapter->tx_ring[2].reg_idx = 96;
				adapter->tx_ring[3].reg_idx = 112;
				for (i = 0; i < dcb_i; i++)
					adapter->rx_ring[i].reg_idx = i << 5;

				ret = true;
			} else {
				ret = false;
			}
		} else {
			ret = false;
		}
	} else {
		ret = false;
	}

	return ret;
}
#endif
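/*
 * Worked example for the 82598 branch above (commentary): with dcb_i = 8,
 * TC i maps to Rx descriptor queue i * 8 (i << 3) and Tx descriptor queue
 * i * 4 (i << 2), so TC2 lands on Rx queue 16 and Tx queue 8.  The 82599
 * tables can be checked the same way, e.g. Tx TC4 -> (4 + 2) << 4 = 96,
 * matching the comment block.
 */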
/**
 * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for Flow Director to the assigned rings.
 *
 **/
static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
{
	int i;
	bool ret = false;

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
	    ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
	     (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = i;
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = i;
		ret = true;
	}

	return ret;
}
#ifdef IXGBE_FCOE
/**
 * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
 *
 */
static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
{
	int i, fcoe_i = 0;
	bool ret = false;
	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];

	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
#ifdef CONFIG_IXGBE_DCB
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			ixgbe_cache_ring_dcb(adapter);
			fcoe_i = adapter->rx_ring[0].reg_idx + 1;
		}
#endif /* CONFIG_IXGBE_DCB */
		if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
			if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
			    (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
				ixgbe_cache_ring_fdir(adapter);
			else
				ixgbe_cache_ring_rss(adapter);

			fcoe_i = f->mask;
		}
		for (i = 0; i < f->indices; i++, fcoe_i++)
			adapter->rx_ring[f->mask + i].reg_idx = fcoe_i;
		ret = true;
	}
	return ret;
}
#endif /* IXGBE_FCOE */
/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order the various feature calls is important.  It must start with
 * the "most" features enabled at the same time, then trickle down to the
 * least amount of features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0].reg_idx = 0;
	adapter->tx_ring[0].reg_idx = 0;

#ifdef IXGBE_FCOE
	if (ixgbe_cache_ring_fcoe(adapter))
		return;

#endif /* IXGBE_FCOE */
#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_fdir(adapter))
		return;

	if (ixgbe_cache_ring_rss(adapter))
		return;
}
/**
 * ixgbe_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
	                           sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err_tx_ring_allocation;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
	                           sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err_rx_ring_allocation;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].count = adapter->tx_ring_count;
		adapter->tx_ring[i].queue_index = i;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].count = adapter->rx_ring_count;
		adapter->rx_ring[i].queue_index = i;
	}

	ixgbe_cache_ring_register(adapter);

	return 0;

err_rx_ring_allocation:
	kfree(adapter->tx_ring);
err_tx_ring_allocation:
	return -ENOMEM;
}
/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPUs.  So let's be conservative and only ask for
	 * (roughly) twice the number of vectors as there are CPUs.
	 */
	v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
	               (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;

	/*
	 * At the same time, hardware can only support a maximum of
	 * hw.mac->max_msix_vectors vectors.  With features
	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
	 * descriptor queues supported by our device.  Thus, we cap it off in
	 * those rare cases where the cpu count also exceeds our vector limit.
	 */
	v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
	                                sizeof(struct msix_entry), GFP_KERNEL);
	if (adapter->msix_entries) {
		for (vector = 0; vector < v_budget; vector++)
			adapter->msix_entries[vector].entry = vector;

		ixgbe_acquire_msix_vectors(adapter, v_budget);

		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
			goto out;
	}

	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
	adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
	adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
	adapter->atr_sample_rate = 0;
	ixgbe_set_num_queues(adapter);

	err = pci_enable_msi(adapter->pdev);
	if (!err) {
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
	} else {
		DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
		        "falling back to legacy.  Error: %d\n", err);
		/* reset err */
		err = 0;
	}

out:
	return err;
}
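/*
 * Budget example (commentary): with 8 Rx + 8 Tx queues on a 4-CPU system,
 * v_budget = min(16, 4 * 2) + NON_Q_VECTORS, i.e. the CPU-derived cap of
 * 8 wins before the hw.mac.max_msix_vectors clamp is even considered.
 * When MSI-X can't be had, every multiqueue feature flag is stripped and
 * the queue count is recomputed before falling back to MSI.
 */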
/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_idx, num_q_vectors;
	struct ixgbe_q_vector *q_vector;
	int napi_vectors;
	int (*poll)(struct napi_struct *, int);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
		napi_vectors = adapter->num_rx_queues;
		poll = &ixgbe_clean_rxtx_many;
	} else {
		num_q_vectors = 1;
		napi_vectors = 1;
		poll = &ixgbe_poll;
	}

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->eitr = adapter->eitr_param;
		q_vector->v_idx = q_idx;
		netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
		adapter->q_vector[q_idx] = q_vector;
	}

	return 0;

err_out:
	while (q_idx) {
		q_idx--;
		q_vector = adapter->q_vector[q_idx];
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[q_idx] = NULL;
	}
	return -ENOMEM;
}

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_idx, num_q_vectors;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	else
		num_q_vectors = 1;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
		adapter->q_vector[q_idx] = NULL;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
}
void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}
/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	err = ixgbe_set_interrupt_capability(adapter);
	if (err) {
		DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		DPRINTK(PROBE, ERR, "Unable to allocate memory for queue "
		        "vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbe_alloc_queues(adapter);
	if (err) {
		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
	        "Tx Queue count = %u\n",
	        (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
	        adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_queues:
	ixgbe_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
	adapter->tx_ring = NULL;
	adapter->rx_ring = NULL;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}
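/*
 * Commentary: ixgbe_clear_interrupt_scheme() unwinds in the same order as
 * the error path of ixgbe_init_interrupt_scheme() -- rings first, then
 * q_vectors, then the MSI-X/MSI capability itself.
 */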
/**
 * ixgbe_sfp_timer - worker thread to find a missing module
 * @data: pointer to our adapter struct
 **/
static void ixgbe_sfp_timer(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;

	/*
	 * Do the sfp_timer outside of interrupt context due to the
	 * delays that sfp+ detection requires
	 */
	schedule_work(&adapter->sfp_task);
}

/**
 * ixgbe_sfp_task - worker thread to find a missing module
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_sfp_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             sfp_task);
	struct ixgbe_hw *hw = &adapter->hw;

	if ((hw->phy.type == ixgbe_phy_nl) &&
	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
		s32 ret = hw->phy.ops.identify_sfp(hw);
		if (ret == IXGBE_ERR_SFP_NOT_PRESENT)
			goto reschedule;
		ret = hw->phy.ops.reset(hw);
		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			dev_err(&adapter->pdev->dev, "failed to initialize "
			        "because an unsupported SFP+ module type "
			        "was detected.\n"
			        "Reload the driver after installing a "
			        "supported module.\n");
			unregister_netdev(adapter->netdev);
		} else {
			DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
			        hw->phy.sfp_type);
		}
		/* don't need this routine any more */
		clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	}
	return;
reschedule:
	if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
		mod_timer(&adapter->sfp_timer,
		          round_jiffies(jiffies + (2 * HZ)));
}
/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbe_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int rss;
#ifdef CONFIG_IXGBE_DCB
	int j;
	struct tc_configuration *tc;
#endif

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Set capability flags */
	rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
	adapter->ring_feature[RING_F_RSS].indices = rss;
	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
	adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
	if (hw->mac.type == ixgbe_mac_82598EB) {
		if (hw->device_id == IXGBE_DEV_ID_82598AT)
			adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
	} else if (hw->mac.type == ixgbe_mac_82599EB) {
		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
		adapter->flags |= IXGBE_FLAG2_RSC_CAPABLE;
		adapter->flags |= IXGBE_FLAG2_RSC_ENABLED;
		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
		adapter->ring_feature[RING_F_FDIR].indices =
		                                         IXGBE_MAX_FDIR_INDICES;
		adapter->atr_sample_rate = 20;
		adapter->fdir_pballoc = 0;
#ifdef IXGBE_FCOE
		adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
		adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
		adapter->ring_feature[RING_F_FCOE].indices = 0;
#endif /* IXGBE_FCOE */
	}

#ifdef CONFIG_IXGBE_DCB
	/* Configure DCB traffic classes */
	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
		tc = &adapter->dcb_cfg.tc_config[j];
		tc->path[DCB_TX_CONFIG].bwg_id = 0;
		tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->path[DCB_RX_CONFIG].bwg_id = 0;
		tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->dcb_pfc = pfc_disabled;
	}
	adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
	adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
	adapter->dcb_cfg.rx_pba_cfg = pba_equal;
	adapter->dcb_cfg.pfc_mode_enable = false;
	adapter->dcb_cfg.round_robin_enable = false;
	adapter->dcb_set_bitmap = 0x00;
	ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
	                   adapter->ring_feature[RING_F_DCB].indices);

#endif

	/* default flow control settings */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.current_mode = ixgbe_fc_full;	/* init for ethtool output */
#ifdef CONFIG_DCB
	adapter->last_lfc_mode = hw->fc.current_mode;
#endif
	hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
	hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
	hw->fc.send_xon = true;
	hw->fc.disable_fc_autoneg = false;

	/* enable itr by default in dynamic mode */
	adapter->itr_setting = 1;
	adapter->eitr_param = 20000;

	/* set defaults for eitr in MegaBytes */
	adapter->eitr_low = 10;
	adapter->eitr_high = 20;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBE_DEFAULT_RXD;

	/* initialize eeprom parameters */
	if (ixgbe_init_eeprom_params_generic(hw)) {
		dev_err(&pdev->dev, "EEPROM initialization failed\n");
		return -EIO;
	}

	/* enable rx csum by default */
	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;
}
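/*
 * Commentary on the DCB defaults above: bwg_percent = 12 + (j & 1)
 * alternates 12%/13% across the eight traffic classes, which sums to
 * 4 * 12 + 4 * 13 = 100% of the bandwidth group.
 */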
/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vmalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
	                                     &tx_ring->dma);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->work_limit = tx_ring->count;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
	        "descriptor ring\n");
	return -ENOMEM;
}
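/*
 * Sizing example for the allocation above (commentary; assumes the usual
 * 16-byte union ixgbe_adv_tx_desc): a 512-entry ring needs 512 * 16 =
 * 8192 bytes, which ALIGN(..., 4096) leaves unchanged, while a 100-entry
 * ring would need 1600 bytes and be rounded up to 4096.  The Rx ring
 * setup below follows the same pattern with union ixgbe_adv_rx_desc.
 */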
/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (!err)
			continue;
		DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vmalloc(size);
	if (!rx_ring->rx_buffer_info) {
		DPRINTK(PROBE, ERR,
		        "vmalloc allocation failed for the rx desc ring\n");
		goto alloc_failed;
	}
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);

	if (!rx_ring->desc) {
		DPRINTK(PROBE, ERR,
		        "Memory allocation failed for the rx desc ring\n");
		vfree(rx_ring->rx_buffer_info);
		goto alloc_failed;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

alloc_failed:
	return -ENOMEM;
}

/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (!err)
			continue;
		DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbe_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i].desc)
			ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
}

/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbe_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i].desc)
			ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
}
/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
		return -EINVAL;

	DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
	        netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
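/*
 * Example of the bounds check above (commentary): an MTU of 9000 gives
 * max_frame = 9000 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 9018 bytes, which
 * must still fit under IXGBE_MAX_JUMBO_FRAME_SIZE for the request to be
 * accepted.
 */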
/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbe_open(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err;

	/* disallow open during test */
	if (test_bit(__IXGBE_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = ixgbe_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbe_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbe_configure(adapter);

	err = ixgbe_request_irq(adapter);
	if (err)
		goto err_req_irq;

	err = ixgbe_up_complete(adapter);
	if (err)
		goto err_up;

	netif_tx_start_all_queues(netdev);

	return 0;

err_up:
	ixgbe_release_hw_control(adapter);
	ixgbe_free_irq(adapter);
err_req_irq:
err_setup_rx:
	ixgbe_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbe_free_all_tx_resources(adapter);
	ixgbe_reset(adapter);

	return err;
}
/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbe_close(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ixgbe_down(adapter);
	ixgbe_free_irq(adapter);

	ixgbe_free_all_tx_resources(adapter);
	ixgbe_free_all_rx_resources(adapter);

	ixgbe_release_hw_control(adapter);

	return 0;
}
#ifdef CONFIG_PM
static int ixgbe_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
		       "suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_wake_from_d3(pdev, false);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err) {
		printk(KERN_ERR "ixgbe: Cannot initialize interrupts for "
		       "device\n");
		return err;
	}

	ixgbe_reset(adapter);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);

	if (netif_running(netdev)) {
		err = ixgbe_open(adapter->netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif /* CONFIG_PM */
static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ctrl, fctrl;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		ixgbe_down(adapter);
		ixgbe_free_irq(adapter);
		ixgbe_free_all_tx_resources(adapter);
		ixgbe_free_all_rx_resources(adapter);
	}
	ixgbe_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
	if (wufc) {
		ixgbe_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & IXGBE_WUFC_MC) {
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl |= IXGBE_FCTRL_MPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}

		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		ctrl |= IXGBE_CTRL_GIO_DIS;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);

		IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
	}

	if (wufc && hw->mac.type == ixgbe_mac_82599EB)
		pci_wake_from_d3(pdev, true);
	else
		pci_wake_from_d3(pdev, false);

	*enable_wake = !!wufc;

	ixgbe_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __ixgbe_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
#endif /* CONFIG_PM */

static void ixgbe_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__ixgbe_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
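/*
 * Commentary on the wake plumbing above: *enable_wake is simply !!wufc,
 * i.e. "some Wake-on-LAN filter is armed".  ixgbe_suspend() then either
 * lets the PCI core pick a wakeful sleep state (pci_prepare_to_sleep) or
 * forces D3hot with wake disabled; ixgbe_shutdown() only does this when
 * the system is actually powering off.
 */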
/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 total_mpc = 0;
	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;

	if (hw->mac.type == ixgbe_mac_82599EB) {
		u64 rsc_count = 0;
		for (i = 0; i < 16; i++)
			adapter->hw_rx_no_dma_resources +=
			                     IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		for (i = 0; i < adapter->num_rx_queues; i++)
			rsc_count += adapter->rx_ring[i].rsc_count;
		adapter->rsc_count = rsc_count;
	}

	adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	for (i = 0; i < 8; i++) {
		/* for packet buffers not used, the register should read 0 */
		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		missed_rx += mpc;
		adapter->stats.mpc[i] += mpc;
		total_mpc += adapter->stats.mpc[i];
		if (hw->mac.type == ixgbe_mac_82598EB)
			adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
		adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
		if (hw->mac.type == ixgbe_mac_82599EB) {
			adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
			                                    IXGBE_PXONRXCNT(i));
			adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
			                                   IXGBE_PXOFFRXCNT(i));
			adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
			                                      IXGBE_PXONRXC(i));
			adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
			                                     IXGBE_PXOFFRXC(i));
		}
		adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw,
		                                            IXGBE_PXONTXC(i));
		adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
		                                             IXGBE_PXOFFTXC(i));
	}
	adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	/* work around hardware counting issue */
	adapter->stats.gprc -= missed_rx;

	/* 82598 hardware only has a 32 bit counter in the high register */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
		IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
		adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
		IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
		adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
		IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
		adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
		adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
		adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
#ifdef IXGBE_FCOE
		adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
		adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
#endif /* IXGBE_FCOE */
	} else {
		adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	}
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	adapter->stats.bprc += bprc;
	adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		adapter->stats.mprc -= bprc;
	adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
	adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	adapter->stats.lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	adapter->stats.lxofftxc += lxoff;
	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	/*
	 * 82598 errata - tx of flow control packets is included in tx counters
	 */
	xon_off_tot = lxon + lxoff;
	adapter->stats.gptc -= xon_off_tot;
	adapter->stats.mptc -= xon_off_tot;
	adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	adapter->stats.ptc64 -= xon_off_tot;
	adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Fill out the OS statistics structure */
	adapter->net_stats.multicast = adapter->stats.mprc;

	/* Rx Errors */
	adapter->net_stats.rx_errors = adapter->stats.crcerrs +
	                               adapter->stats.rlec;
	adapter->net_stats.rx_dropped = 0;
	adapter->net_stats.rx_length_errors = adapter->stats.rlec;
	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_missed_errors = total_mpc;
}
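/*
 * Worked example of the 82598 errata adjustment above (commentary): if
 * the MAC transmitted 1000 frames of which 3 were XON and 2 were XOFF
 * pause frames, gptc is reduced by xon_off_tot = 5 and gotc by
 * 5 * (ETH_ZLEN + ETH_FCS_LEN) = 5 * 64 bytes, so flow control traffic
 * no longer inflates the good-packet counters.
 */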
/**
 * ixgbe_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbe_watchdog(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;
	u64 eics = 0;
	int i;

	/*
	 * Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires
	 */

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		goto watchdog_short_circuit;

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/*
		 * for legacy and MSI interrupts don't set any bits
		 * that are enabled for EIAM, because this operation
		 * would set *both* EIMS and EICS for any bit in EIAM
		 */
		IXGBE_WRITE_REG(hw, IXGBE_EICS,
		                (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
		goto watchdog_reschedule;
	}

	/* get one bit for every active tx/rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbe_q_vector *qv = adapter->q_vector[i];
		if (qv->rxr_count || qv->txr_count)
			eics |= ((u64)1 << i);
	}

	/* Cause software interrupt to ensure rx rings are cleaned */
	ixgbe_irq_rearm_queues(adapter, eics);

watchdog_reschedule:
	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer,
	          round_jiffies(jiffies + (2 * HZ)));

watchdog_short_circuit:
	schedule_work(&adapter->watchdog_task);
}
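/*
 * Design note: the timer callback above runs in softirq context, so it only
 * writes EICS (firing a software interrupt that kicks NAPI cleanup on every
 * active vector) and defers the real link work to ixgbe_watchdog_task via
 * schedule_work().  round_jiffies() aligns the 2 second period to a whole
 * second boundary so periodic timers across the system tend to expire
 * together and the CPU can stay in power-saving states longer.
 */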
/**
 * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_multispeed_fiber_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             multispeed_fiber_task);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg;

	adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg,
		                                  &hw->mac.autoneg);
	if (hw->mac.ops.setup_link_speed)
		hw->mac.ops.setup_link_speed(hw, autoneg, true, true);
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK;
}
/**
 * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_sfp_config_module_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             sfp_config_module_task);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 err;

	adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;

	/* Time for electrical oscillations to settle down */
	msleep(100);

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		dev_err(&adapter->pdev->dev, "failed to initialize because "
		        "an unsupported SFP+ module type was detected.\n"
		        "Reload the driver after installing a supported "
		        "module.\n");
		unregister_netdev(adapter->netdev);
		return;
	}
	hw->mac.ops.setup_sfp(hw);

	if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
		/* This will also work for DA Twinax connections */
		schedule_work(&adapter->multispeed_fiber_task);
	adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK;
}
/**
 * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_fdir_reinit_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             fdir_reinit_task);
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_bit(__IXGBE_FDIR_INIT_DONE,
			        &(adapter->tx_ring[i].reinit_state));
	} else {
		DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
		        "ignored adding FDIR ATR filters\n");
	}
	/* Done FDIR Re-initialization, enable transmits */
	netif_tx_start_all_queues(adapter->netdev);
}
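/*
 * Note: transmits were stopped before this task was scheduled (hence the
 * re-enable above), which is what makes it safe to rebuild the flow director
 * table here.  The per-ring __IXGBE_FDIR_INIT_DONE bit is what gates new ATR
 * samples in the transmit path until the table is usable again.
 */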
/**
 * ixgbe_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_watchdog_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	int i;
	struct ixgbe_ring *tx_ring;
	int some_tx_pending = 0;

	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
		if (link_up) {
#ifdef CONFIG_DCB
			if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
				for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
					hw->mac.ops.fc_enable(hw, i);
			} else {
				hw->mac.ops.fc_enable(hw, 0);
			}
#else
			hw->mac.ops.fc_enable(hw, 0);
#endif
		}

		if (link_up ||
		    time_after(jiffies, (adapter->link_check_timeout +
		                         IXGBE_TRY_LINK_TIMEOUT))) {
			adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
			IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
		}
		adapter->link_up = link_up;
		adapter->link_speed = link_speed;
	}

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			bool flow_rx, flow_tx;

			if (hw->mac.type == ixgbe_mac_82599EB) {
				u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
				u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
				flow_rx = (mflcn & IXGBE_MFLCN_RFCE);
				flow_tx = (fccfg & IXGBE_FCCFG_TFCE_802_3X);
			} else {
				u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
				u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
				flow_rx = (frctl & IXGBE_FCTRL_RFCE);
				flow_tx = (rmcs & IXGBE_RMCS_TFCE_802_3X);
			}

			printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, "
			       "Flow Control: %s\n",
			       netdev->name,
			       (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
			        "10 Gbps" :
			        (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
			         "1 Gbps" : "unknown speed")),
			       ((flow_rx && flow_tx) ? "RX/TX" :
			        (flow_rx ? "RX" :
			         (flow_tx ? "TX" : "None"))));

			netif_carrier_on(netdev);
		} else {
			/* Force detection of hung controller */
			adapter->detect_tx_hung = true;
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			printk(KERN_INFO "ixgbe: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);
		}
	}

	if (!netif_carrier_ok(netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			tx_ring = &adapter->tx_ring[i];
			if (tx_ring->next_to_use != tx_ring->next_to_clean) {
				some_tx_pending = 1;
				break;
			}
		}

		if (some_tx_pending) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			schedule_work(&adapter->reset_task);
		}
	}

	ixgbe_update_stats(adapter);
	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}
static int ixgbe_tso(struct ixgbe_adapter *adapter,
                     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
                     u32 tx_flags, u8 *hdr_len)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
	u32 mss_l4len_idx, l4len;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}
		l4len = tcp_hdrlen(skb);
		*hdr_len += l4len;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
			                                         iph->daddr, 0,
			                                         IPPROTO_TCP,
			                                         0);
			adapter->hw_tso_ctxt++;
		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			                     &ipv6_hdr(skb)->daddr,
			                     0, IPPROTO_TCP, 0);
			adapter->hw_tso6_ctxt++;
		}
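		/*
		 * For TSO the IP length fields are zeroed and tcp->check is
		 * reseeded with just the pseudo-header sum (addresses and
		 * protocol, length omitted), so the hardware can insert the
		 * final checksum into each segment it generates.
		 */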
		i = tx_ring->next_to_use;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		/* VLAN MACLEN IPLEN */
		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= ((skb_network_offset(skb)) <<
		                    IXGBE_ADVTXD_MACLEN_SHIFT);
		*hdr_len += skb_network_offset(skb);
		vlan_macip_lens |=
		    (skb_transport_header(skb) - skb_network_header(skb));
		*hdr_len +=
		    (skb_transport_header(skb) - skb_network_header(skb));
		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
		                   IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->protocol == htons(ETH_P_IP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);

		/* MSS L4LEN IDX */
		mss_l4len_idx =
		    (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
		/* use index 1 for TSO */
		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}
static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
                          struct ixgbe_ring *tx_ring,
                          struct sk_buff *skb, u32 tx_flags)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= (skb_network_offset(skb) <<
		                    IXGBE_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			vlan_macip_lens |= (skb_transport_header(skb) -
			                    skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
		                    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case cpu_to_be16(ETH_P_IP):
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					type_tucmd_mlhl |=
					    IXGBE_ADVTXD_TUCMD_L4T_TCP;
				else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
					type_tucmd_mlhl |=
					    IXGBE_ADVTXD_TUCMD_L4T_SCTP;
				break;
			case cpu_to_be16(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					type_tucmd_mlhl |=
					    IXGBE_ADVTXD_TUCMD_L4T_TCP;
				else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
					type_tucmd_mlhl |=
					    IXGBE_ADVTXD_TUCMD_L4T_SCTP;
				break;
			default:
				if (unlikely(net_ratelimit())) {
					DPRINTK(PROBE, WARNING,
					        "partial checksum but proto=%x!\n",
					        skb->protocol);
				}
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
		/* use index zero for tx checksum offload */
		context_desc->mss_l4len_idx = 0;

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		adapter->hw_csum_tx_good++;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
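/*
 * Note on context descriptor indices: ixgbe_tso() above programs hardware
 * context slot 1 while ixgbe_tx_csum() uses slot 0 (mss_l4len_idx = 0), so a
 * mix of TSO and plain checksum-offload traffic on the same ring does not
 * have to reload a single shared context on every alternation.
 */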
static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                        struct ixgbe_ring *tx_ring,
                        struct sk_buff *skb, u32 tx_flags,
                        unsigned int first)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int len;
	unsigned int total = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;
	dma_addr_t *map;

	i = tx_ring->next_to_use;

	if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
		return 0;
	}

	map = skb_shinfo(skb)->dma_maps;

	if (tx_flags & IXGBE_TX_FLAGS_FCOE)
		/* excluding fcoe_crc_eof for FCoE */
		total -= sizeof(struct fcoe_crc_eof);

	len = min(skb_headlen(skb), total);
	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		total -= size;
		offset += size;
		count++;

		if (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}
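	/*
	 * Each buffer (the linear head above, and every page fragment below)
	 * is split into descriptors of at most IXGBE_MAX_DATA_PER_TXD bytes,
	 * since that is the largest payload a single advanced Tx data
	 * descriptor can carry.
	 */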
	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = min((unsigned int)frag->size, total);
		offset = 0;

		while (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;

			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma = map[f] + offset;
			tx_buffer_info->time_stamp = jiffies;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			total -= size;
			offset += size;
			count++;
		}
		if (total == 0)
			break;
	}

	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;

	return count;
}
static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
                           struct ixgbe_ring *tx_ring,
                           int tx_flags, int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;
	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
		                 IXGBE_ADVTXD_POPTS_SHIFT;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
			                 IXGBE_ADVTXD_POPTS_SHIFT;

	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
		                 IXGBE_ADVTXD_POPTS_SHIFT;

	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
		olinfo_status |= IXGBE_ADVTXD_CC;
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_FSO)
			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
	}

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
		    cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
}
static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
                      int queue, u32 tx_flags)
{
	/* Right now, we support IPv4 only */
	struct ixgbe_atr_input atr_input;
	struct tcphdr *th;
	struct udphdr *uh;
	struct iphdr *iph = ip_hdr(skb);
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	u16 vlan_id, src_port, dst_port, flex_bytes;
	u32 src_ipv4_addr, dst_ipv4_addr;
	u8 l4type = 0;

	/* check if we're UDP or TCP */
	if (iph->protocol == IPPROTO_TCP) {
		th = tcp_hdr(skb);
		src_port = th->source;
		dst_port = th->dest;
		l4type |= IXGBE_ATR_L4TYPE_TCP;
		/* l4type IPv4 type is 0, no need to assign */
	} else if (iph->protocol == IPPROTO_UDP) {
		uh = udp_hdr(skb);
		src_port = uh->source;
		dst_port = uh->dest;
		l4type |= IXGBE_ATR_L4TYPE_UDP;
		/* l4type IPv4 type is 0, no need to assign */
	} else {
		/* Unsupported L4 header, just bail here */
		return;
	}

	memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));

	vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
	          IXGBE_TX_FLAGS_VLAN_SHIFT;
	src_ipv4_addr = iph->saddr;
	dst_ipv4_addr = iph->daddr;
	flex_bytes = eth->h_proto;

	ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
	ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
	ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
	ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
	ixgbe_atr_set_l4type_82599(&atr_input, l4type);
	/* src and dst are inverted, think how the receiver sees them */
	ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
	ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);

	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
	ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
}
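/*
 * Design note: ixgbe_atr() is not called for every frame.  The transmit path
 * below samples one packet out of every tx_ring->atr_sample_rate (and only
 * once the flow director table is initialized), which keeps the cost of
 * programming signature filters off the per-packet fast path while still
 * steering the return traffic of active flows back to the sending queue.
 */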
static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
                                 struct ixgbe_ring *tx_ring, int size)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_stop_subqueue(netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}

static int ixgbe_maybe_stop_tx(struct net_device *netdev,
                               struct ixgbe_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
}
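/*
 * The two-level check above is the usual lockless stop/wake pattern: the
 * fast path only reads the ring state, while the slow path stops the queue
 * first and re-checks after the smp_mb(), so that concurrent Tx cleanup
 * (ixgbe_clean_tx_irq()) freeing descriptors cannot slip in between the
 * check and the stop and leave the queue stopped forever.
 */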
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
		return smp_processor_id();

	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
		return 0;  /* All traffic should default to class 0 */

	return skb_tx_hash(dev, skb);
}
static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;
	int count = 0;
	unsigned int f;

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
			tx_flags |= (skb->queue_mapping << 13);
		}
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
		if (skb->priority != TC_PRIO_CONTROL) {
			tx_flags |= (skb->queue_mapping << 13);
			tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
			tx_flags |= IXGBE_TX_FLAGS_VLAN;
		} else {
			skb->queue_mapping =
			    adapter->ring_feature[RING_F_DCB].indices - 1;
		}
	}

	r_idx = skb->queue_mapping;
	tx_ring = &adapter->tx_ring[r_idx];

	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
	    (skb->protocol == htons(ETH_P_FCOE)))
		tx_flags |= IXGBE_TX_FLAGS_FCOE;

	/* four things can cause us to need a context descriptor */
	if (skb_is_gso(skb) ||
	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
	    (tx_flags & IXGBE_TX_FLAGS_FCOE))
		count++;

	count += TXD_USE_COUNT(skb_headlen(skb));
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
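	/*
	 * 'count' is now a worst-case descriptor estimate: one optional
	 * context descriptor plus TXD_USE_COUNT() data descriptors for the
	 * linear head and for each page fragment.  Checking it up front lets
	 * us stop the queue before touching the ring rather than failing
	 * half-way through a frame.
	 */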
	if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}

	first = tx_ring->next_to_use;
	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
#ifdef IXGBE_FCOE
		/* setup tx offload for FCoE */
		tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		if (tso)
			tx_flags |= IXGBE_TX_FLAGS_FSO;
#endif /* IXGBE_FCOE */
	} else {
		if (skb->protocol == htons(ETH_P_IP))
			tx_flags |= IXGBE_TX_FLAGS_IPV4;
		tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (tso)
			tx_flags |= IXGBE_TX_FLAGS_TSO;
		else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
		         (skb->ip_summed == CHECKSUM_PARTIAL))
			tx_flags |= IXGBE_TX_FLAGS_CSUM;
	}

	count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
	if (count) {
		/* add the ATR filter if ATR is on */
		if (tx_ring->atr_sample_rate) {
			++tx_ring->atr_count;
			if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
			    test_bit(__IXGBE_FDIR_INIT_DONE,
			             &tx_ring->reinit_state)) {
				ixgbe_atr(adapter, skb, tx_ring->queue_index,
				          tx_flags);
				tx_ring->atr_count = 0;
			}
		}
		ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
		               hdr_len);
		ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);

	} else {
		dev_kfree_skb_any(skb);
		tx_ring->tx_buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}
/**
 * ixgbe_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}
/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return 0;
}
static int
ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 value;
	int rc;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
	if (!rc)
		rc = value;
	return rc;
}

static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
                            u16 addr, u16 value)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	return hw->phy.ops.write_reg(hw, addr, devad, value);
}

static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
}
/**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_add_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	if (is_valid_ether_addr(mac->san_addr)) {
		rtnl_lock();
		err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	return err;
}

/**
 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_del_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	if (is_valid_ether_addr(mac->san_addr)) {
		rtnl_lock();
		err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	return err;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ixgbe_netpoll(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;

	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
		for (i = 0; i < num_q_vectors; i++) {
			struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
			ixgbe_msix_clean_many(0, q_vector);
		}
	} else {
		ixgbe_intr(adapter->pdev->irq, netdev);
	}
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
}
#endif
static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open		= ixgbe_open,
	.ndo_stop		= ixgbe_close,
	.ndo_start_xmit		= ixgbe_xmit_frame,
	.ndo_select_queue	= ixgbe_select_queue,
	.ndo_get_stats		= ixgbe_get_stats,
	.ndo_set_rx_mode	= ixgbe_set_rx_mode,
	.ndo_set_multicast_list	= ixgbe_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbe_set_mac,
	.ndo_change_mtu		= ixgbe_change_mtu,
	.ndo_tx_timeout		= ixgbe_tx_timeout,
	.ndo_vlan_rx_register	= ixgbe_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= ixgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbe_vlan_rx_kill_vid,
	.ndo_do_ioctl		= ixgbe_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgbe_netpoll,
#endif
#ifdef IXGBE_FCOE
	.ndo_fcoe_ddp_setup	= ixgbe_fcoe_ddp_get,
	.ndo_fcoe_ddp_done	= ixgbe_fcoe_ddp_put,
#endif /* IXGBE_FCOE */
};
/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbe_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbe_adapter *adapter = NULL;
	struct ixgbe_hw *hw;
	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	static int cards_found;
	int i, err, pci_using_dac;
#ifdef IXGBE_FCOE
	u16 device_caps;
#endif
	u32 part_num, eec;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev,
			                                  DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
				        "configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
	                                   IORESOURCE_MEM), ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev,
		        "pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	err = pci_enable_pcie_error_reporting(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
		        "0x%x\n", err);
		/* non-fatal, continue */
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
	                      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for (i = 1; i <= 5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
	}

	netdev->netdev_ops = &ixgbe_netdev_ops;
	ixgbe_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strcpy(netdev->name, pci_name(pdev));

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	/* EEPROM */
	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
	if (!(eec & (1 << 8)))
		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;

	/* PHY */
	memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
	hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
	hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
	hw->phy.mdio.mmds = 0;
	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	hw->phy.mdio.dev = netdev;
	hw->phy.mdio.mdio_read = ixgbe_mdio_read;
	hw->phy.mdio.mdio_write = ixgbe_mdio_write;

	/* set up this timer and work struct before calling get_invariants
	 * which might start the timer
	 */
	init_timer(&adapter->sfp_timer);
	adapter->sfp_timer.function = &ixgbe_sfp_timer;
	adapter->sfp_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);

	/* multispeed fiber has its own tasklet, called from GPI SDP1 context */
	INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task);

	/* a new SFP+ module arrival, called from GPI SDP2 context */
	INIT_WORK(&adapter->sfp_config_module_task,
	          ixgbe_sfp_config_module_task);

	ii->get_invariants(hw);

	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/*
	 * If there is a fan on this device and it has failed log the
	 * failure.
	 */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & IXGBE_ESDP_SDP1)
			DPRINTK(PROBE, CRIT,
			        "Fan has stopped, replace the adapter\n");
	}
	/* reset_hw fills in the perm_addr as well */
	err = hw->mac.ops.reset_hw(hw);
	if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
	    hw->mac.type == ixgbe_mac_82598EB) {
		/*
		 * Start a kernel thread to watch for a module to arrive.
		 * Only do this for 82598, since 82599 will generate
		 * interrupts on module arrival.
		 */
		set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
		mod_timer(&adapter->sfp_timer,
		          round_jiffies(jiffies + (2 * HZ)));
		err = 0;
	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		dev_err(&adapter->pdev->dev, "failed to initialize because "
		        "an unsupported SFP+ module type was detected.\n"
		        "Reload the driver after installing a supported "
		        "module.\n");
		goto err_sw_init;
	} else if (err) {
		dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
		goto err_sw_init;
	}

	netdev->features = NETIF_F_SG |
	                   NETIF_F_IP_CSUM |
	                   NETIF_F_HW_VLAN_TX |
	                   NETIF_F_HW_VLAN_RX |
	                   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_GRO;

	if (adapter->hw.mac.type == ixgbe_mac_82599EB)
		netdev->features |= NETIF_F_SCTP_CSUM;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;

#ifdef CONFIG_IXGBE_DCB
	netdev->dcbnl_ops = &dcbnl_ops;
#endif

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
		if (hw->mac.ops.get_device_caps) {
			hw->mac.ops.get_device_caps(hw, &device_caps);
			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
		}
	}
#endif /* IXGBE_FCOE */
	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED)
		netdev->features |= NETIF_F_LRO;

	/* make sure the EEPROM is good */
	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
		dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);

	if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "invalid MAC address\n");
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &ixgbe_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	switch (pdev->device) {
	case IXGBE_DEV_ID_82599_KX4:
		adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
		                IXGBE_WUFC_MC | IXGBE_WUFC_BC);
		/* Enable ACPI wakeup in GRC (clears the legacy APM enable) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC,
		                (IXGBE_READ_REG(hw, IXGBE_GRC) &
		                 ~IXGBE_GRC_APME));
		break;
	default:
		adapter->wol = 0;
		break;
	}
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* pick up the PCI bus settings for reporting later */
	hw->mac.ops.get_bus_info(hw);

	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n",
	         ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s" :
	          (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s" :
	          "Unknown"),
	         ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
	          (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
	          (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
	          "Unknown"),
	         netdev->dev_addr);
	ixgbe_read_pba_num_generic(hw, &part_num);
	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		dev_info(&pdev->dev, "MAC: %d, PHY: %d, SFP+: %d, "
		         "PBA No: %06x-%03x\n",
		         hw->mac.type, hw->phy.type, hw->phy.sfp_type,
		         (part_num >> 8), (part_num & 0xff));
	else
		dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
		         hw->mac.type, hw->phy.type,
		         (part_num >> 8), (part_num & 0xff));

	if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
		dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
		         "this card is not sufficient for optimal "
		         "performance.\n");
		dev_warn(&pdev->dev, "For optimal performance a x8 "
		         "PCI-Express slot is required.\n");
	}

	/* save off EEPROM version number */
	hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);

	/* reset the hardware with the new settings */
	err = hw->mac.ops.start_hw(hw);

	if (err == IXGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
		dev_warn(&pdev->dev, "This device is a pre-production "
		         "adapter/LOM.  Please be aware there may be issues "
		         "associated with your hardware.  If you are "
		         "experiencing problems please contact your Intel or "
		         "hardware representative who provided you with this "
		         "hardware.\n");
	}
	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);

#ifdef CONFIG_IXGBE_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		ixgbe_setup_dca(adapter);
	}
#endif
	/* add san mac addr to netdev */
	ixgbe_add_sanmac_netdev(netdev);

	dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
	cards_found++;
	return 0;

err_register:
	ixgbe_release_hw_control(adapter);
	ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
err_eeprom:
	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	del_timer_sync(&adapter->sfp_timer);
	cancel_work_sync(&adapter->sfp_task);
	cancel_work_sync(&adapter->multispeed_fiber_task);
	cancel_work_sync(&adapter->sfp_config_module_task);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
	                             IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err;

	set_bit(__IXGBE_DOWN, &adapter->state);

	/* clear the module not found bit to make sure the worker won't
	 * reschedule
	 */
	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	del_timer_sync(&adapter->sfp_timer);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->sfp_task);
	cancel_work_sync(&adapter->multispeed_fiber_task);
	cancel_work_sync(&adapter->sfp_config_module_task);
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		cancel_work_sync(&adapter->fdir_reinit_task);
	flush_scheduled_work();

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
	}
#endif

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		ixgbe_cleanup_fcoe(adapter);
#endif /* IXGBE_FCOE */

	/* remove the added san mac */
	ixgbe_del_sanmac_netdev(netdev);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbe_clear_interrupt_scheme(adapter);

	ixgbe_release_hw_control(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
	                             IORESOURCE_MEM));

	DPRINTK(PROBE, INFO, "complete\n");

	free_netdev(netdev);

	err = pci_disable_pcie_error_reporting(pdev);
	if (err)
		dev_err(&pdev->dev,
		        "pci_disable_pcie_error_reporting failed 0x%x\n", err);

	pci_disable_device(pdev);
}
/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		DPRINTK(PROBE, ERR,
		        "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_wake_from_d3(pdev, false);
		ixgbe_reset(adapter);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
		        "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
		        err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (ixgbe_up(adapter)) {
			DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}
static struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
	.name     = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe    = ixgbe_probe,
	.remove   = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
	.suspend  = ixgbe_suspend,
	.resume   = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
};
/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
	       ixgbe_driver_string, ixgbe_driver_version);

	printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&ixgbe_driver);
	return ret;
}

module_init(ixgbe_init_module);
/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);
}

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
                            void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
	                                 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif /* CONFIG_IXGBE_DCA */
#ifdef DEBUG
/**
 * ixgbe_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

#endif
module_exit(ixgbe_exit_module);

/* ixgbe_main.c */