/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define DRV_VERSION "1.3.16-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};
static struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_setup_rctl(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_multi(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static int igb_xmit_frame_ring_adv(struct sk_buff *, struct net_device *,
				   struct igb_ring *);
static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_rx(int irq, void *);
static irqreturn_t igb_msix_tx(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_rx_dca(struct igb_ring *);
static void igb_update_tx_dca(struct igb_ring *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_ring *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
static inline void igb_set_rah_pool(struct e1000_hw *, int, int);
static void igb_set_mc_list_pools(struct igb_adapter *, int, u16);
static void igb_vmm_control(struct igb_adapter *);
static inline void igb_set_vmolr(struct e1000_hw *, int);
static inline int igb_set_vf_rlpml(struct igb_adapter *, int, int);
static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
		 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};


static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

static int global_quad_port_a; /* global quad port a indication */

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/**
 * Scale the NIC clock cycle by a large factor so that
 * relatively small clock corrections can be added or
 * subtracted at each clock tick. The drawbacks of a
 * large factor are a) that the clock register overflows
 * more quickly (not such a big deal) and b) that the
 * increment per tick has to fit into 24 bits.
 *
 * Note that
 *   TIMINCA = IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS *
 *             IGB_TSYNC_SCALE
 *   TIMINCA += TIMINCA * adjustment [ppm] / 1e9
 *
 * The base scale factor is intentionally a power of two
 * so that the division in %struct timecounter can be done with
 * a shift.
 */
#define IGB_TSYNC_SHIFT (19)
#define IGB_TSYNC_SCALE (1<<IGB_TSYNC_SHIFT)

/**
 * The duration of one clock cycle of the NIC.
 *
 * @todo This hard-coded value is part of the specification and might change
 * in future hardware revisions. Add revision check.
 */
#define IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS 16

#if (IGB_TSYNC_SCALE * IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS) >= (1<<24)
# error IGB_TSYNC_SCALE and/or IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS are too large to fit into TIMINCA
#endif
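
/*
 * Worked example using the defines above: IGB_TSYNC_SCALE is
 * 1 << 19 = 524288, so TIMINCA = 16 * 524288 = 8388608 = 0x800000.
 * That is exactly 2^23 and therefore still fits in the 24-bit
 * increment field guarded by the #error check.
 */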
/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp;

	stamp =  rd32(E1000_SYSTIML);
	stamp |= (u64)rd32(E1000_SYSTIMH) << 32ULL;

	return stamp;
}
#ifdef DEBUG
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

/**
 * igb_get_time_str - format current NIC and system time as string
 */
static char *igb_get_time_str(struct igb_adapter *adapter,
			      char buffer[160])
{
	cycle_t hw = adapter->cycles.read(&adapter->cycles);
	struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
	struct timespec sys;
	struct timespec delta;
	getnstimeofday(&sys);

	delta = timespec_sub(nic, sys);

	sprintf(buffer,
		"HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
		hw,
		(long)nic.tv_sec, nic.tv_nsec,
		(long)sys.tv_sec, sys.tv_nsec,
		(long)delta.tv_sec, delta.tv_nsec);

	return buffer;
}
#endif
/**
 * igb_desc_unused - calculate if we have unused descriptors
 **/
static int igb_desc_unused(struct igb_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
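
/*
 * Example with hypothetical values: on a 256-descriptor ring with
 * next_to_use = 10 and next_to_clean = 5, the wrap case applies and
 * 256 + 5 - 10 - 1 = 250 descriptors are unused. One slot is always
 * kept unused so that next_to_use == next_to_clean unambiguously
 * means "empty" rather than "full".
 */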
/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

	global_quad_port_a = 0;

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);
/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i;
	unsigned int rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = rbase_offset +
			                              Q_IDX_82576(i);
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = rbase_offset +
			                              Q_IDX_82576(i);
		break;
	case e1000_82575:
	default:
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = i;
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = i;
		break;
	}
}
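
/*
 * For reference, Q_IDX_82576(i) interleaves ring indices across the
 * two queue banks: i = 0 -> 0, i = 1 -> 8, i = 2 -> 1, i = 3 -> 9, ...
 * This mirrors the VF layout described above (VF 0 on queues 0 and 8,
 * VF 1 on 1 and 9), with rbase_offset shifting the PF past the queues
 * already consumed by VFs.
 */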
/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	adapter->rx_ring->buddy = adapter->tx_ring;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &(adapter->tx_ring[i]);
		ring->count = adapter->tx_ring_count;
		ring->adapter = adapter;
		ring->queue_index = i;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &(adapter->rx_ring[i]);
		ring->count = adapter->rx_ring_count;
		ring->adapter = adapter;
		ring->queue_index = i;
		ring->itr_register = E1000_ITR;

		/* set a default napi handler for each rx_ring */
		netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64);
	}

	igb_cache_ring_register(adapter);

	return 0;
}
static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		netif_napi_del(&adapter->rx_ring[i].napi);

	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
}
#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
			      int tx_queue, int msix_vector)
{
	u32 msixbm = 0;
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE) {
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
			adapter->rx_ring[rx_queue].eims_value = msixbm;
		}
		if (tx_queue > IGB_N0_QUEUE) {
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
			adapter->tx_ring[tx_queue].eims_value =
				  E1000_EICR_TX_QUEUE0 << tx_queue;
		}
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		break;
	case e1000_82576:
		/* 82576 uses a table-based method for assigning vectors.
		   Each queue has a single entry in the table to which we write
		   a vector number along with a "valid" bit.  Sadly, the layout
		   of the table is somewhat counterintuitive. */
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue >> 1) + adapter->vfs_allocated_count;
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue & 0x1) {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			} else {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			}
			adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue >> 1) + adapter->vfs_allocated_count;
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue & 0x1) {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			} else {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			}
			adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
			array_wr32(E1000_IVAR0, index, ivar);
		}
		break;
	default:
		BUG();
		break;
	}
}
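
/*
 * For reference, the IVAR0 table layout that the 82576 branch above
 * implements: entry n covers the queue pair (2n, 2n+1) and packs four
 * 8-bit vector fields into one 32-bit register:
 *	byte 0: Rx queue 2n	byte 1: Tx queue 2n
 *	byte 2: Rx queue 2n+1	byte 3: Tx queue 2n+1
 * Each byte holds the MSI-X vector number ORed with E1000_IVAR_VALID.
 */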
/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;
	if (hw->mac.type == e1000_82576)
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
				 E1000_GPIE_PBA | E1000_GPIE_EIAME |
				 E1000_GPIE_NSICR);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = &adapter->tx_ring[i];
		igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
		adapter->eims_enable_mask |= tx_ring->eims_value;
		if (tx_ring->itr_val)
			writel(tx_ring->itr_val,
			       hw->hw_addr + tx_ring->itr_register);
		else
			writel(1, hw->hw_addr + tx_ring->itr_register);
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *rx_ring = &adapter->rx_ring[i];
		rx_ring->buddy = NULL;
		igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
		adapter->eims_enable_mask |= rx_ring->eims_value;
		if (rx_ring->itr_val)
			writel(rx_ring->itr_val,
			       hw->hw_addr + rx_ring->itr_register);
		else
			writel(1, hw->hw_addr + rx_ring->itr_register);
	}


	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		array_wr32(E1000_MSIXBM(0), vector++,
				      E1000_EIMS_OTHER);

		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);
		adapter->eims_enable_mask |= E1000_EIMS_OTHER;
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
		tmp = (vector++ | E1000_IVAR_VALID) << 8;
		wr32(E1000_IVAR_MISC, tmp);

		adapter->eims_enable_mask = (1 << (vector)) - 1;
		adapter->eims_other = 1 << (vector - 1);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */
	wrfl();
}
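
/*
 * Vector accounting example for the function above (hypothetical
 * queue counts): with four Tx and four Rx queues, vectors 0-3 serve
 * Tx, vectors 4-7 serve Rx and vector 8 handles "other" causes, so on
 * the 82576 eims_enable_mask = (1 << 9) - 1 = 0x1FF and
 * eims_other = 1 << 8.
 */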
/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, err = 0, vector = 0;

	vector = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &(adapter->tx_ring[i]);
		sprintf(ring->name, "%s-tx-%d", netdev->name, i);
		err = request_irq(adapter->msix_entries[vector].vector,
				  &igb_msix_tx, 0, ring->name,
				  &(adapter->tx_ring[i]));
		if (err)
			goto out;
		ring->itr_register = E1000_EITR(0) + (vector << 2);
		ring->itr_val = 976; /* ~4000 ints/sec */
		vector++;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &(adapter->rx_ring[i]);
		if (strlen(netdev->name) < (IFNAMSIZ - 5))
			sprintf(ring->name, "%s-rx-%d", netdev->name, i);
		else
			memcpy(ring->name, netdev->name, IFNAMSIZ);
		err = request_irq(adapter->msix_entries[vector].vector,
				  &igb_msix_rx, 0, ring->name,
				  &(adapter->rx_ring[i]));
		if (err)
			goto out;
		ring->itr_register = E1000_EITR(0) + (vector << 2);
		ring->itr_val = adapter->itr;
		vector++;
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &igb_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}
static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI)
		pci_disable_msi(adapter->pdev);
	return;
}
/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	/* Having more queues than CPUs doesn't make sense. */
	adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
	adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());

	numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
	return;
}
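
/*
 * Sizing note: numvecs above is num_tx_queues + num_rx_queues + 1 (the
 * extra vector handles link changes and other causes). On a
 * hypothetical four-CPU machine, assuming the IGB_MAX_*_QUEUES limits
 * allow four queues of each kind, 4 + 4 + 1 = 9 MSI-X vectors are
 * requested; if pci_enable_msix() cannot grant them all, the driver
 * drops back to a single Tx/Rx queue pair using plain MSI.
 */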
/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_reset_interrupt_capability(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_rx_queues = 1;
		igb_alloc_queues(adapter);
	} else {
		switch (hw->mac.type) {
		case e1000_82575:
			wr32(E1000_MSIXBM(0),
			     (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER));
			break;
		case e1000_82576:
			wr32(E1000_IVAR0, E1000_IVAR_VALID);
			break;
		default:
			break;
		}
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
				  netdev->name, netdev);
		if (!err)
			goto request_done;
		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
			  netdev->name, netdev);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}
static void igb_free_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->msix_entries) {
		int vector = 0, i;

		for (i = 0; i < adapter->num_tx_queues; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 &(adapter->tx_ring[i]));
		for (i = 0; i < adapter->num_rx_queues; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 &(adapter->rx_ring[i]));

		free_irq(adapter->msix_entries[vector++].vector, netdev);
		return;
	}

	free_irq(adapter->pdev->irq, netdev);
}
/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		wr32(E1000_EIAM, 0);
		wr32(E1000_EIMC, ~0);
		wr32(E1000_EIAC, 0);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	synchronize_irq(adapter->pdev->irq);
}
/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		wr32(E1000_EIAC, adapter->eims_enable_mask);
		wr32(E1000_EIAM, adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count)
			wr32(E1000_MBVFIMR, 0xFF);
		wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB |
				 E1000_IMS_DOUTSYNC));
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK);
		wr32(E1000_IAM, IMS_ENABLE_MASK);
	}
}
static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (adapter->vlgrp) {
		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
			if (adapter->hw.mng_cookie.status &
				E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
				igb_vlan_rx_add_vid(netdev, vid);
				adapter->mng_vlan_id = vid;
			} else
				adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;

			if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
			    (vid != old_vid) &&
			    !vlan_group_get_device(adapter->vlgrp, old_vid))
				igb_vlan_rx_kill_vid(netdev, old_vid);
		} else
			adapter->mng_vlan_id = vid;
	}
}
/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
			ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}
/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_multi(netdev);

	igb_restore_vlan(adapter);

	igb_configure_tx(adapter);
	igb_setup_rctl(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &adapter->rx_ring[i];
		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
	}


	adapter->tx_queue_len = netdev->tx_queue_len;
}
/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_ring[i].napi);
	if (adapter->msix_entries)
		igb_configure_msix(adapter);

	igb_vmm_control(adapter);
	igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
	igb_set_vmolr(hw, adapter->vfs_allocated_count);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* Fire a link change interrupt to start the watchdog. */
	wr32(E1000_ICS, E1000_ICS_LSC);
	return 0;
}
void igb_down(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_ring[i].napi);

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);

	/* record the stats before reset*/
	igb_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
}
void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}
void igb_reset(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_82576:
		pba = E1000_PBA_64K;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}
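
	/*
	 * Worked example for the sizing above, assuming a hypothetical
	 * 9000-byte MTU (max_frame_size = 9018) and the 16-byte advanced
	 * Tx descriptor: min_tx_space = (9018 + 16 - 4) * 2 = 18060,
	 * aligned up to 18432 and shifted down to 18 KB; min_rx_space =
	 * 9018 aligned up to 9216, i.e. 9 KB.
	 */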
	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
	hwm = min(((pba << 10) * 9 / 10),
			((pba << 10) - 2 * adapter->max_frame_size));

	if (mac->type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}
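
	/*
	 * Example, assuming a 64 KB Rx PBA (pba = 64) and a 1522-byte
	 * max_frame_size: hwm = min(65536 * 9 / 10, 65536 - 2 * 1522) =
	 * min(58982, 62492) = 58982. On the 82576 the 16-byte mask then
	 * gives high_water = 58976 and low_water = 58960.
	 */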
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->type = fc->original_type;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].clear_to_send = false;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);
	wr32(E1000_WUC, 0);

	if (adapter->hw.mac.ops.init_hw(&adapter->hw))
		dev_err(&adapter->pdev->dev, "Hardware Error\n");

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_reset_adaptive(&adapter->hw);
	igb_get_phy_info(&adapter->hw);
}
static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame_adv,
	.ndo_get_stats		= igb_get_stats,
	.ndo_set_multicast_list	= igb_set_multi,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= igb_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
};
/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u32 part_num;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}
	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
	                                   IORESOURCE_MEM),
	                                   igb_driver_name);
	if (err)
		goto err_pci_reg;

	err = pci_enable_pcie_error_reporting(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
		        "0x%x\n", err);
		/* non-fatal, continue */
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
	                           IGB_ABS_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr)
		goto err_ioremap;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* setup the private structure */
	hw->back = adapter;
	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;
#ifdef CONFIG_PCI_IOV
	/* since iov functionality isn't critical to base device function we
	 * can accept failure.  If it fails we don't allow iov to be enabled */
	if (hw->mac.type == e1000_82576) {
		/* 82576 supports a maximum of 7 VFs in addition to the PF */
		unsigned int num_vfs = (max_vfs > 7) ? 7 : max_vfs;
		int i;
		unsigned char mac_addr[ETH_ALEN];

		if (num_vfs) {
			adapter->vf_data = kcalloc(num_vfs,
						sizeof(struct vf_data_storage),
						GFP_KERNEL);
			if (!adapter->vf_data) {
				dev_err(&pdev->dev,
				        "Could not allocate VF private data - "
					"IOV enable failed\n");
			} else {
				err = pci_enable_sriov(pdev, num_vfs);
				if (!err) {
					adapter->vfs_allocated_count = num_vfs;
					dev_info(&pdev->dev,
					         "%d vfs allocated\n",
					         num_vfs);
					for (i = 0;
					     i < adapter->vfs_allocated_count;
					     i++) {
						random_ether_addr(mac_addr);
						igb_set_vf_mac(adapter, i,
						               mac_addr);
					}
				} else {
					kfree(adapter->vf_data);
					adapter->vf_data = NULL;
				}
			}
		}
	}
#endif
	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	/* set flags */
	switch (hw->mac.type) {
	case e1000_82575:
		adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
		break;
	case e1000_82576:
	default:
		break;
	}

	hw->phy.autoneg_wait_to_complete = false;
	hw->mac.adaptive_ifs = true;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			"PHY reset is blocked due to SOL/IDER session.\n");

	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	netdev->features |= NETIF_F_GRO;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good */
	if (igb_validate_nvm_checksum(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}
	setup_timer(&adapter->watchdog_timer, &igb_watchdog,
	            (unsigned long) adapter);
	setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
	            (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.original_type = e1000_fc_default;
	hw->fc.type = e1000_fc_default;

	adapter->itr_setting = IGB_DEFAULT_ITR;
	adapter->itr = IGB_START_ITR;

	igb_validate_mdi_setting(hw);

	adapter->rx_csum = 1;

	/* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	if (hw->bus.func == 0)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		/* Always use CB2 mode, difference is masked
		 * in the CB driver. */
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
		igb_setup_dca(adapter);
	}
#endif
	/*
	 * Initialize hardware timer: we keep it running just in case
	 * that some program needs it later on.
	 */
	memset(&adapter->cycles, 0, sizeof(adapter->cycles));
	adapter->cycles.read = igb_read_clock;
	adapter->cycles.mask = CLOCKSOURCE_MASK(64);
	adapter->cycles.mult = 1;
	adapter->cycles.shift = IGB_TSYNC_SHIFT;
	wr32(E1000_TIMINCA,
	     (1<<24) |
	     IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * IGB_TSYNC_SCALE);
#if 0
	/*
	 * Avoid rollover while we initialize by resetting the time counter.
	 */
	wr32(E1000_SYSTIML, 0x00000000);
	wr32(E1000_SYSTIMH, 0x00000000);
#else
	/*
	 * Set registers so that rollover occurs soon to test this.
	 */
	wr32(E1000_SYSTIML, 0x00000000);
	wr32(E1000_SYSTIMH, 0xFF800000);
#endif
	wrfl();
	timecounter_init(&adapter->clock,
			 &adapter->cycles,
			 ktime_to_ns(ktime_get_real()));

	/*
	 * Synchronize our NIC clock against system wall clock. NIC
	 * time stamp reading requires ~3us per sample, each sample
	 * was pretty stable even under load => only require 10
	 * samples for each offset comparison.
	 */
	memset(&adapter->compare, 0, sizeof(adapter->compare));
	adapter->compare.source = &adapter->clock;
	adapter->compare.target = ktime_get_real;
	adapter->compare.num_samples = 10;
	timecompare_update(&adapter->compare, 0);

#ifdef DEBUG
	{
		char buffer[160];
		printk(KERN_DEBUG
			"igb: %s: hw %p initialized timer\n",
			igb_get_time_str(adapter, buffer),
			&adapter->hw);
	}
#endif
	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
		 netdev->name,
		 ((hw->bus.speed == e1000_bus_speed_2500)
		  ? "2.5Gb/s" : "unknown"),
		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
		   "unknown"),
		 netdev->dev_addr);

	igb_read_part_num(hw, &part_num);
	dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
		(part_num >> 8), (part_num & 0xff));

	dev_info(&pdev->dev,
		"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		adapter->msix_entries ? "MSI-X" :
		(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		adapter->num_rx_queues, adapter->num_tx_queues);

	return 0;

err_register:
	igb_release_hw_control(adapter);
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);

	igb_free_queues(adapter);
err_sw_init:
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
	                             IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* flush_scheduled_work() may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	flush_scheduled_work();

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	unregister_netdev(netdev);

	if (!igb_check_reset_block(&adapter->hw))
		igb_reset_phy(&adapter->hw);

	igb_reset_interrupt_capability(adapter);

	igb_free_queues(adapter);

#ifdef CONFIG_PCI_IOV
	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");
	}
#endif
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
	                             IORESOURCE_MEM));

	free_netdev(netdev);

	err = pci_disable_pcie_error_reporting(pdev);
	if (err)
		dev_err(&pdev->dev,
		        "pci_disable_pcie_error_reporting failed 0x%x\n", err);

	pci_disable_device(pdev);
}
/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	adapter->rx_ps_hdr_size = 0; /* disable packet split */
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	/* This call may decrease the number of queues depending on
	 * interrupt mode. */
	igb_set_interrupt_capability(adapter);

	if (igb_alloc_queues(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}
/**
 * igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igb_open(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;
	int i;

	/* disallow open during test */
	if (test_bit(__IGB_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = igb_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igb_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	/* e1000_power_up_phy(adapter); */

	adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
		igb_update_mng_vlan(adapter);

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.  */
	igb_configure(adapter);

	igb_vmm_control(adapter);
	igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
	igb_set_vmolr(hw, adapter->vfs_allocated_count);

	err = igb_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igb_up() */
	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_ring[i].napi);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);

	igb_irq_enable(adapter);

	netif_tx_start_all_queues(netdev);

	/* Fire a link status change interrupt to start the watchdog. */
	wr32(E1000_ICS, E1000_ICS_LSC);

	return 0;

err_req_irq:
	igb_release_hw_control(adapter);
	/* e1000_power_down_phy(adapter); */
	igb_free_all_rx_resources(adapter);
err_setup_rx:
	igb_free_all_tx_resources(adapter);
err_setup_tx:
	igb_reset(adapter);

	return err;
}
/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igb_close(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
	igb_down(adapter);

	igb_free_irq(adapter);

	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it) */
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    !(adapter->vlgrp &&
	      vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
		igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);

	return 0;
}
/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_adapter *adapter,
			   struct igb_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct igb_buffer) * tx_ring->count;
	tx_ring->buffer_info = vmalloc(size);
	if (!tx_ring->buffer_info)
		goto err;
	memset(tx_ring->buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
					     &tx_ring->dma);

	if (!tx_ring->desc)
		goto err;

	tx_ring->adapter = adapter;
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	vfree(tx_ring->buffer_info);
	dev_err(&adapter->pdev->dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}
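
/* Sizing example: sizeof(union e1000_adv_tx_desc) is 16 bytes, so a
 * ring of 256 descriptors needs exactly one 4 KB page; other counts
 * are rounded up to the next 4 KB boundary by the ALIGN() above
 * before the coherent DMA allocation.
 */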
/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	int i, err = 0;
	int r_idx;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			dev_err(&adapter->pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_tx_resources(&adapter->tx_ring[i]);
			break;
		}
	}

	for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
		r_idx = i % adapter->num_tx_queues;
		adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
	}
	return err;
}
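
/* Mapping note: multi_tx_table spreads the possible stack queue
 * indices round-robin over the rings actually allocated, e.g. with 4
 * rings, table slots 0, 4, 8, ... all point at ring 0;
 * igb_xmit_frame_adv() below indexes this table by queue_mapping.
 */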
/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;
	u32 txdctl, txctrl;
	int i, j;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		wr32(E1000_TDLEN(j),
		     ring->count * sizeof(union e1000_adv_tx_desc));
		tdba = ring->dma;
		wr32(E1000_TDBAL(j),
		     tdba & 0x00000000ffffffffULL);
		wr32(E1000_TDBAH(j), tdba >> 32);

		ring->head = E1000_TDH(j);
		ring->tail = E1000_TDT(j);
		writel(0, hw->hw_addr + ring->tail);
		writel(0, hw->hw_addr + ring->head);
		txdctl = rd32(E1000_TXDCTL(j));
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		wr32(E1000_TXDCTL(j), txdctl);

		/* Turn off Relaxed Ordering on head write-backs.  The
		 * writebacks MUST be delivered in order or it will
		 * completely screw up our bookkeeping.
		 */
		txctrl = rd32(E1000_DCA_TXCTRL(j));
		txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
		wr32(E1000_DCA_TXCTRL(j), txctrl);
	}

	/* disable queue 0 to prevent tail bump w/o re-configuration */
	if (adapter->vfs_allocated_count)
		wr32(E1000_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;

	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}
/**
 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_adapter *adapter,
			   struct igb_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct igb_buffer) * rx_ring->count;
	rx_ring->buffer_info = vmalloc(size);
	if (!rx_ring->buffer_info)
		goto err;
	memset(rx_ring->buffer_info, 0, size);

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
					     &rx_ring->dma);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	rx_ring->adapter = adapter;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
		"the receive descriptor ring\n");
	return -ENOMEM;
}
/**
 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			dev_err(&adapter->pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_rx_resources(&adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}
/**
 * igb_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void igb_setup_rctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;
	u32 srrctl = 0;
	int i, j;

	rctl = rd32(E1000_RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/*
	 * enable stripping of CRC.  It's unlikely this will break BMC
	 * redirection as it did with e1000.  Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= E1000_RCTL_SECRC;

	/*
	 * disable store bad packets and clear size bits.
	 */
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

	/* enable LPE to allow packets larger than the standard frame size;
	 * the actual limit is enforced via RLPML (see igb_rlpml_set) */
	rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	switch (adapter->rx_buffer_len) {
	case IGB_RXBUFFER_256:
		rctl |= E1000_RCTL_SZ_256;
		break;
	case IGB_RXBUFFER_512:
		rctl |= E1000_RCTL_SZ_512;
		break;
	default:
		srrctl = ALIGN(adapter->rx_buffer_len, 1024)
			 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
		break;
	}

	/* 82575 and greater support packet-split where the protocol
	 * header is placed in skb->data and the packet data is
	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
	 * In the case of a non-split, skb->data is linearly filled,
	 * followed by the page buffers.  Therefore, skb->data is
	 * sized to hold the largest protocol header.
	 */
	/* allocations using alloc_page take too long for regular MTU
	 * so only enable packet split for jumbo frames */
	if (adapter->netdev->mtu > ETH_DATA_LEN) {
		adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
		srrctl |= adapter->rx_ps_hdr_size <<
			 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else {
		adapter->rx_ps_hdr_size = 0;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	}

	/* Attention!!!  For SR-IOV PF driver operations you must enable
	 * queue drop for all VF and PF queues to prevent head of line blocking
	 * if an untrusted VF does not provide descriptors to hardware.
	 */
	if (adapter->vfs_allocated_count) {
		u32 vmolr;

		j = adapter->rx_ring[0].reg_idx;

		/* set all queue drop enable bits */
		wr32(E1000_QDE, ALL_QUEUES);
		srrctl |= E1000_SRRCTL_DROP_EN;

		/* disable queue 0 to prevent tail write w/o re-config */
		wr32(E1000_RXDCTL(0), 0);

		vmolr = rd32(E1000_VMOLR(j));
		if (rctl & E1000_RCTL_LPE)
			vmolr |= E1000_VMOLR_LPE;
		if (adapter->num_rx_queues > 0)
			vmolr |= E1000_VMOLR_RSSE;
		wr32(E1000_VMOLR(j), vmolr);
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		j = adapter->rx_ring[i].reg_idx;
		wr32(E1000_SRRCTL(j), srrctl);
	}

	wr32(E1000_RCTL, rctl);
}
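
/* SRRCTL sizing example: BSIZEPKT is programmed in 1 KB units
 * (E1000_SRRCTL_BSIZEPKT_SHIFT is 10), so a 2048-byte rx_buffer_len
 * becomes ALIGN(2048, 1024) >> 10 == 2.  The legacy 256/512-byte
 * sizes are still expressed through RCTL bits instead.
 */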
/**
 * igb_rlpml_set - set maximum receive packet size
 * @adapter: board private structure
 *
 * Configure maximum receivable packet size.
 **/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
	u32 max_frame_size = adapter->max_frame_size;
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;

	if (adapter->vlgrp)
		max_frame_size += VLAN_TAG_SIZE;

	/* if vfs are enabled we set RLPML to the largest possible request
	 * size and set the VMOLR RLPML to the size we need */
	if (pf_id) {
		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
		max_frame_size = MAX_STD_JUMBO_FRAME_SIZE + VLAN_TAG_SIZE;
	}

	wr32(E1000_RLPML, max_frame_size);
}

/**
 * igb_configure_vt_default_pool - Configure VT default pool
 * @adapter: board private structure
 *
 * Configure the default pool
 **/
static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;
	u32 vtctl;

	/* not in sr-iov mode - do nothing */
	if (!pf_id)
		return;

	vtctl = rd32(E1000_VT_CTL);
	vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
		   E1000_VT_CTL_DISABLE_DEF_POOL);
	vtctl |= pf_id << E1000_VT_CTL_DEFAULT_POOL_SHIFT;
	wr32(E1000_VT_CTL, vtctl);
}
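
/* Pool layout note: with SR-IOV active, VFs occupy pools
 * 0..vfs_allocated_count-1 and the PF uses the next pool, which is
 * why vfs_allocated_count doubles as the PF's pool id (pf_id) here
 * and in the rar/mc pool helpers used elsewhere in this file.
 */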
/**
 * igb_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igb_configure_rx(struct igb_adapter *adapter)
{
	u64 rdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, rxcsum;
	u32 rxdctl;
	int i;

	/* disable receives while setting up the descriptors */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	wrfl();
	mdelay(10);

	if (adapter->itr_setting > 3)
		wr32(E1000_ITR, adapter->itr);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &adapter->rx_ring[i];
		int j = ring->reg_idx;
		rdba = ring->dma;
		wr32(E1000_RDBAL(j),
		     rdba & 0x00000000ffffffffULL);
		wr32(E1000_RDBAH(j), rdba >> 32);
		wr32(E1000_RDLEN(j),
		     ring->count * sizeof(union e1000_adv_rx_desc));

		ring->head = E1000_RDH(j);
		ring->tail = E1000_RDT(j);
		writel(0, hw->hw_addr + ring->tail);
		writel(0, hw->hw_addr + ring->head);

		rxdctl = rd32(E1000_RXDCTL(j));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= IGB_RX_PTHRESH;
		rxdctl |= IGB_RX_HTHRESH << 8;
		rxdctl |= IGB_RX_WTHRESH << 16;
		wr32(E1000_RXDCTL(j), rxdctl);
	}

	if (adapter->num_rx_queues > 1) {
		u32 random[10];
		u32 mrqc;
		u32 j, shift;
		union e1000_reta {
			u32 dword;
			u8  bytes[4];
		} reta;

		get_random_bytes(&random[0], 40);

		if (hw->mac.type >= e1000_82576)
			shift = 0;
		else
			shift = 6;
		for (j = 0; j < (32 * 4); j++) {
			reta.bytes[j & 3] =
				adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift;
			if ((j & 3) == 3)
				writel(reta.dword,
				       hw->hw_addr + E1000_RETA(0) + (j & ~3));
		}
		if (adapter->vfs_allocated_count)
			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
		else
			mrqc = E1000_MRQC_ENABLE_RSS_4Q;

		/* Fill out hash function seeds */
		for (j = 0; j < 10; j++)
			array_wr32(E1000_RSSRK(0), j, random[j]);

		mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
			 E1000_MRQC_RSS_FIELD_IPV4_TCP);
		mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
			 E1000_MRQC_RSS_FIELD_IPV6_TCP);
		mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
			 E1000_MRQC_RSS_FIELD_IPV6_UDP);
		mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
			 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);

		wr32(E1000_MRQC, mrqc);

		/* Multiqueue and raw packet checksumming are mutually
		 * exclusive.  Note that this is not the same as TCP/IP
		 * checksumming, which works fine. */
		rxcsum = rd32(E1000_RXCSUM);
		rxcsum |= E1000_RXCSUM_PCSD;
		wr32(E1000_RXCSUM, rxcsum);
	} else {
		/* Enable multi-queue for sr-iov */
		if (adapter->vfs_allocated_count)
			wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
		/* Enable Receive Checksum Offload for TCP and UDP */
		rxcsum = rd32(E1000_RXCSUM);
		if (adapter->rx_csum)
			rxcsum |= E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPPCSE;
		else
			rxcsum &= ~(E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPPCSE);

		wr32(E1000_RXCSUM, rxcsum);
	}

	/* Set the default pool for the PF's first queue */
	igb_configure_vt_default_pool(adapter);

	igb_rlpml_set(adapter);

	/* Enable Receives */
	wr32(E1000_RCTL, rctl);
}
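
/* RETA example: the redirection table is 32 dwords (128 one-byte
 * entries) filled round-robin by j % num_rx_queues, so with two
 * queues the entries alternate 0,1,0,1,... and flows hash evenly
 * across rings.  82576 parts take the queue index in the low bits
 * (shift 0); the 82575 appears to expect it in bits 7:6, hence the
 * shift of 6 above.
 */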
/**
 * igb_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void igb_free_tx_resources(struct igb_ring *tx_ring)
{
	struct pci_dev *pdev = tx_ring->adapter->pdev;

	igb_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igb_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void igb_free_all_tx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_free_tx_resources(&adapter->tx_ring[i]);
}

static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
					   struct igb_buffer *buffer_info)
{
	buffer_info->dma = 0;
	if (buffer_info->skb) {
		skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
			      DMA_TO_DEVICE);
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	/* buffer_info must be completely set up in the transmit path */
}
/**
 * igb_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
	struct igb_adapter *adapter = tx_ring->adapter;
	struct igb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		igb_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	size = sizeof(struct igb_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_clean_tx_ring(&adapter->tx_ring[i]);
}
/**
 * igb_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
	struct pci_dev *pdev = rx_ring->adapter->pdev;

	igb_clean_rx_ring(rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igb_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_free_rx_resources(&adapter->rx_ring[i]);
}

/**
 * igb_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
	struct igb_adapter *adapter = rx_ring->adapter;
	struct igb_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->rx_ps_hdr_size)
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_ps_hdr_size,
						 PCI_DMA_FROMDEVICE);
			else
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		if (buffer_info->page) {
			if (buffer_info->page_dma)
				pci_unmap_page(pdev, buffer_info->page_dma,
					       PAGE_SIZE / 2,
					       PCI_DMA_FROMDEVICE);
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_dma = 0;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igb_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_clean_rx_ring(&adapter->rx_ring[i]);
}
/**
 * igb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_set_mac(struct net_device *netdev, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);

	return 0;
}
/**
 * igb_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igb_set_multi(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct dev_mc_list *mc_ptr;
	u8 *mta_list = NULL;
	u32 rctl;
	int i;

	/* Check for Promiscuous and All Multicast modes */
	rctl = rd32(E1000_RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		rctl &= ~E1000_RCTL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
			rctl &= ~E1000_RCTL_UPE;
		} else
			rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
		rctl |= E1000_RCTL_VFE;
	}
	wr32(E1000_RCTL, rctl);

	if (netdev->mc_count) {
		mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
		if (!mta_list) {
			dev_err(&adapter->pdev->dev,
				"failed to allocate multicast filter list\n");
			return;
		}
	}

	/* The shared function expects a packed array of only addresses. */
	mc_ptr = netdev->mc_list;

	for (i = 0; i < netdev->mc_count; i++) {
		if (!mc_ptr)
			break;
		memcpy(mta_list + (i * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
		mc_ptr = mc_ptr->next;
	}
	igb_update_mc_addr_list(hw, mta_list, i,
				adapter->vfs_allocated_count + 1,
				mac->rar_entry_count);

	igb_set_mc_list_pools(adapter, i, mac->rar_entry_count);
	igb_restore_vf_multicasts(adapter);

	kfree(mta_list);
}
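
/* Note: when mc_count is zero, mta_list stays NULL and i stays 0, so
 * the igb_update_mc_addr_list() call above effectively flushes the
 * multicast table; kfree(NULL) at the end is a harmless no-op.
 */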
/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy */
static void igb_update_phy_info(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *) data;
	igb_get_phy_info(&adapter->hw);
}

/**
 * igb_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 **/
static bool igb_has_link(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;
	s32 ret_val = 0;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt.  get_link_status will stay
	 * false until the e1000_check_for_link establishes link
	 * for copper adapters ONLY
	 */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			ret_val = hw->mac.ops.check_for_link(hw);
			link_active = !hw->mac.get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_fiber:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = !!(rd32(E1000_STATUS) & E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = hw->mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	return link_active;
}
/**
 * igb_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igb_watchdog(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igb_watchdog_task(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work,
					struct igb_adapter, watchdog_task);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct igb_ring *tx_ring = adapter->tx_ring;
	u32 link;
	u32 eics = 0;
	int i;

	link = igb_has_link(adapter);
	if ((netif_carrier_ok(netdev)) && link)
		goto link_up;

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			hw->mac.ops.get_speed_and_duplex(&adapter->hw,
						   &adapter->link_speed,
						   &adapter->link_duplex);

			ctrl = rd32(E1000_CTRL);
			/* Link status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
				 "Flow Control: %s\n",
				 netdev->name,
				 adapter->link_speed,
				 adapter->link_duplex == FULL_DUPLEX ?
				 "Full Duplex" : "Half Duplex",
				 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
				 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
				 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
				 E1000_CTRL_TFCE) ? "TX" : "None")));

			/* tweak tx_queue_len according to speed/duplex and
			 * adjust the timeout factor */
			netdev->tx_queue_len = adapter->tx_queue_len;
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				netdev->tx_queue_len = 10;
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				netdev->tx_queue_len = 100;
				/* maybe add some timeout factor ? */
				break;
			}

			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			/* Link status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	}

link_up:
	igb_update_stats(adapter);

	hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
	adapter->gorc_old = adapter->stats.gorc;
	adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
	adapter->gotc_old = adapter->stats.gotc;

	igb_update_adaptive(&adapter->hw);

	if (!netif_carrier_ok(netdev)) {
		if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
		}
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	if (adapter->msix_entries) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			eics |= adapter->rx_ring[i].eims_value;
		wr32(E1000_EICS, eics);
	} else {
		wr32(E1000_ICS, E1000_ICS_RXDMT0);
	}

	/* Force detection of hung controller every watchdog period */
	tx_ring->detect_tx_hung = true;

	/* Reset the timer */
	if (!test_bit(__IGB_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * igb_update_ring_itr - update the dynamic ITR value based on packet size
 *
 *      Stores a new ITR value based strictly on packet size.  This
 *      algorithm is less sophisticated than that used in igb_update_itr,
 *      due to the difficulty of synchronizing statistics across multiple
 *      receive rings.  The divisors and thresholds used by this function
 *      were determined based on theoretical maximum wire speed and testing
 *      data, in order to minimize response time while increasing bulk
 *      throughput.
 *      This functionality is controlled by the InterruptThrottleRate module
 *      parameter (see igb_param.c)
 *      NOTE:  This function is called only when operating in a multiqueue
 *             receive environment.
 * @rx_ring: pointer to ring
 **/
static void igb_update_ring_itr(struct igb_ring *rx_ring)
{
	int new_val = rx_ring->itr_val;
	int avg_wire_size = 0;
	struct igb_adapter *adapter = rx_ring->adapter;

	if (!rx_ring->total_packets)
		goto clear_counts; /* no packets, so don't do anything */

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 120 ticks.
	 */
	if (adapter->link_speed != SPEED_1000) {
		new_val = 120;
		goto set_itr_val;
	}
	avg_wire_size = rx_ring->total_bytes / rx_ring->total_packets;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;

set_itr_val:
	if (new_val != rx_ring->itr_val) {
		rx_ring->itr_val = new_val;
		rx_ring->set_itr = 1;
	}
clear_counts:
	rx_ring->total_bytes = 0;
	rx_ring->total_packets = 0;
}
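
/* Worked example (a sketch): with an average wire size of 600 bytes
 * (624 after the 24-byte CRC/preamble/gap adjustment), the mid-size
 * boost path yields new_val = 624 / 3 = 208 ticks.  The 4000 ints/s
 * fallback of 120 ticks implies roughly 2 us per tick (250 us / 120),
 * so 208 ticks lands in the neighborhood of 2,300 ints/s.
 */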
/**
 * igb_update_itr - update the dynamic ITR value based on statistics
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 *      this functionality is controlled by the InterruptThrottleRate module
 *      parameter (see igb_param.c)
 *      NOTE:  These calculations are only valid when operating in a single-
 *             queue environment.
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 **/
static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
				   int packets, int bytes)
{
	unsigned int retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000) {
				retval = bulk_latency;
			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
				retval = bulk_latency;
			} else if ((packets > 35)) {
				retval = lowest_latency;
			}
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 1500) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}

static void igb_set_itr(struct igb_adapter *adapter)
{
	u16 current_itr;
	u32 new_itr = adapter->itr;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->rx_itr = igb_update_itr(adapter,
				    adapter->rx_itr,
				    adapter->rx_ring->total_packets,
				    adapter->rx_ring->total_bytes);

	if (adapter->rx_ring->buddy) {
		adapter->tx_itr = igb_update_itr(adapter,
					    adapter->tx_itr,
					    adapter->tx_ring->total_packets,
					    adapter->tx_ring->total_bytes);
		current_itr = max(adapter->rx_itr, adapter->tx_itr);
	} else {
		current_itr = adapter->rx_itr;
	}

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && current_itr == lowest_latency)
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 70000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 4000;
		break;
	default:
		break;
	}

set_itr_now:
	adapter->rx_ring->total_bytes = 0;
	adapter->rx_ring->total_packets = 0;
	if (adapter->rx_ring->buddy) {
		adapter->rx_ring->buddy->total_bytes = 0;
		adapter->rx_ring->buddy->total_packets = 0;
	}

	if (new_itr != adapter->itr) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > adapter->itr ?
			     min(adapter->itr + (new_itr >> 2), new_itr) :
			     new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts.  Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		adapter->itr = new_itr;
		adapter->rx_ring->itr_val = 1000000000 / (new_itr * 256);
		adapter->rx_ring->set_itr = 1;
	}

	return;
}
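
/* Conversion note: itr_val = 1000000000 / (new_itr * 256), so
 * new_itr = 20000 ints/s maps to ~195 and 4000 ints/s to ~976.  The
 * ramp-up bias means e.g. going from itr = 4000 to a target of 20000
 * first steps to min(4000 + (20000 >> 2), 20000) = 9000.
 */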
#define IGB_TX_FLAGS_CSUM	0x00000001
#define IGB_TX_FLAGS_VLAN	0x00000002
#define IGB_TX_FLAGS_TSO	0x00000004
#define IGB_TX_FLAGS_IPV4	0x00000008
#define IGB_TX_FLAGS_TSTAMP	0x00000010
#define IGB_TX_FLAGS_VLAN_MASK	0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT	16

static inline int igb_tso_adv(struct igb_adapter *adapter,
			      struct igb_ring *tx_ring,
			      struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx, l4len;
	*hdr_len = 0;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
	} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
	}

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGB_TX_FLAGS_VLAN)
		info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	info |= skb_network_header_len(skb);
	*hdr_len += skb_network_header_len(skb);
	context_desc->vlan_macip_lens = cpu_to_le32(info);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (skb->protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	/* For 82575, context index must be unique per ring. */
	if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
		mss_l4len_idx |= tx_ring->queue_index << 4;

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}
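
/* hdr_len accounting: for a plain TCP/IPv4 frame this sums
 * skb_network_offset() (the 14-byte Ethernet header), the IP header
 * length and tcp_hdrlen(), e.g. 14 + 20 + 20 = 54 bytes of headers
 * excluded from the TSO payload length in olinfo_status.
 */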
static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
				   struct igb_ring *tx_ring,
				   struct sk_buff *skb, u32 tx_flags)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGB_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IGB_TX_FLAGS_VLAN)
			info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= skb_network_header_len(skb);

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			__be16 protocol;

			if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
				const struct vlan_ethhdr *vhdr =
					(const struct vlan_ethhdr *)skb->data;

				protocol = vhdr->h_vlan_encapsulated_proto;
			} else {
				protocol = skb->protocol;
			}

			switch (protocol) {
			case cpu_to_be16(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			case cpu_to_be16(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				if (unlikely(net_ratelimit()))
					dev_warn(&adapter->pdev->dev,
					    "partial checksum but proto=%x!\n",
					    skb->protocol);
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
			context_desc->mss_l4len_idx =
				cpu_to_le32(tx_ring->queue_index << 4);
		else
			context_desc->mss_l4len_idx = 0;

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = 0;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}
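
/* Return-value note: true means a context descriptor was consumed
 * (checksum offload and/or VLAN tagging requested).  The transmit
 * path only sets IGB_TX_FLAGS_CSUM when ip_summed is
 * CHECKSUM_PARTIAL as well, since a VLAN-only context needs no
 * checksum insertion.
 */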
#define IGB_MAX_TXD_PWR	16
#define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)

static inline int igb_tx_map_adv(struct igb_adapter *adapter,
				 struct igb_ring *tx_ring, struct sk_buff *skb,
				 unsigned int first)
{
	struct igb_buffer *buffer_info;
	unsigned int len = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;
	dma_addr_t *map;

	i = tx_ring->next_to_use;

	if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
		return 0;
	}

	map = skb_shinfo(skb)->dma_maps;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
	buffer_info->length = len;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = map[count];
	count++;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		i++;
		if (i == tx_ring->count)
			i = 0;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = map[count];
		count++;
	}

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;
}
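
/* Mapping note: skb_dma_map() maps the linear head and every page
 * fragment in one call, leaving one DMA address per segment in
 * skb_shinfo(skb)->dma_maps, so count ends up as 1 + nr_frags.  A
 * return of 0 tells the caller to rewind next_to_use and drop the
 * skb.
 */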
static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
				    struct igb_ring *tx_ring,
				    int tx_flags, int count, u32 paylen,
				    u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc = NULL;
	struct igb_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
			E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGB_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
		cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;

	if (tx_flags & IGB_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGB_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGB_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) &&
	    (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
			 IGB_TX_FLAGS_VLAN)))
		olinfo_status |= tx_ring->queue_index << 4;

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();
}
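
/* Completion note: only the last descriptor of the packet gets
 * adapter->txd_cmd OR'd in (EOP | RS, set up in igb_configure_tx), so
 * hardware reports one status write-back per packet rather than per
 * buffer; the wmb() above orders the descriptor writes before the
 * tail bump that hands them to hardware.
 */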
static int __igb_maybe_stop_tx(struct net_device *netdev,
			       struct igb_ring *tx_ring, int size)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}

static int igb_maybe_stop_tx(struct net_device *netdev,
			     struct igb_ring *tx_ring, int size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(netdev, tx_ring, size);
}
static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
				   struct net_device *netdev,
				   struct igb_ring *tx_ring)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int count = 0;
	int tso = 0;
	union skb_shared_tx *shtx;

	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* need: 1 descriptor per page,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time */
	if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	/*
	 * TODO: check that there currently is no other packet with
	 * time stamping in the queue
	 *
	 * When doing time stamping, keep the connection to the socket
	 * a while longer: it is still needed by skb_hwtstamp_tx(),
	 * called either in igb_tx_hwtstamp() or by our caller when
	 * doing software time stamping.
	 */
	shtx = skb_tx(skb);
	if (unlikely(shtx->hardware)) {
		shtx->in_progress = 1;
		tx_flags |= IGB_TX_FLAGS_TSTAMP;
	}

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IGB_TX_FLAGS_IPV4;

	first = tx_ring->next_to_use;
	tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
					    &hdr_len) : 0;

	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IGB_TX_FLAGS_TSO;
	else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGB_TX_FLAGS_CSUM;

	/*
	 * count reflects descriptors mapped; if 0 then a mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
	count = igb_tx_map_adv(adapter, tx_ring, skb, first);

	if (count) {
		igb_tx_queue_adv(adapter, tx_ring, tx_flags, count,
				 skb->len, hdr_len);
		netdev->trans_start = jiffies;
		/* Make sure there is space in the ring for the next send. */
		igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}
static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct igb_ring *tx_ring;
	int r_idx = 0;

	r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
	tx_ring = adapter->multi_tx_table[r_idx];

	/* This goes back to the question of how to logically map a tx queue
	 * to a flow.  Right now, performance is impacted slightly negatively
	 * if using multiple tx queues.  If the stack breaks away from a
	 * single qdisc implementation, we can look at this again. */
	return igb_xmit_frame_ring_adv(skb, netdev, tx_ring);
}
/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}

static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_reinit_locked(adapter);
}
/**
 * igb_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *igb_get_stats(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}
/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;
	if (netif_running(netdev))
		igb_down(adapter);

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 */
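
	/*
	 * Editor's worked example (illustrative): with the default MTU of
	 * 1500, max_frame = 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) = 1518,
	 * which selects IGB_RXBUFFER_2048 below.  Per the note above, the
	 * skb allocation is then 2048 + 16 + NET_IP_ALIGN bytes, i.e. just
	 * over 2 KiB, so it is served from the 4096-byte slab.
	 */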
	if (max_frame <= IGB_RXBUFFER_256)
		adapter->rx_buffer_len = IGB_RXBUFFER_256;
	else if (max_frame <= IGB_RXBUFFER_512)
		adapter->rx_buffer_len = IGB_RXBUFFER_512;
	else if (max_frame <= IGB_RXBUFFER_1024)
		adapter->rx_buffer_len = IGB_RXBUFFER_1024;
	else if (max_frame <= IGB_RXBUFFER_2048)
		adapter->rx_buffer_len = IGB_RXBUFFER_2048;
	else
#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
		adapter->rx_buffer_len = IGB_RXBUFFER_16384;
#else
		adapter->rx_buffer_len = PAGE_SIZE / 2;
#endif

	/* if sr-iov is enabled we need to force buffer size to 1K or larger */
	if (adapter->vfs_allocated_count &&
	    (adapter->rx_buffer_len < IGB_RXBUFFER_1024))
		adapter->rx_buffer_len = IGB_RXBUFFER_1024;

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
	    (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}
/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igb_update_stats(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u16 phy_tmp;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	adapter->stats.mpc += rd32(E1000_MPC);
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	/* used for adaptive IFS */
	hw->mac.tx_packet_delta = rd32(E1000_TPT);
	adapter->stats.tpt += hw->mac.tx_packet_delta;
	hw->mac.collision_delta = rd32(E1000_COLC);
	adapter->stats.colc += hw->mac.collision_delta;

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	adapter->stats.rxerrc += rd32(E1000_RXERRC);
	adapter->stats.tncrs += rd32(E1000_TNCRS);
	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	adapter->net_stats.multicast = adapter->stats.mprc;
	adapter->net_stats.collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	adapter->net_stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	adapter->net_stats.rx_length_errors = adapter->stats.ruc +
					      adapter->stats.roc;
	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	adapter->net_stats.tx_errors = adapter->stats.ecol +
				       adapter->stats.latecol;
	adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
	adapter->net_stats.tx_window_errors = adapter->stats.latecol;
	adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->phy.media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);
}
static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);

	/* reading ICR causes bit 31 of EICR to be cleared */
	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB);
	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}
static irqreturn_t igb_msix_tx(int irq, void *data)
{
	struct igb_ring *tx_ring = data;
	struct igb_adapter *adapter = tx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_tx_dca(tx_ring);
#endif

	tx_ring->total_bytes = 0;
	tx_ring->total_packets = 0;

	/* auto mask will automatically reenable the interrupt when we write
	 * EICS */
	if (!igb_clean_tx_irq(tx_ring))
		/* Ring was not completely cleaned, so fire another interrupt */
		wr32(E1000_EICS, tx_ring->eims_value);
	else
		wr32(E1000_EIMS, tx_ring->eims_value);

	return IRQ_HANDLED;
}
static void igb_write_itr(struct igb_ring *ring)
{
	struct e1000_hw *hw = &ring->adapter->hw;

	if ((ring->adapter->itr_setting & 3) && ring->set_itr) {
		switch (hw->mac.type) {
		case e1000_82576:
			wr32(ring->itr_register, ring->itr_val |
			     0x80000000);
			break;
		default:
			wr32(ring->itr_register, ring->itr_val |
			     (ring->itr_val << 16));
			break;
		}
		ring->set_itr = 0;
	}
}
static irqreturn_t igb_msix_rx(int irq, void *data)
{
	struct igb_ring *rx_ring = data;

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
	igb_write_itr(rx_ring);

	if (napi_schedule_prep(&rx_ring->napi))
		__napi_schedule(&rx_ring->napi);

#ifdef CONFIG_IGB_DCA
	if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_rx_dca(rx_ring);
#endif
	return IRQ_HANDLED;
}
#ifdef CONFIG_IGB_DCA
static void igb_update_rx_dca(struct igb_ring *rx_ring)
{
	u32 dca_rxctrl;
	struct igb_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();
	int q = rx_ring->reg_idx;

	if (rx_ring->cpu != cpu) {
		dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82576) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_RXCTRL_CPUID_SHIFT;
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
		rx_ring->cpu = cpu;
	}
	put_cpu();
}

static void igb_update_tx_dca(struct igb_ring *tx_ring)
{
	u32 dca_txctrl;
	struct igb_adapter *adapter = tx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();
	int q = tx_ring->reg_idx;

	if (tx_ring->cpu != cpu) {
		dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82576) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_TXCTRL_CPUID_SHIFT;
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
		tx_ring->cpu = cpu;
	}
	put_cpu();
}

static void igb_setup_dca(struct igb_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].cpu = -1;
		igb_update_tx_dca(&adapter->tx_ring[i]);
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].cpu = -1;
		igb_update_rx_dca(&adapter->rx_ring[i]);
	}
}
static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		/* Always use CB2 mode, difference is masked
		 * in the CB driver. */
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&adapter->pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&adapter->pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
					 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].clear_to_send)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}

static int igb_set_vf_multicasts(struct igb_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* VFs are limited to using the MTA hash table for their multicast
	 * addresses */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_multi(adapter->netdev);

	return 0;
}
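
/*
 * Editor's sketch (illustrative, inferred from the masks used above): a
 * VF-to-PF E1000_VF_SET_MULTICAST mailbox message appears to be laid out as
 *
 *	msgbuf[0]:   message type in the low bits, entry count in the
 *	             E1000_VT_MSGINFO field (extracted via
 *	             E1000_VT_MSGINFO_MASK >> E1000_VT_MSGINFO_SHIFT)
 *	msgbuf[1..]: packed u16 multicast hash values
 *
 * which is why the handler reads the count from word 0 and casts
 * &msgbuf[1] to a u16 hash list.
 */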
static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		vf_data = &adapter->vf_data[i];
		for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
			igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
	}
}
static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta; note the
		 * VLAN id must be read out before the register is zeroed */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			vid = reg & E1000_VLVF_VLANID_MASK;
			reg = 0;
			igb_vfta_set(hw, vid, false);
		}

		wr32(E1000_VLVF(i), reg);
	}
}
static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* It is an error to call this function when VFs are not enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled.  Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table, if bit already set
				 * PF must have added it outside of table */
				if (igb_vfta_set(hw, vid, true))
					reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT +
						     adapter->vfs_allocated_count);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;

			wr32(E1000_VLVF(i), reg);
			return 0;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);
			return 0;
		}
	}
	return -1;
}
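
/*
 * Editor's sketch (illustrative, inferred from the bit operations above):
 * each of the E1000_VLVF_ARRAY_SIZE VLVF entries pairs one VLAN ID with a
 * pool-membership bitmap:
 *
 *	VLAN ID     - low bits, selected by E1000_VLVF_VLANID_MASK
 *	pool select - one bit per pool; VF 'vf' uses bit
 *	              E1000_VLVF_POOLSEL_SHIFT + vf
 *	enable      - E1000_VLVF_VLANID_ENABLE
 *
 * Adding a VF to a VLAN sets its pool bit in the matching (or a free)
 * entry; when the last pool bit clears, the entry is zeroed and the
 * corresponding VFTA bit is released.
 */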
static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}
static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	/* disable mailbox functionality for vf */
	adapter->vf_data[vf].clear_to_send = false;

	/* reset offloads to defaults */
	igb_set_vmolr(hw, vf);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_multi(adapter->netdev);
}

static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset_event(adapter, vf);

	/* set vf mac address */
	igb_rar_set(hw, vf_mac, vf + 1);
	igb_set_rah_pool(hw, vf, vf + 1);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	/* enable mailbox functionality for vf */
	adapter->vf_data[vf].clear_to_send = true;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, 6);
	igb_write_mbx(hw, msgbuf, 3, vf);
}

static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	unsigned char *addr = (char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}
static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!adapter->vf_data[vf].clear_to_send)
		igb_write_mbx(hw, &msg, 1, vf);
}

static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf)) {
			adapter->vf_data[vf].clear_to_send = false;
			igb_vf_reset_event(adapter, vf);
		}

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}

static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	u32 mbx_size = E1000_VFMAILBOX_SIZE;
	u32 msgbuf[mbx_size];
	struct e1000_hw *hw = &adapter->hw;
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, mbx_size, vf);

	if (retval)
		dev_err(&adapter->pdev->dev,
			"Error receiving message from VF\n");

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return retval;

	/*
	 * until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */
	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return retval;
	}

	if (!adapter->vf_data[vf].clear_to_send) {
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
		igb_write_mbx(hw, msgbuf, 1, vf);
		return retval;
	}

	switch (msgbuf[0] & 0xFFFF) {
	case E1000_VF_SET_MAC_ADDR:
		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;

	igb_write_mbx(hw, msgbuf, 1, vf);

	return retval;
}
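
/*
 * Editor's sketch (illustrative): the PF/VF mailbox handshake the routines
 * above implement, from the PF's point of view:
 *
 *	VF                        PF (this driver)
 *	--                        ----------------
 *	E1000_VF_RESET     ---->  igb_vf_reset_msg(): reprogram MAC, enable
 *	                          VFTE/VFRE, set clear_to_send
 *	                   <----  E1000_VF_RESET | ACK, plus MAC address
 *	E1000_VF_SET_*     ---->  dispatched via the switch above
 *	                   <----  original type | ACK or NACK, plus CTS
 *
 * Configuration requests that arrive before the reset handshake completes
 * are NACKed; that is the clear_to_send check above.
 */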
/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(adapter->rx_ring);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&adapter->rx_ring[0].napi);

	return IRQ_HANDLED;
}
/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);
	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	igb_write_itr(adapter->rx_ring);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&adapter->rx_ring[0].napi);

	return IRQ_HANDLED;
}
static inline void igb_rx_irq_enable(struct igb_ring *rx_ring)
{
	struct igb_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->itr_setting & 3) {
		if (adapter->num_rx_queues == 1)
			igb_set_itr(adapter);
		else
			igb_update_ring_itr(rx_ring);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, rx_ring->eims_value);
		else
			igb_irq_enable(adapter);
	}
}

/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
	int work_done = 0;

#ifdef CONFIG_IGB_DCA
	if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_rx_dca(rx_ring);
#endif
	igb_clean_rx_irq_adv(rx_ring, &work_done, budget);

	if (rx_ring->buddy) {
#ifdef CONFIG_IGB_DCA
		if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
			igb_update_tx_dca(rx_ring->buddy);
#endif
		if (!igb_clean_tx_irq(rx_ring->buddy))
			work_done = budget;
	}

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		igb_rx_irq_enable(rx_ring);
	}

	return work_done;
}
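
/*
 * Editor's note (illustrative): igb_poll() follows the standard NAPI
 * contract.  In sketch form:
 *
 *	work_done = process up to 'budget' rx packets;
 *	if (work_done < budget) {       // ring drained
 *		napi_complete(napi);    // leave polling mode
 *		re-enable the rx interrupt;
 *	}
 *	return work_done;               // == budget keeps us on the poll list
 *
 * Forcing work_done = budget when the buddy tx ring was not fully cleaned
 * keeps the poll scheduled so tx reclaim also makes progress.
 */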
/**
 * igb_tx_hwtstamp - utility function which checks for TX time stamp
 * @adapter: board private structure
 * @skb: packet that was just sent
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we only
 * allow one such packet into the queue.
 */
static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb)
{
	union skb_shared_tx *shtx = skb_tx(skb);
	struct e1000_hw *hw = &adapter->hw;

	if (unlikely(shtx->hardware)) {
		u32 valid = rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID;
		if (valid) {
			u64 regval = rd32(E1000_TXSTMPL);
			u64 ns;
			struct skb_shared_hwtstamps shhwtstamps;

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			regval |= (u64)rd32(E1000_TXSTMPH) << 32;
			ns = timecounter_cyc2time(&adapter->clock,
						  regval);
			timecompare_update(&adapter->compare, ns);
			shhwtstamps.hwtstamp = ns_to_ktime(ns);
			shhwtstamps.syststamp =
				timecompare_transform(&adapter->compare, ns);
			skb_tstamp_tx(skb, &shhwtstamps);
		}
	}
}
/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: ring to be cleaned
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
{
	struct igb_adapter *adapter = tx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		for (cleaned = false; !cleaned; count++) {
			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;
				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;

				igb_tx_hwtstamp(adapter, skb);
			}

			igb_unmap_and_free_tx_resource(adapter, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
		     netif_carrier_ok(netdev) &&
		     igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	if (tx_ring->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		tx_ring->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
			       (adapter->tx_timeout_factor * HZ))
		    && !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(&adapter->pdev->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%x>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				readl(adapter->hw.hw_addr + tx_ring->head),
				readl(adapter->hw.hw_addr + tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[i].time_stamp,
				eop,
				jiffies,
				eop_desc->wb.status);
			netif_stop_subqueue(netdev, tx_ring->queue_index);
		}
	}
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	adapter->net_stats.tx_bytes += total_bytes;
	adapter->net_stats.tx_packets += total_packets;
	return (count < tx_ring->count);
}
/**
 * igb_receive_skb - helper function to handle rx indications
 * @ring: pointer to receive ring receiving this packet
 * @status: descriptor status field as written by hardware
 * @rx_desc: receive descriptor containing vlan and type information.
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void igb_receive_skb(struct igb_ring *ring, u8 status,
			    union e1000_adv_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	struct igb_adapter *adapter = ring->adapter;
	bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));

	skb_record_rx_queue(skb, ring->queue_index);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (vlan_extracted)
			vlan_gro_receive(&ring->napi, adapter->vlgrp,
					 le16_to_cpu(rx_desc->wb.upper.vlan),
					 skb);
		else
			napi_gro_receive(&ring->napi, skb);
	} else {
		if (vlan_extracted)
			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
						 le16_to_cpu(rx_desc->wb.upper.vlan));
		else
			netif_receive_skb(skb);
	}
}

static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
				       u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if ((status_err & E1000_RXD_STAT_IXSM) || !adapter->rx_csum)
		return;
	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	adapter->hw_csum_good++;
}
static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
				 int *work_done, int budget)
{
	struct igb_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igb_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 length, hlen, staterr;

	i = rx_ring->next_to_clean;
	buffer_info = &rx_ring->buffer_info[i];
	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= budget)
			break;
		(*work_done)++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		if (!adapter->rx_ps_hdr_size) {
			pci_unmap_single(pdev, buffer_info->dma,
					 adapter->rx_buffer_len +
					 NET_IP_ALIGN,
					 PCI_DMA_FROMDEVICE);
			skb_put(skb, length);
			goto send_up;
		}

		/* HW will not DMA in data larger than the given buffer, even
		 * if it parses the (NFS, of course) header to be larger.  In
		 * that case, it fills the header buffer and spills the rest
		 * into the page.
		 */
		hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
			E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
		if (hlen > adapter->rx_ps_hdr_size)
			hlen = adapter->rx_ps_hdr_size;

		if (!skb_shinfo(skb)->nr_frags) {
			pci_unmap_single(pdev, buffer_info->dma,
					 adapter->rx_ps_hdr_size + NET_IP_ALIGN,
					 PCI_DMA_FROMDEVICE);
			skb_put(skb, hlen);
		}

		if (length) {
			pci_unmap_page(pdev, buffer_info->page_dma,
				       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
			    (page_count(buffer_info->page) != 1))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}
send_up:
		/*
		 * If this bit is set, then the RX registers contain
		 * the time stamp. No other packet will be time
		 * stamped until we read these registers, so read the
		 * registers to make them available again. Because
		 * only one packet can be time stamped at a time, we
		 * know that the register values must belong to this
		 * one here and therefore we don't need to compare
		 * any of the additional attributes stored for it.
		 *
		 * If nothing went wrong, then it should have a
		 * skb_shared_tx that we can turn into a
		 * skb_shared_hwtstamps.
		 *
		 * TODO: can time stamping be triggered (thus locking
		 * the registers) without the packet reaching this point
		 * here? In that case RX time stamping would get stuck.
		 *
		 * TODO: in "time stamp all packets" mode this bit is
		 * not set. Need a global flag for this mode and then
		 * always read the registers. Cannot be done without
		 * a race condition.
		 */
		if (unlikely(staterr & E1000_RXD_STAT_TS)) {
			u64 regval;
			u64 ns;
			struct skb_shared_hwtstamps *shhwtstamps =
				skb_hwtstamps(skb);

			WARN(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID),
			     "igb: no RX time stamp available for time stamped packet");
			regval = rd32(E1000_RXSTMPL);
			regval |= (u64)rd32(E1000_RXSTMPH) << 32;
			ns = timecounter_cyc2time(&adapter->clock, regval);
			timecompare_update(&adapter->compare, ns);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
			shhwtstamps->hwtstamp = ns_to_ktime(ns);
			shhwtstamps->syststamp =
				timecompare_transform(&adapter->compare, ns);
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		total_bytes += skb->len;
		total_packets++;

		igb_rx_checksum_adv(adapter, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);

		igb_receive_skb(rx_ring, staterr, rx_desc, skb);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igb_desc_unused(rx_ring);

	if (cleaned_count)
		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);

	rx_ring->total_packets += total_packets;
	rx_ring->total_bytes += total_bytes;
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	adapter->net_stats.rx_bytes += total_bytes;
	adapter->net_stats.rx_packets += total_packets;
	return cleaned;
}
/**
 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 * @rx_ring: address of ring structure to repopulate
 * @cleaned_count: number of buffers to replace
 **/
static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
				     int cleaned_count)
{
	struct igb_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	if (adapter->rx_ps_hdr_size)
		bufsz = adapter->rx_ps_hdr_size;
	else
		bufsz = adapter->rx_buffer_len;
	bufsz += NET_IP_ALIGN;

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);

		if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = alloc_page(GFP_ATOMIC);
				if (!buffer_info->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				pci_map_page(pdev, buffer_info->page,
					     buffer_info->page_offset,
					     PAGE_SIZE / 2,
					     PCI_DMA_FROMDEVICE);
		}

		if (!buffer_info->skb) {
			skb = netdev_alloc_skb(netdev, bufsz);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/* Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			buffer_info->skb = skb;
			buffer_info->dma = pci_map_single(pdev, skb->data,
							  bufsz,
							  PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->rx_ps_hdr_size) {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
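
/*
 * Editor's sketch (illustrative): the packet-split buffer scheme shared by
 * the two rx routines above.  Each descriptor advertises two DMA targets:
 *
 *	rx_desc->read.hdr_addr -> small skb buffer (rx_ps_hdr_size bytes);
 *	                          hardware parses headers into it
 *	rx_desc->read.pkt_addr -> half of a page (PAGE_SIZE / 2); payload
 *	                          beyond the header buffer spills here
 *
 * On completion the payload half-page is attached to the skb as a paged
 * fragment, and page_offset is XORed with PAGE_SIZE / 2 so the other half
 * of the same page can back a later descriptor.
 */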
/**
 * igb_mii_ioctl -
 * @netdev:
 * @ifreq:
 * @cmd:
 **/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
/**
 * igb_hwtstamp_ioctl - control hardware time stamping
 * @netdev:
 * @ifreq:
 * @cmd:
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * level 2 or 4".
 **/
static int igb_hwtstamp_ioctl(struct net_device *netdev,
			      struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl_bit = E1000_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_ctl_type = 0;
	u32 tsync_rx_cfg = 0;
	int is_l4 = 0;
	int is_l2 = 0;
	short port = 319; /* PTP */
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl_bit = 0;
		break;
	case HWTSTAMP_TX_ON:
		tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl_bit = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/*
		 * register TSYNCRXCFG must be set, therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages
		 * => fall back to time stamping all packets
		 */
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = 1;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = 1;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
		is_l2 = 1;
		is_l4 = 1;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
		is_l2 = 1;
		is_l4 = 1;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = 1;
		break;
	default:
		return -ERANGE;
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval = (regval & ~E1000_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX, define which PTP packets are time stamped */
	regval = rd32(E1000_TSYNCRXCTL);
	regval = (regval & ~E1000_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit;
	regval = (regval & ~0xE) | tsync_rx_ctl_type;
	wr32(E1000_TSYNCRXCTL, regval);
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/*
	 * Ethertype Filter Queue Filter[0][15:0] = 0x88F7
	 *                                          (Ethertype to filter on)
	 * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter)
	 * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping)
	 */
	wr32(E1000_ETQF0, is_l2 ? 0x440088f7 : 0);

	/* L4 Queue Filter[0]: only filter by source and destination port */
	wr32(E1000_SPQF0, htons(port));
	wr32(E1000_IMIREXT(0), is_l4 ?
	     ((1 << 12) | (1 << 19) /* bypass size and control flags */) : 0);
	wr32(E1000_IMIR(0), is_l4 ?
	     (htons(port)
	      | (0 << 16) /* immediate interrupt disabled */
	      | 0 /* (1<<17) bit cleared: do not bypass
		     destination port check */)
	     : 0);
	wr32(E1000_FTQF0, is_l4 ?
	     (0x11 /* UDP */
	      | (1 << 15) /* VF not compared */
	      | (1 << 27) /* Enable Timestamping */
	      | (7 << 28) /* only source port filter enabled,
			     source/target address and protocol
			     masked */)
	     : ((1 << 15) | (15 << 28) /* all mask bits set = filter not
					  enabled */));
	wrfl();

	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
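
/*
 * Editor's sketch (illustrative, hypothetical userspace code, not part of
 * the driver): how an application would drive the ioctl above.  "eth0" and
 * the chosen filter are assumptions for the example.
 *
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *
 *	struct ifreq ifr;
 *	struct hwtstamp_config cfg;
 *
 *	memset(&cfg, 0, sizeof(cfg));
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);   // fd: any socket on eth0
 *
 * On return, cfg.rx_filter holds what was actually enabled; as the handler
 * above shows, the driver may widen a request (e.g. to HWTSTAMP_FILTER_ALL).
 */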
/**
 * igb_ioctl -
 * @netdev:
 * @ifreq:
 * @cmd:
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
static void igb_vlan_rx_register ( struct net_device * netdev ,
struct vlan_group * grp )
{
struct igb_adapter * adapter = netdev_priv ( netdev ) ;
struct e1000_hw * hw = & adapter - > hw ;
u32 ctrl , rctl ;
igb_irq_disable ( adapter ) ;
adapter - > vlgrp = grp ;
if ( grp ) {
/* enable VLAN tag insert/strip */
ctrl = rd32 ( E1000_CTRL ) ;
ctrl | = E1000_CTRL_VME ;
wr32 ( E1000_CTRL , ctrl ) ;
/* enable VLAN receive filtering */
rctl = rd32 ( E1000_RCTL ) ;
rctl & = ~ E1000_RCTL_CFIEN ;
wr32 ( E1000_RCTL , rctl ) ;
igb_update_mng_vlan ( adapter ) ;
} else {
/* disable VLAN tag insert/strip */
ctrl = rd32 ( E1000_CTRL ) ;
ctrl & = ~ E1000_CTRL_VME ;
wr32 ( E1000_CTRL , ctrl ) ;
if ( adapter - > mng_vlan_id ! = ( u16 ) IGB_MNG_VLAN_NONE ) {
igb_vlan_rx_kill_vid ( netdev , adapter - > mng_vlan_id ) ;
adapter - > mng_vlan_id = IGB_MNG_VLAN_NONE ;
}
}
2009-02-19 20:39:44 -08:00
igb_rlpml_set ( adapter ) ;
2008-01-24 02:22:38 -08:00
if ( ! test_bit ( __IGB_DOWN , & adapter - > state ) )
igb_irq_enable ( adapter ) ;
}
static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id))
		return;

	/* add vid to vlvf if sr-iov is enabled,
	 * if that fails add directly to filter table */
	if (igb_vlvf_set(adapter, vid, true, pf_id))
		igb_vfta_set(hw, vid, true);
}
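
/**
 * igb_vlan_rx_kill_vid - remove a VLAN ID from the hardware filters
 * @netdev: network interface device structure
 * @vid: VLAN ID to be removed
 *
 * Releases hardware control to firmware when the management VLAN is
 * being removed; otherwise clears the ID from the VLVF or VFTA.
 **/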
static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	igb_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id)) {
		/* release control to f/w */
		igb_release_hw_control(adapter);
		return;
	}

	/* remove vid from vlvf if sr-iov is enabled,
	 * if not in vlvf remove from vfta */
	if (igb_vlvf_set(adapter, vid, false, pf_id))
		igb_vfta_set(hw, vid, false);
}
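
/**
 * igb_restore_vlan - re-apply the current VLAN configuration
 * @adapter: board private structure
 *
 * Re-registers the VLAN group and reprograms every VLAN ID it contains
 * back into the hardware filters.
 **/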
static void igb_restore_vlan(struct igb_adapter *adapter)
{
	igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			igb_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}
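
/**
 * igb_set_spd_dplx - force a speed/duplex setting
 * @adapter: board private structure
 * @spddplx: requested setting, expressed as SPEED_* plus DUPLEX_*
 *
 * Returns 0 on success or -EINVAL if the combination is not supported
 * by the hardware.
 **/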
int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	/* Fiber NICs only allow 1000 Mbps Full duplex */
	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
	    spddplx != (SPEED_1000 + DUPLEX_FULL)) {
		dev_err(&adapter->pdev->dev,
			"Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		dev_err(&adapter->pdev->dev,
			"Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}
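
/**
 * __igb_shutdown - common suspend/shutdown worker
 * @pdev: PCI device information struct
 * @enable_wake: set on return when the device should remain wake-capable
 *
 * Stops the interface, saves PCI state and, when Wake-on-LAN is
 * configured, programs the wake-up control and filter registers.
 **/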
static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_close(netdev);

	igb_reset_interrupt_capability(adapter);
	igb_free_queues(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_multi(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(&adapter->hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_shutdown_fiber_serdes_link_82575(hw);

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
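
/**
 * igb_suspend - PCI suspend callback
 * @pdev: PCI device information struct
 * @state: target power state (the wake decision comes from __igb_shutdown)
 **/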
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
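
/**
 * igb_resume - PCI resume callback
 * @pdev: PCI device information struct
 *
 * Restores PCI state, reallocates the interrupt vectors and queues,
 * resets the hardware and reopens the interface if it was running.
 **/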
static int igb_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	igb_set_interrupt_capability(adapter);

	if (igb_alloc_queues(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* e1000_power_up_phy(adapter); */

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netif_running(netdev)) {
		err = igb_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif
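
/**
 * igb_shutdown - PCI shutdown callback
 * @pdev: PCI device information struct
 **/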
static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!adapter->msix_entries) {
		igb_irq_disable(adapter);
		napi_schedule(&adapter->rx_ring[0].napi);
		return;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = &adapter->tx_ring[i];
		wr32(E1000_EIMC, tx_ring->eims_value);
		igb_clean_tx_irq(tx_ring);
		wr32(E1000_EIMS, tx_ring->eims_value);
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *rx_ring = &adapter->rx_ring[i];
		wr32(E1000_EIMC, rx_ring->eims_value);
		napi_schedule(&rx_ring->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
			"failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}
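
/**
 * igb_set_vmolr - configure receive offload behavior for a VMDq pool
 * @hw: pointer to the HW structure
 * @vfn: pool/VF number to configure
 **/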
static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
{
	u32 reg_data;

	reg_data = rd32(E1000_VMOLR(vfn));
	reg_data |= E1000_VMOLR_BAM |	 /* Accept broadcast */
		    E1000_VMOLR_ROPE |   /* Accept packets matched in UTA */
		    E1000_VMOLR_ROMPE |  /* Accept packets matched in MTA */
		    E1000_VMOLR_AUPE |   /* Accept untagged packets */
		    E1000_VMOLR_STRVLAN; /* Strip vlan tags */
	wr32(E1000_VMOLR(vfn), reg_data);
}
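
/**
 * igb_set_vf_rlpml - set the maximum receive packet length for a pool
 * @adapter: board private structure
 * @size: maximum frame size to accept
 * @vfn: pool/VF number to configure
 **/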
static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
				   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;
	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}
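
/**
 * igb_set_rah_pool - route a receive address register entry to a pool
 * @hw: pointer to the HW structure
 * @pool: pool/VF number the entry should be steered to
 * @entry: receive address register (RAR) index to update
 **/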
static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry)
{
	u32 reg_data;

	reg_data = rd32(E1000_RAH(entry));
	reg_data &= ~E1000_RAH_POOL_MASK;
	reg_data |= E1000_RAH_POOL_1 << pool;
	wr32(E1000_RAH(entry), reg_data);
}
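
/**
 * igb_set_mc_list_pools - assign the multicast RAR entries to the PF pool
 * @adapter: board private structure
 * @entry_count: number of multicast addresses in use
 * @total_rar_filters: number of receive address registers available
 **/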
static void igb_set_mc_list_pools(struct igb_adapter *adapter,
				  int entry_count, u16 total_rar_filters)
{
	struct e1000_hw *hw = &adapter->hw;
	int i = adapter->vfs_allocated_count + 1;

	if ((i + entry_count) < total_rar_filters)
		total_rar_filters = i + entry_count;

	for (; i < total_rar_filters; i++)
		igb_set_rah_pool(hw, adapter->vfs_allocated_count, i);
}
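
/**
 * igb_set_vf_mac - assign a MAC address to a virtual function
 * @adapter: board private structure
 * @vf: VF number to program
 * @mac_addr: MAC address to assign
 **/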
static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	int rar_entry = vf + 1; /* VF MAC addresses start at entry 1 */

	igb_rar_set(hw, mac_addr, rar_entry);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_set_rah_pool(hw, vf, rar_entry);

	return 0;
}
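
/**
 * igb_vmm_control - enable inter-VM traffic handling for SR-IOV
 * @adapter: board private structure
 *
 * Signals PF reset completion to the VFs and enables VMDq loopback
 * and replication in the hardware.
 **/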
static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg_data;

	if (!adapter->vfs_allocated_count)
		return;

	/* VF's need PF reset indication before they
	 * can send/receive mail */
	reg_data = rd32(E1000_CTRL_EXT);
	reg_data |= E1000_CTRL_EXT_PFRSTD;
	wr32(E1000_CTRL_EXT, reg_data);

	igb_vmdq_set_loopback_pf(hw, true);
	igb_vmdq_set_replication_pf(hw, true);
}
/* igb_main.c */