/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2007 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "ixgbe.h"
#include "ixgbe_common.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "1.3.18-k4"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
	"Copyright (c) 1999-2007 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
	 board_82598 },

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

#ifdef DEBUG
/**
 * ixgbe_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	struct net_device *netdev = adapter->netdev;
	return netdev->name;
}
#endif
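
/*
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @int_alloc_entry: interrupt allocation entry (cause index) in the IVAR table
 * @msix_vector: the MSI-X vector to map the cause to
 *
 * Each IVAR register holds four 8-bit allocation entries: bits [6:2] of the
 * entry index select the register, bits [1:0] select the byte within it.
 */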
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
			   u8 msix_vector)
{
	u32 ivar, index;

	msix_vector |= IXGBE_IVAR_ALLOC_VAL;
	index = (int_alloc_entry >> 2) & 0x1F;
	ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index));
	ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3)));
	ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3)));
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
}

static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
					     struct ixgbe_tx_buffer
					     *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		pci_unmap_page(adapter->pdev,
			       tx_buffer_info->dma,
			       tx_buffer_info->length, PCI_DMA_TODEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	/* tx_buffer_info must be completely set up in the transmit path */
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
				       struct ixgbe_ring *tx_ring,
				       unsigned int eop,
				       union ixgbe_adv_tx_desc *eop_desc)
{
	/* Detect a transmit hang in hardware, this serializes the
	 * check with the clearing of time_stamp and movement of i */
	adapter->detect_tx_hung = false;
	if (tx_ring->tx_buffer_info[eop].dma &&
	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
	    !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
		/* detected Tx unit hang */
		DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
			"  TDH                  <%x>\n"
			"  TDT                  <%x>\n"
			"  next_to_use          <%x>\n"
			"  next_to_clean        <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp           <%lx>\n"
			"  next_to_watch        <%x>\n"
			"  jiffies              <%lx>\n"
			"  next_to_watch.status <%x>\n",
			readl(adapter->hw.hw_addr + tx_ring->head),
			readl(adapter->hw.hw_addr + tx_ring->tail),
			tx_ring->next_to_use,
			tx_ring->next_to_clean,
			tx_ring->tx_buffer_info[eop].time_stamp,
			eop, jiffies, eop_desc->wb.status);
		return true;
	}

	return false;
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)	/* for context */

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
			       struct ixgbe_ring *tx_ring)
{
	struct net_device *netdev = adapter->netdev;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int i, eop;
	bool cleaned = false;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
	while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) {
		cleaned = false;
		while (!cleaned) {
			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);

			tx_ring->stats.bytes += tx_buffer_info->length;
			if (cleaned) {
				struct sk_buff *skb = tx_buffer_info->skb;
				unsigned int segs, bytecount;
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_tx_packets += segs;
				total_tx_bytes += bytecount;
			}
			ixgbe_unmap_and_free_tx_resource(adapter,
							 tx_buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		tx_ring->stats.packets++;

		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

		/* weight of a sort for tx, to avoid endless transmit cleanup */
		if (total_tx_packets >= tx_ring->work_limit)
			break;
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (total_tx_packets && netif_carrier_ok(netdev) &&
	    (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			adapter->restart_queue++;
		}
	}

	if (adapter->detect_tx_hung)
		if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
			netif_stop_subqueue(netdev, tx_ring->queue_index);

	if (total_tx_packets >= tx_ring->work_limit)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);

	tx_ring->total_bytes += total_tx_bytes;
	tx_ring->total_packets += total_tx_packets;
	adapter->net_stats.tx_bytes += total_tx_bytes;
	adapter->net_stats.tx_packets += total_tx_packets;
	cleaned = total_tx_packets ? true : false;
	return cleaned;
}

#ifdef CONFIG_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring)
{
	u32 rxctrl;
	int cpu = get_cpu();
	int q = rx_ring - adapter->rx_ring;

	if (rx_ring->cpu != cpu) {
		rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
		rxctrl |= dca_get_tag(cpu);
		rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
		rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
		rx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring)
{
	u32 txctrl;
	int cpu = get_cpu();
	int q = tx_ring - adapter->tx_ring;

	if (tx_ring->cpu != cpu) {
		txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
		txctrl |= dca_get_tag(cpu);
		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
		tx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].cpu = -1;
		ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].cpu = -1;
		ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
	}
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		/* Always use CB2 mode, difference is masked
		 * in the CB driver. */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
		if (dca_add_requester(dev) == 0) {
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_DCA */

/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @adapter: board private structure
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
			      struct sk_buff *skb, u8 status,
			      struct ixgbe_ring *ring,
			      union ixgbe_adv_rx_desc *rx_desc)
{
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (adapter->netdev->features & NETIF_F_LRO &&
	    skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (adapter->vlgrp && is_vlan)
			lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
						     adapter->vlgrp, tag,
						     rx_desc);
		else
			lro_receive_skb(&ring->lro_mgr, skb, rx_desc);
		ring->lro_used = true;
	} else {
		if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
			if (adapter->vlgrp && is_vlan)
				vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
							 tag);
			else
				netif_receive_skb(skb);
		} else {
			if (adapter->vlgrp && is_vlan)
				vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
			else
				netif_rx(skb);
		}
	}
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
				     u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
				   struct ixgbe_ring *rx_ring,
				   int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buf_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

		if (!bi->page &&
		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
			bi->page = alloc_page(GFP_ATOMIC);
			if (!bi->page) {
				adapter->alloc_rx_page_failed++;
				goto no_buffers;
			}
			bi->page_dma = pci_map_page(pdev, bi->page, 0,
						    PAGE_SIZE,
						    PCI_DMA_FROMDEVICE);
		}

		if (!bi->skb) {
			struct sk_buff *skb = netdev_alloc_skb(netdev, bufsz);

			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/*
			 * Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			bi->skb = skb;
			bi->dma = pci_map_single(pdev, skb->data, bufsz,
						 PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}
no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
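
/**
 * ixgbe_clean_rx_irq - Reclaim used receive descriptors, send packets up
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring to clean
 * @work_done: incremented by the number of packets processed
 * @work_to_do: NAPI budget; cleaning stops once it is exhausted
 **/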
static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
			       struct ixgbe_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 upper_len, len, staterr;
	u16 hdr_info;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	upper_len = 0;
	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			hdr_info =
			    le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info);
			len =
			    ((hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			     IXGBE_RXDADV_HDRBUFLEN_SHIFT);
			if (hdr_info & IXGBE_RXDADV_SPH)
				adapter->rx_hdr_split++;
			if (len > IXGBE_RX_HDR_SIZE)
				len = IXGBE_RX_HDR_SIZE;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		} else
			len = le16_to_cpu(rx_desc->wb.upper.length);

		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (len && !skb_shinfo(skb)->nr_frags) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
					 adapter->rx_buf_len + NET_IP_ALIGN,
					 PCI_DMA_FROMDEVICE);
			skb_put(skb, len);
		}

		if (upper_len) {
			pci_unmap_page(pdev, rx_buffer_info->page_dma,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buffer_info->page, 0, upper_len);
			rx_buffer_info->page = NULL;

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_buffer = &rx_ring->rx_buffer_info[i];

		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);

		cleaned_count++;
		if (staterr & IXGBE_RXD_STAT_EOP) {
			rx_ring->stats.packets++;
			rx_ring->stats.bytes += skb->len;
		} else {
			rx_buffer_info->skb = next_buffer->skb;
			rx_buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			adapter->non_eop_descs++;
			goto next_desc;
		}

		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbe_rx_checksum(adapter, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, netdev);
		ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);
		netdev->last_rx = jiffies;

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	if (rx_ring->lro_used) {
		lro_flush_all(&rx_ring->lro_mgr);
		rx_ring->lro_used = false;
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;

	return cleaned;
}

static int ixgbe_clean_rxonly(struct napi_struct *, int);

/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector;
	int i, j, q_vectors, v_idx, r_idx;
	u32 mask;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = &adapter->q_vector[v_idx];
		/* XXX for_each_bit(...) */
		r_idx = find_first_bit(q_vector->rxr_idx,
				       adapter->num_rx_queues);

		for (i = 0; i < q_vector->rxr_count; i++) {
			j = adapter->rx_ring[r_idx].reg_idx;
			ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
			r_idx = find_next_bit(q_vector->rxr_idx,
					      adapter->num_rx_queues,
					      r_idx + 1);
		}
		r_idx = find_first_bit(q_vector->txr_idx,
				       adapter->num_tx_queues);

		for (i = 0; i < q_vector->txr_count; i++) {
			j = adapter->tx_ring[r_idx].reg_idx;
			ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx);
			r_idx = find_next_bit(q_vector->txr_idx,
					      adapter->num_tx_queues,
					      r_idx + 1);
		}

		/* if this is a tx only vector use half the irq (tx) rate */
		if (q_vector->txr_count && !q_vector->rxr_count)
			q_vector->eitr = adapter->tx_eitr;
		else
			/* rx only or mixed */
			q_vector->eitr = adapter->rx_eitr;

		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
				EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
	}

	ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

	/* set up to autoclear timer, lsc, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~IXGBE_EIMS_OTHER;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * this functionality is controlled by the InterruptThrottleRate module
 * parameter (see ixgbe_param.c)
 **/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
			   u32 eitr, u8 itr_setting,
			   int packets, int bytes)
{
	unsigned int retval = itr_setting;
	u32 timepassed_us;
	u64 bytes_perint;

	if (packets == 0)
		goto update_itr_done;

	/* simple throttlerate management
	 *    0-20MB/s   lowest (100000 ints/s)
	 *   20-100MB/s  low    (20000 ints/s)
	 *  100-1249MB/s bulk   (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = 1000000 / eitr;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > adapter->eitr_low)
			retval = low_latency;
		break;
	case low_latency:
		if (bytes_perint > adapter->eitr_high)
			retval = bulk_latency;
		else if (bytes_perint <= adapter->eitr_low)
			retval = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= adapter->eitr_high)
			retval = low_latency;
		break;
	}

update_itr_done:
	return retval;
}
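
/*
 * ixgbe_set_itr_msix - re-derive the ITR class for one MSI-X vector from the
 * per-ring byte/packet counts of every tx and rx ring it services, then
 * write the exponentially smoothed result to that vector's EITR register.
 */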
static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 new_itr;
	u8 current_itr, ret_itr;
	int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
			      sizeof(struct ixgbe_q_vector);
	struct ixgbe_ring *rx_ring, *tx_ring;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
					   q_vector->tx_eitr,
					   tx_ring->total_packets,
					   tx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->tx_eitr = ((q_vector->tx_eitr > ret_itr) ?
				     q_vector->tx_eitr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->txr_idx,
				      adapter->num_tx_queues, r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
					   q_vector->rx_eitr,
					   rx_ring->total_packets,
					   rx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->rx_eitr = ((q_vector->rx_eitr > ret_itr) ?
				     q_vector->rx_eitr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->rxr_idx,
				      adapter->num_rx_queues, r_idx + 1);
	}

	current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
	default:
		new_itr = 8000;
		break;
	}

	if (new_itr != q_vector->eitr) {
		u32 itr_reg;
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
		q_vector->eitr = new_itr;
		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
		/* must write high and low 16 bits to reset counter */
		DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx,
			itr_reg);
		IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16);
	}

	return;
}
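
/**
 * ixgbe_msix_lsc - MSI-X "other causes" handler; services link status changes
 * @irq: unused
 * @data: pointer to our netdev
 **/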
static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	if (eicr & IXGBE_EICR_LSC) {
		adapter->lsc_int++;
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies);
	}

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);

	return IRQ_HANDLED;
}
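
/**
 * ixgbe_msix_clean_tx - single unshared vector tx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/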
static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *tx_ring;
	int i, r_idx;

	if (!q_vector->txr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
#ifdef CONFIG_DCA
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			ixgbe_update_tx_dca(adapter, tx_ring);
#endif
		tx_ring->total_bytes = 0;
		tx_ring->total_packets = 0;
		ixgbe_clean_tx_irq(adapter, tx_ring);
		r_idx = find_next_bit(q_vector->txr_idx,
				      adapter->num_tx_queues, r_idx + 1);
	}

	return IRQ_HANDLED;
}

/**
 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring;
	int r_idx;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	if (!q_vector->rxr_count)
		return IRQ_HANDLED;

	rx_ring = &(adapter->rx_ring[r_idx]);
	/* disable interrupts on this vector only */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
	rx_ring->total_bytes = 0;
	rx_ring->total_packets = 0;
	netif_rx_schedule(adapter->netdev, &q_vector->napi);

	return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
{
	ixgbe_msix_clean_rx(irq, data);
	ixgbe_msix_clean_tx(irq, data);

	return IRQ_HANDLED;
}

/**
 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 **/
static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
			       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
#ifdef CONFIG_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		ixgbe_update_rx_dca(adapter, rx_ring);
#endif

	ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		netif_rx_complete(adapter->netdev, napi);
		if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
					rx_ring->v_idx);
	}

	return work_done;
}
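
/* Record that vector v_idx services ring r_idx, and cache the vector's
 * EIMS bit (1 << v_idx) in the ring for later interrupt masking. */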
static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
				     int r_idx)
{
	a->q_vector[v_idx].adapter = a;
	set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
	a->q_vector[v_idx].rxr_count++;
	a->rx_ring[r_idx].v_idx = 1 << v_idx;
}

static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
				     int r_idx)
{
	a->q_vector[v_idx].adapter = a;
	set_bit(r_idx, a->q_vector[v_idx].txr_idx);
	a->q_vector[v_idx].txr_count++;
	a->tx_ring[r_idx].v_idx = 1 << v_idx;
}

/**
 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 * @vectors: allotted vector count for descriptor rings
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
				      int vectors)
{
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	/* No mapping required if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		goto out;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);

		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}

	for (i = v_start; i < vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}

/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irqreturn_t (*handler)(int, void *);
	int i, vector, q_vectors, err;

	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* Map the Tx/Rx rings to the vectors we were allotted. */
	err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
	if (err)
		goto out;

#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
			 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
			 &ixgbe_msix_clean_many)
	for (vector = 0; vector < q_vectors; vector++) {
		handler = SET_HANDLER(&adapter->q_vector[vector]);
		sprintf(adapter->name[vector], "%s:v%d-%s",
			netdev->name, vector,
			(handler == &ixgbe_msix_clean_rx) ? "Rx" :
			 ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx"));
		err = request_irq(adapter->msix_entries[vector].vector,
				  handler, 0, adapter->name[vector],
				  &(adapter->q_vector[vector]));
		if (err) {
			DPRINTK(PROBE, ERR,
				"request_irq failed for MSIX interrupt "
				"Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	sprintf(adapter->name[vector], "%s:lsc", netdev->name);
	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
	if (err) {
		DPRINTK(PROBE, ERR,
			"request_irq for msix_lsc failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	for (i = vector - 1; i >= 0; i--)
		free_irq(adapter->msix_entries[--vector].vector,
			 &(adapter->q_vector[i]));
	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
out:
	return err;
}
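
/*
 * ixgbe_set_itr - dynamic ITR update for the single-vector (legacy/MSI)
 * case: recompute tx_eitr/rx_eitr from ring 0 statistics and write the
 * smoothed result to EITR(0).
 */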
static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_q_vector *q_vector = adapter->q_vector;
	u8 current_itr;
	u32 new_itr = q_vector->eitr;
	struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
	struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];

	q_vector->tx_eitr = ixgbe_update_itr(adapter, new_itr,
					     q_vector->tx_eitr,
					     tx_ring->total_packets,
					     tx_ring->total_bytes);
	q_vector->rx_eitr = ixgbe_update_itr(adapter, new_itr,
					     q_vector->rx_eitr,
					     rx_ring->total_packets,
					     rx_ring->total_bytes);

	current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 8000;
		break;
	default:
		break;
	}

	if (new_itr != q_vector->eitr) {
		u32 itr_reg;
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
		q_vector->eitr = new_itr;
		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
		/* must write high and low 16 bits to reset counter */
		IXGBE_WRITE_REG(hw, IXGBE_EITR(0), itr_reg | (itr_reg)<<16);
	}

	return;
}

static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter);

/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;

	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
	 * therefore no explicit interrupt disable is necessary */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	if (!eicr)
		return IRQ_NONE;	/* Not our interrupt */

	if (eicr & IXGBE_EICR_LSC) {
		adapter->lsc_int++;
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies);
	}

	if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) {
		adapter->tx_ring[0].total_packets = 0;
		adapter->tx_ring[0].total_bytes = 0;
		adapter->rx_ring[0].total_packets = 0;
		adapter->rx_ring[0].total_bytes = 0;
		/* would disable interrupts here but EIAM disabled it */
		__netif_rx_schedule(netdev, &adapter->q_vector[0].napi);
	}

	return IRQ_HANDLED;
}
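
/* Clear the ring-to-vector mappings on every q_vector so the rings can be
 * remapped, e.g. after the MSI-X queue interrupts have been freed. */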
static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
		q_vector->rxr_count = 0;
		q_vector->txr_count = 0;
	}
}

/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		err = ixgbe_request_msix_irqs(adapter);
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
				  netdev->name, netdev);
	} else {
		err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
				  netdev->name, netdev);
	}

	if (err)
		DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i, q_vectors;

		q_vectors = adapter->num_msix_vectors;

		i = q_vectors - 1;
		free_irq(adapter->msix_entries[i].vector, netdev);

		i--;
		for (; i >= 0; i--) {
			free_irq(adapter->msix_entries[i].vector,
				 &(adapter->q_vector[i]));
		}

		ixgbe_reset_q_vectors(adapter);
	} else {
		free_irq(adapter->pdev->irq, netdev);
	}
}

/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i;
		for (i = 0; i < adapter->num_msix_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
{
	u32 mask;
	mask = IXGBE_EIMS_ENABLE_MASK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
	IXGBE_WRITE_FLUSH(&adapter->hw);
}

/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 * @adapter: board private structure
 **/
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
			EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr));

	ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
	ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0);

	map_vector_to_rxq(adapter, 0, 0);
	map_vector_to_txq(adapter, 0, 0);

	DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
}

/**
 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		tdba = adapter->tx_ring[i].dma;
		tdlen = adapter->tx_ring[i].count *
			sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
				(tdba & DMA_32BIT_MASK));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_TDH(j);
		adapter->tx_ring[i].tail = IXGBE_TDT(j);

		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
	}
}

#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
			   (((S) & (PAGE_SIZE - 1)) ? 1 : 0))

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

/**
 * ixgbe_get_skb_hdr - helper function for LRO header processing
 * @skb: pointer to sk_buff to be added to LRO packet
 * @iphdr: pointer to ip header structure
 * @tcph: pointer to tcp header structure
 * @hdr_flags: pointer to header flags
 * @priv: private data
 **/
static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
			     u64 *hdr_flags, void *priv)
{
	union ixgbe_adv_rx_desc *rx_desc = priv;

	/* Verify that this is a valid IPv4 TCP packet */
	if (!(rx_desc->wb.lower.lo_dword.pkt_info &
	    (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)))
		return -1;

	/* Set network headers */
	skb_reset_network_header(skb);
	skb_set_transport_header(skb, ip_hdrlen(skb));
	*iphdr = ip_hdr(skb);
	*tcph = tcp_hdr(skb);
	*hdr_flags = LRO_IPV4 | LRO_TCP;
	return 0;
}

/**
 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i, j;
	u32 rdlen, rxctrl, rxcsum;
	u32 random[10];
	u32 fctrl, hlreg0;
	u32 pages;
	u32 reta = 0, mrqc, srrctl;

	/* Decide whether to use packet split mode or not */
	if (netdev->mtu > ETH_DATA_LEN)
		adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
	else
		adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;

	/* Set the RX buffer length according to the mode */
	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		adapter->rx_buf_len = IXGBE_RX_HDR_SIZE;
	} else {
		if (netdev->mtu <= ETH_DATA_LEN)
			adapter->rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		else
			adapter->rx_buf_len = ALIGN(max_frame, 1024);
	}

	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
	else
		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

	pages = PAGE_USE_COUNT(adapter->netdev->mtu);

	srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(0));
	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;

	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		srrctl |= ((IXGBE_RX_HDR_SIZE <<
			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
			   IXGBE_SRRCTL_BSIZEHDR_MASK);
	} else {
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		if (adapter->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
			srrctl |=
			     IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		else
			srrctl |=
			     adapter->rx_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl);

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* disable receives while setting up the descriptors */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), (rdba & DMA_32BIT_MASK));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
		adapter->rx_ring[i].head = IXGBE_RDH(i);
		adapter->rx_ring[i].tail = IXGBE_RDT(i);

		/* Initial LRO settings; lro_mgr is per-ring, so this
		 * belongs inside the per-queue loop */
		adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE;
		adapter->rx_ring[i].lro_mgr.max_desc =
						IXGBE_MAX_LRO_DESCRIPTORS;
		adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
		adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
		if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
			adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
		adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
		adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
		adapter->rx_ring[i].lro_mgr.ip_summed_aggr =
						CHECKSUM_UNNECESSARY;
	}

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
		/* Fill out redirection table */
		for (i = 0, j = 0; i < 128; i++, j++) {
			if (j == adapter->ring_feature[RING_F_RSS].indices)
				j = 0;
			/* reta = 4-byte sliding window of
			 * 0x00..(indices-1)(indices-1)00..etc. */
			reta = (reta << 8) | (j * 0x11);
			if ((i & 3) == 3)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
		}

		/* Fill out hash function seeds */
		/* XXX use a random constant here to glue certain flows */
		get_random_bytes(&random[0], 40);
		for (i = 0; i < 10; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);

		mrqc = IXGBE_MRQC_RSSEN
		    /* Perform hash on these packet types */
		    | IXGBE_MRQC_RSS_FIELD_IPV4
		    | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
		    | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX
		    | IXGBE_MRQC_RSS_FIELD_IPV6
		    | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
		    | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
	    adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
		/* Disable indicating checksum in descriptor, enables
		 * RSS hash */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}
	if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
		/* Enable IPv4 payload checksum for UDP fragments
		 * if PCSD is not set */
		rxcsum |= IXGBE_RXCSUM_IPPCSE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
}

static void ixgbe_vlan_rx_register(struct net_device *netdev,
				   struct vlan_group *grp)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 ctrl;

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
		ctrl |= IXGBE_VLNCTRL_VME;
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
	}

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter);
}

static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* add VID to filter table */
	ixgbe_set_vfta(&adapter->hw, vid, 0, true);
}

static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_disable(adapter);

	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter);

	/* remove VID from filter table */
	ixgbe_set_vfta(&adapter->hw, vid, 0, false);
}

static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
	ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}
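
/* Iterator helper for the shared address-list update routines: returns the
 * current address and advances *mc_addr_ptr to the next dev_mc_list entry
 * (NULL at the end of the chain). */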
2008-08-26 15:27:02 +04:00
static u8 * ixgbe_addr_list_itr ( struct ixgbe_hw * hw , u8 * * mc_addr_ptr , u32 * vmdq )
{
struct dev_mc_list * mc_ptr ;
u8 * addr = * mc_addr_ptr ;
* vmdq = 0 ;
mc_ptr = container_of ( addr , struct dev_mc_list , dmi_addr [ 0 ] ) ;
if ( mc_ptr - > next )
* mc_addr_ptr = mc_ptr - > next - > dmi_addr ;
else
* mc_addr_ptr = NULL ;
return addr ;
}
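
/*
 * Editor's sketch (assumption, not the shared-code implementation):
 * the iterator above returns the current address and advances the
 * caller's cursor to the next dev_mc_list entry, so the common code
 * presumably walks the list roughly like this (the iterator function
 * pointer type name here is illustrative):
 */
#if 0
static void walk_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
			   u32 count, u8 *(*itr)(struct ixgbe_hw *,
						 u8 **, u32 *))
{
	u32 i, vmdq;
	u8 *addr;

	for (i = 0; i < count && addr_list; i++) {
		addr = itr(hw, &addr_list, &vmdq);	/* advances the cursor */
		/* program addr into the next filter slot here */
	}
}
#endif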
/**
 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast/multicast
 * address list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast and
 * promiscuous mode.
 **/
static void ixgbe_set_rx_mode(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 fctrl, vlnctrl;
	u8 *addr_list = NULL;
	int addr_count = 0;

	/* Check for Promiscuous and All Multicast modes */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);

	if (netdev->flags & IFF_PROMISC) {
		hw->addr_ctrl.user_set_promisc = 1;
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		vlnctrl &= ~IXGBE_VLNCTRL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			fctrl |= IXGBE_FCTRL_MPE;
			fctrl &= ~IXGBE_FCTRL_UPE;
		} else {
			fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		}
		vlnctrl |= IXGBE_VLNCTRL_VFE;
		hw->addr_ctrl.user_set_promisc = 0;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);

	/* reprogram secondary unicast list */
	addr_count = netdev->uc_count;
	if (addr_count)
		addr_list = netdev->uc_list->dmi_addr;
	ixgbe_update_uc_addr_list(hw, addr_list, addr_count,
				  ixgbe_addr_list_itr);

	/* reprogram multicast list */
	addr_count = netdev->mc_count;
	if (addr_count)
		addr_list = netdev->mc_list->dmi_addr;
	ixgbe_update_mc_addr_list(hw, addr_list, addr_count,
				  ixgbe_addr_list_itr);
}
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
{
	int q_idx;
	struct ixgbe_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* legacy and MSI only use one vector */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = &adapter->q_vector[q_idx];
		if (!q_vector->rxr_count)
			continue;
		napi_enable(&q_vector->napi);
	}
}

static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
	int q_idx;
	struct ixgbe_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* legacy and MSI only use one vector */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = &adapter->q_vector[q_idx];
		if (!q_vector->rxr_count)
			continue;
		napi_disable(&q_vector->napi);
	}
}

static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbe_set_rx_mode(netdev);
	ixgbe_restore_vlan(adapter);

	ixgbe_configure_tx(adapter);
	ixgbe_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
				       (adapter->rx_ring[i].count - 1));
}
static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	u32 txdctl, rxdctl, mhadd;
	u32 gpie;

	ixgbe_get_hw_control(adapter);

	if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
	    (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
			gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
				IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
		} else {
			/* MSI only */
			gpie = 0;
		}
		/* XXX: to interrupt immediately for EICS writes, enable this */
		/* gpie |= IXGBE_GPIE_EIMEN; */
		IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
	}

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/* legacy interrupts, use EIAM to auto-mask when reading EICR,
		 * specifically only auto mask tx and rx interrupts */
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		j = adapter->rx_ring[i].reg_idx;
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
		/* enable PTHRESH=32 descriptors (half the internal cache)
		 * and HTHRESH=0 descriptors (to minimize latency on fetch),
		 * this also removes a pesky rx_no_buffer_count increment */
		rxdctl |= 0x0020;
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
	}

	/* enable all receives */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxdctl);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		ixgbe_configure_msix(adapter);
	else
		ixgbe_configure_msi_and_legacy(adapter);

	clear_bit(__IXGBE_DOWN, &adapter->state);
	ixgbe_napi_enable_all(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	ixgbe_irq_enable(adapter);

	/* bring the link up in the watchdog, this could race with our first
	 * link up interrupt but shouldn't be a problem */
	mod_timer(&adapter->watchdog_timer, jiffies);
	return 0;
}
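
/*
 * Editor's worked example for the maximum-frame programming above:
 * for the default MTU of 1500, max_frame = 1500 + ETH_HLEN (14) +
 * ETH_FCS_LEN (4) = 1518 bytes, which lands in the MFS field of
 * MHADD via IXGBE_MHADD_MFS_SHIFT.  A minimal model of that update:
 */
#if 0
static u32 mhadd_with_mfs(u32 mhadd, u32 max_frame)
{
	mhadd &= ~IXGBE_MHADD_MFS_MASK;	/* clear the old MFS field */
	return mhadd | (max_frame << IXGBE_MHADD_MFS_SHIFT);
}
#endif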
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		msleep(1);
	ixgbe_down(adapter);
	ixgbe_up(adapter);
	clear_bit(__IXGBE_RESETTING, &adapter->state);
}

int ixgbe_up(struct ixgbe_adapter *adapter)
{
	/* hardware has been reset, we need to reload some things */
	ixgbe_configure(adapter);

	return ixgbe_up_complete(adapter);
}

void ixgbe_reset(struct ixgbe_adapter *adapter)
{
	if (ixgbe_init_hw(&adapter->hw))
		DPRINTK(PROBE, ERR, "Hardware Error\n");

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
}

#ifdef CONFIG_PM
static int ixgbe_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
		       "suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev)) {
		err = ixgbe_request_irq(adapter);
		if (err)
			return err;
	}

	ixgbe_reset(adapter);

	if (netif_running(netdev))
		ixgbe_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
#endif
/**
 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbe_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
					 adapter->rx_buf_len,
					 PCI_DMA_FROMDEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			dev_kfree_skb(rx_buffer_info->skb);
			rx_buffer_info->skb = NULL;
		}
		if (!rx_buffer_info->page)
			continue;
		pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
		rx_buffer_info->page_dma = 0;
		put_page(rx_buffer_info->page);
		rx_buffer_info->page = NULL;
	}

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * ixgbe_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
	}

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}
void ixgbe_down(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 rxctrl;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBE_DOWN, &adapter->state);

	/* disable receives */
	rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL,
			rxctrl & ~IXGBE_RXCTRL_RXEN);

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */

	/* flush both disables */
	IXGBE_WRITE_FLUSH(&adapter->hw);
	msleep(10);

	ixgbe_irq_disable(adapter);

	ixgbe_napi_disable_all(adapter);
	del_timer_sync(&adapter->watchdog_timer);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbe_reset(adapter);

	ixgbe_clean_all_tx_rings(adapter);
	ixgbe_clean_all_rx_rings(adapter);
}
static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		ixgbe_down(adapter);
		ixgbe_free_irq(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	ixgbe_release_hw_control(adapter);

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static void ixgbe_shutdown(struct pci_dev *pdev)
{
	ixgbe_suspend(pdev, PMSG_SUSPEND);
}
/**
 * ixgbe_poll - NAPI Rx polling callback
 * @napi: structure for representing this polling device
 * @budget: how many packets driver is allowed to clean
 *
 * This function is used for legacy and MSI, NAPI mode
 **/
static int ixgbe_poll(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector = container_of(napi,
					struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	int tx_cleaned = 0, work_done = 0;

#ifdef CONFIG_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		ixgbe_update_tx_dca(adapter, adapter->tx_ring);
		ixgbe_update_rx_dca(adapter, adapter->rx_ring);
	}
#endif

	tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
	ixgbe_clean_rx_irq(adapter, adapter->rx_ring, &work_done, budget);

	if (tx_cleaned)
		work_done = budget;

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		netif_rx_complete(adapter->netdev, napi);
		if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
			ixgbe_set_itr(adapter);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable(adapter);
	}

	return work_done;
}
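
/*
 * Editor's note: the poll function above follows the standard NAPI
 * contract: consume up to @budget packets, and only when it finishes
 * under budget does it leave the poll list and re-arm interrupts.  A
 * minimal sketch of that contract, with hypothetical helpers standing
 * in for this driver's ring details:
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = clean_rx(budget);	/* hypothetical helper */

	if (work_done < budget) {
		/* under budget: stop polling and re-enable interrupts */
		netif_rx_complete(dev, napi);
		enable_device_irqs();		/* hypothetical helper */
	}
	return work_done;	/* == budget keeps us on the poll list */
}
#endif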
/**
 * ixgbe_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbe_tx_timeout(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}

static void ixgbe_reset_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter;
	adapter = container_of(work, struct ixgbe_adapter, reset_task);

	adapter->tx_timeout_count++;

	ixgbe_reinit_locked(adapter);
}
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
				       int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 * 3) Other (Link Status Change, etc.)
	 * 4) TCP Timer (optional)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting IRQs.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
	} else {
		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
		adapter->num_msix_vectors = vectors;
	}
}
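
/*
 * Editor's worked example for the retry loop above: in this kernel
 * era, pci_enable_msix() returns 0 on success, a negative errno on
 * hard failure, or a positive count of vectors that could actually be
 * allocated.  So a request for 10 vectors on a platform with only 6
 * available converges in two calls:
 */
#if 0
	err = pci_enable_msix(pdev, entries, 10);	/* returns 6 */
	err = pci_enable_msix(pdev, entries, 6);	/* returns 0 */
	/* -> adapter->num_msix_vectors ends up as 6 */
#endif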
static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	int nrq, ntq;
	int feature_mask = 0, rss_i, rss_m;

	/* Number of supported queues */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		rss_i = adapter->ring_feature[RING_F_RSS].indices;
		rss_m = 0;
		feature_mask |= IXGBE_FLAG_RSS_ENABLED;

		switch (adapter->flags & feature_mask) {
		case (IXGBE_FLAG_RSS_ENABLED):
			rss_m = 0xF;
			nrq = rss_i;
			ntq = rss_i;
			break;
		case 0:
		default:
			rss_i = 0;
			rss_m = 0;
			nrq = 1;
			ntq = 1;
			break;
		}

		adapter->ring_feature[RING_F_RSS].indices = rss_i;
		adapter->ring_feature[RING_F_RSS].mask = rss_m;
		break;
	default:
		nrq = 1;
		ntq = 1;
		break;
	}

	adapter->num_rx_queues = nrq;
	adapter->num_tx_queues = ntq;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* TODO: Remove all uses of the indices in the cases where multiple
	 * features are OR'd together, if the feature set makes sense.
	 */
	int feature_mask = 0, rss_i;
	int i, txr_idx, rxr_idx;

	/* Number of supported queues */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		rss_i = adapter->ring_feature[RING_F_RSS].indices;
		txr_idx = 0;
		rxr_idx = 0;
		feature_mask |= IXGBE_FLAG_RSS_ENABLED;
		switch (adapter->flags & feature_mask) {
		case (IXGBE_FLAG_RSS_ENABLED):
			for (i = 0; i < adapter->num_rx_queues; i++)
				adapter->rx_ring[i].reg_idx = i;
			for (i = 0; i < adapter->num_tx_queues; i++)
				adapter->tx_ring[i].reg_idx = i;
			break;
		case 0:
		default:
			break;
		}
		break;
	default:
		break;
	}
}
/**
 * ixgbe_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err_tx_ring_allocation;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err_rx_ring_allocation;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD;
		adapter->tx_ring[i].queue_index = i;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD;
		adapter->rx_ring[i].queue_index = i;
	}

	ixgbe_cache_ring_register(adapter);

	return 0;

err_rx_ring_allocation:
	kfree(adapter->tx_ring);
err_tx_ring_allocation:
	return -ENOMEM;
}
/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
						    *adapter)
{
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPUs.  So let's be conservative and only ask for
	 * (roughly) twice the number of vectors as there are CPUs.
	 */
	v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
		       (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;

	/*
	 * At the same time, hardware can only support a maximum of
	 * MAX_MSIX_COUNT vectors.  With features such as RSS and VMDq,
	 * we can easily reach upwards of 64 Rx descriptor queues and
	 * 32 Tx queues.  Thus, we cap it off in those rare cases where
	 * the CPU count also exceeds our vector limit.
	 */
	v_budget = min(v_budget, MAX_MSIX_COUNT);

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
		ixgbe_set_num_queues(adapter);
		kfree(adapter->tx_ring);
		kfree(adapter->rx_ring);
		err = ixgbe_alloc_queues(adapter);
		if (err) {
			DPRINTK(PROBE, ERR, "Unable to allocate memory "
				"for queues\n");
			goto out;
		}
		goto try_msi;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	ixgbe_acquire_msix_vectors(adapter, v_budget);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		goto out;

try_msi:
	err = pci_enable_msi(adapter->pdev);
	if (!err) {
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
	} else {
		DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
			"falling back to legacy.  Error: %d\n", err);
		/* reset err */
		err = 0;
	}

out:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;

	return err;
}
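
/*
 * Editor's worked example for the vector budget above (values are
 * illustrative): with 4 online CPUs and 16 Rx + 16 Tx queues,
 * v_budget = min(32, 4 * 2) + NON_Q_VECTORS = 8 + NON_Q_VECTORS,
 * which is then clamped to the hardware limit:
 */
#if 0
	v_budget = min(16 + 16, (int)(4 * 2)) + NON_Q_VECTORS;	/* 8 + extras */
	v_budget = min(v_budget, MAX_MSIX_COUNT);		/* hw cap */
#endif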
static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}
/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
static int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	err = ixgbe_alloc_queues(adapter);
	if (err) {
		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = ixgbe_set_interrupt_capability(adapter);
	if (err) {
		DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
		"Tx Queue count = %u\n",
		(adapter->num_rx_queues > 1) ? "Enabled" :
		"Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_set_interrupt:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_alloc_queues:
	return err;
}
/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbe_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int rss;

	/* Set capability flags */
	rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
	adapter->ring_feature[RING_F_RSS].indices = rss;
	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;

	/* Enable Dynamic interrupt throttling by default */
	adapter->rx_eitr = 1;
	adapter->tx_eitr = 1;

	/* default flow control settings */
	hw->fc.original_type = ixgbe_fc_none;
	hw->fc.type = ixgbe_fc_none;
	hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
	hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
	hw->fc.send_xon = true;

	/* select 10G link by default */
	hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
	if (hw->mac.ops.reset(hw)) {
		dev_err(&pdev->dev, "HW Init failed\n");
		return -EIO;
	}
	if (hw->mac.ops.setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true,
					 false)) {
		dev_err(&pdev->dev, "Link Speed setup failed\n");
		return -EIO;
	}

	/* initialize eeprom parameters */
	if (ixgbe_init_eeprom(hw)) {
		dev_err(&pdev->dev, "EEPROM initialization failed\n");
		return -EIO;
	}

	/* enable rx csum by default */
	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;
}
/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
			     struct ixgbe_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vmalloc(size);
	if (!tx_ring->tx_buffer_info) {
		DPRINTK(PROBE, ERR,
			"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}
	memset(tx_ring->tx_buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
					     &tx_ring->dma);
	if (!tx_ring->desc) {
		vfree(tx_ring->tx_buffer_info);
		DPRINTK(PROBE, ERR,
			"Memory allocation failed for the tx desc ring\n");
		return -ENOMEM;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->work_limit = tx_ring->count;

	return 0;
}
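
/*
 * Editor's worked example for the 4K rounding above: a 512-entry ring
 * of 16-byte advanced descriptors is 512 * 16 = 8192 bytes, already
 * 4K-aligned; a 100-entry ring would be 1600 bytes, which
 * ALIGN(1600, 4096) rounds up to 4096.  For a power-of-two boundary,
 * ALIGN(x, a) reduces to:
 */
#if 0
	size = (size + 4096 - 1) & ~(4096 - 1);	/* round up to 4 KB */
#endif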
/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
			     struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS;
	rx_ring->lro_mgr.lro_arr = vmalloc(size);
	if (!rx_ring->lro_mgr.lro_arr)
		return -ENOMEM;
	memset(rx_ring->lro_mgr.lro_arr, 0, size);

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vmalloc(size);
	if (!rx_ring->rx_buffer_info) {
		DPRINTK(PROBE, ERR,
			"vmalloc allocation failed for the rx desc ring\n");
		goto alloc_failed;
	}
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);

	if (!rx_ring->desc) {
		DPRINTK(PROBE, ERR,
			"Memory allocation failed for the rx desc ring\n");
		vfree(rx_ring->rx_buffer_info);
		goto alloc_failed;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

alloc_failed:
	vfree(rx_ring->lro_mgr.lro_arr);
	rx_ring->lro_mgr.lro_arr = NULL;
	return -ENOMEM;
}
/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
				    struct ixgbe_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbe_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
}

/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
static void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
				    struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	vfree(rx_ring->lro_mgr.lro_arr);
	rx_ring->lro_mgr.lro_arr = NULL;

	ixgbe_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
}
/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			DPRINTK(PROBE, ERR,
				"Allocation for Tx Queue %u failed\n", i);
			break;
		}
	}

	return err;
}

/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			DPRINTK(PROBE, ERR,
				"Allocation for Rx Queue %u failed\n", i);
			break;
		}
	}

	return err;
}
/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	if ((max_frame < (ETH_ZLEN + ETH_FCS_LEN)) ||
	    (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
		return -EINVAL;

	DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
		netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
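
/*
 * Editor's worked example for the bounds check above: the lower bound
 * ETH_ZLEN (60) + ETH_FCS_LEN (4) = 64 bytes on the wire, and the
 * frame overhead is ETH_HLEN (14) + ETH_FCS_LEN (4) = 18 bytes, so the
 * smallest accepted MTU is 64 - 18 = 46:
 */
#if 0
	/* new_mtu = 46   -> max_frame = 64   -> accepted */
	/* new_mtu = 45   -> max_frame = 63   -> -EINVAL  */
	/* new_mtu = 1500 -> max_frame = 1518 -> accepted */
#endif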
/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbe_open(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err;

	/* disallow open during test */
	if (test_bit(__IXGBE_TESTING, &adapter->state))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = ixgbe_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbe_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbe_configure(adapter);

	err = ixgbe_request_irq(adapter);
	if (err)
		goto err_req_irq;

	err = ixgbe_up_complete(adapter);
	if (err)
		goto err_up;

	netif_tx_start_all_queues(netdev);

	return 0;

err_up:
	ixgbe_release_hw_control(adapter);
	ixgbe_free_irq(adapter);
err_req_irq:
	ixgbe_free_all_rx_resources(adapter);
err_setup_rx:
	ixgbe_free_all_tx_resources(adapter);
err_setup_tx:
	ixgbe_reset(adapter);

	return err;
}
/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbe_close(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ixgbe_down(adapter);
	ixgbe_free_irq(adapter);

	ixgbe_free_all_tx_resources(adapter);
	ixgbe_free_all_rx_resources(adapter);

	ixgbe_release_hw_control(adapter);

	return 0;
}
/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 total_mpc = 0;
	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;

	adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	for (i = 0; i < 8; i++) {
		/* for packet buffers not used, the register should read 0 */
		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		missed_rx += mpc;
		adapter->stats.mpc[i] += mpc;
		total_mpc += adapter->stats.mpc[i];
		adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	}
	adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	/* work around hardware counting issue */
	adapter->stats.gprc -= missed_rx;

	/* 82598 hardware only has a 32 bit counter in the high register */
	adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
	adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
	adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	adapter->stats.bprc += bprc;
	adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	adapter->stats.mprc -= bprc;
	adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
	adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
	adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
	adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	adapter->stats.lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	adapter->stats.lxofftxc += lxoff;
	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	/*
	 * 82598 errata - tx of flow control packets is included in tx counters
	 */
	xon_off_tot = lxon + lxoff;
	adapter->stats.gptc -= xon_off_tot;
	adapter->stats.mptc -= xon_off_tot;
	adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
	adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	adapter->stats.ptc64 -= xon_off_tot;
	adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Fill out the OS statistics structure */
	adapter->net_stats.multicast = adapter->stats.mprc;

	/* Rx Errors */
	adapter->net_stats.rx_errors = adapter->stats.crcerrs +
				       adapter->stats.rlec;
	adapter->net_stats.rx_dropped = 0;
	adapter->net_stats.rx_length_errors = adapter->stats.rlec;
	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_missed_errors = total_mpc;
}
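
/*
 * Editor's worked example for the errata adjustment above: XON/XOFF
 * pause frames are minimum-sized, so each one inflates the good-octets
 * counter by ETH_ZLEN (60) + ETH_FCS_LEN (4) = 64 bytes.  If the
 * device sent 5 XON and 3 XOFF frames since the last poll:
 */
#if 0
	xon_off_tot = 5 + 3;				/* 8 pause frames */
	gotc_correction = 8 * (ETH_ZLEN + ETH_FCS_LEN);	/* 8 * 64 = 512 B */
#endif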
/**
 * ixgbe_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbe_watchdog(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
	struct net_device *netdev = adapter->netdev;
	bool link_up;
	u32 link_speed = 0;

	adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up);

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			u32 frctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
			u32 rmcs = IXGBE_READ_REG(&adapter->hw, IXGBE_RMCS);
#define FLOW_RX (frctl & IXGBE_FCTRL_RFCE)
#define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X)
			DPRINTK(LINK, INFO, "NIC Link is Up %s, "
				"Flow Control: %s\n",
				(link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
				 "10 Gbps" :
				 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
				  "1 Gbps" : "unknown speed")),
				((FLOW_RX && FLOW_TX) ? "RX/TX" :
				 (FLOW_RX ? "RX" :
				  (FLOW_TX ? "TX" : "None"))));

			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
		} else {
			/* Force detection of hung controller */
			adapter->detect_tx_hung = true;
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			DPRINTK(LINK, INFO, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
	}

	ixgbe_update_stats(adapter);

	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		/* Cause software interrupt to ensure rx rings are cleaned */
		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
			u32 eics =
			 (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, eics);
		} else {
			/* for legacy and MSI interrupts don't set any bits that
			 * are enabled for EIAM, because this operation would
			 * set *both* EIMS and EICS for any bit in EIAM */
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					(IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
		}
		/* Reset the timer */
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
	}
}
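
/*
 * Editor's worked example for the EICS mask above (NON_Q_VECTORS
 * assumed to be 1 here purely for illustration): with 9 MSI-X vectors
 * of which one is reserved for "other" causes, the queue vectors sit
 * on bits 0..7, and the mask fires a software interrupt on each:
 */
#if 0
	eics = (1 << (9 - 1)) - 1;	/* 0xff: one bit per queue vector */
#endif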
static int ixgbe_tso(struct ixgbe_adapter *adapter,
		     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, u8 *hdr_len)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	u32 mss_l4len_idx = 0, l4len;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}
		l4len = tcp_hdrlen(skb);
		*hdr_len += l4len;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			adapter->hw_tso_ctxt++;
		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
			adapter->hw_tso6_ctxt++;
		}

		i = tx_ring->next_to_use;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		/* VLAN MACLEN IPLEN */
		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= ((skb_network_offset(skb)) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		*hdr_len += skb_network_offset(skb);
		vlan_macip_lens |=
		    (skb_transport_header(skb) - skb_network_header(skb));
		*hdr_len +=
		    (skb_transport_header(skb) - skb_network_header(skb));
		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->protocol == htons(ETH_P_IP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);

		/* MSS L4LEN IDX */
		mss_l4len_idx |=
		    (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
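
/*
 * Editor's note on the checksum seeding above: for TSO the stack's
 * checksum is replaced with a pseudo-header checksum computed with a
 * *zero* length, because the hardware inserts the real per-segment
 * payload length as it cuts segments.  A minimal restatement of the
 * IPv4 seed:
 */
#if 0
	ip_hdr(skb)->tot_len = 0;	/* hw rewrites per segment */
	ip_hdr(skb)->check = 0;
	tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						 ip_hdr(skb)->daddr,
						 0,	/* len left to hw */
						 IPPROTO_TCP, 0);
#endif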
static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
			  struct ixgbe_ring *tx_ring,
			  struct sk_buff *skb, u32 tx_flags)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= (skb_network_offset(skb) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			vlan_macip_lens |= (skb_transport_header(skb) -
					    skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case __constant_htons(ETH_P_IP):
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					type_tucmd_mlhl |=
						IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;
			case __constant_htons(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					type_tucmd_mlhl |=
						IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				if (unlikely(net_ratelimit())) {
					DPRINTK(PROBE, WARNING,
						"partial checksum but proto=%x!\n",
						skb->protocol);
				}
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
		context_desc->mss_l4len_idx = 0;

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;
		adapter->hw_csum_tx_good++;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
			struct ixgbe_ring *tx_ring,
			struct sk_buff *skb, unsigned int first)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int len = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;

	len -= skb->data_len;

	i = tx_ring->next_to_use;

	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->dma = pci_map_single(adapter->pdev,
						     skb->data + offset,
						     size, PCI_DMA_TODEVICE);
		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		offset += size;
		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = frag->page_offset;

		while (len) {
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma = pci_map_page(adapter->pdev,
							   frag->page,
							   offset,
							   size, PCI_DMA_TODEVICE);
			tx_buffer_info->time_stamp = jiffies;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			offset += size;
			count++;
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}
	if (i == 0)
		i = tx_ring->count - 1;
	else
		i = i - 1;
	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;

	return count;
}
static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
			   struct ixgbe_ring *tx_ring,
			   int tx_flags, int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;
	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
				 IXGBE_ADVTXD_POPTS_SHIFT;

		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
					 IXGBE_ADVTXD_POPTS_SHIFT;

	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
				 IXGBE_ADVTXD_POPTS_SHIFT;

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);

		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
}
static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
				 struct ixgbe_ring *tx_ring, int size)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_stop_subqueue(netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_wake_subqueue(netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}

static int ixgbe_maybe_stop_tx(struct net_device *netdev,
			       struct ixgbe_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
}
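
/*
 * Editor's note: the two helpers above implement the classic
 * stop-queue/wake-queue race pattern: stop first, issue a full memory
 * barrier, then re-check the free descriptor count and wake again if
 * the cleanup path freed space in the meantime.  Schematically, with
 * illustrative names rather than driver API:
 */
#if 0
	stop_queue(q);
	smp_mb();			/* order the stop vs. the re-check */
	if (free_slots(q) >= needed)	/* cleaner raced with us */
		wake_queue(q);		/* undo the stop */
	else
		return -EBUSY;		/* genuinely full */
#endif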
static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring;
	unsigned int len = skb->len;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;
	unsigned int mss = 0;
	int count = 0;
	unsigned int f;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;

	len -= skb->data_len;
	r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping;
	tx_ring = &adapter->tx_ring[r_idx];

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	mss = skb_shinfo(skb)->gso_size;

	if (mss)
		count++;
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		count++;

	count += TXD_USE_COUNT(len);
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}
	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IXGBE_TX_FLAGS_IPV4;
	first = tx_ring->next_to_use;
	tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGBE_TX_FLAGS_TSO;
	else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IXGBE_TX_FLAGS_CSUM;

	ixgbe_tx_queue(adapter, tx_ring, tx_flags,
		       ixgbe_tx_map(adapter, tx_ring, skb, first),
		       skb->len, hdr_len);

	netdev->trans_start = jiffies;

	ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;
}
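
/*
 * Editor's worked example for the descriptor budgeting above, assuming
 * TXD_USE_COUNT() is a round-up division by IXGBE_MAX_DATA_PER_TXD (an
 * assumption; check the header): a TSO skb with a 200-byte linear area
 * and two 4 KB page fragments needs one context descriptor plus the
 * data descriptors, all reserved before any mapping is done:
 */
#if 0
	count = 1;				/* TSO/csum context desc */
	count += TXD_USE_COUNT(200);		/* linear part: 1 desc */
	count += 2 * TXD_USE_COUNT(4096);	/* two page frags */
#endif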
/**
 * ixgbe_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}

/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);

	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void ixgbe_netpoll(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	ixgbe_intr(adapter->pdev->irq, netdev);
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
	enable_irq(adapter->pdev->irq);
}
#endif
/**
 * ixgbe_napi_add_all - prep napi structs for use
 * @adapter: private struct
 * helper function to napi_add each possible q_vector->napi
 */
static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int (*poll)(struct napi_struct *, int);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		poll = &ixgbe_clean_rxonly;
	} else {
		poll = &ixgbe_poll;
		/* only one q_vector for legacy modes */
		q_vectors = 1;
	}

	for (i = 0; i < q_vectors; i++) {
		struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       (*poll), 64);
	}
}
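/* Note: under MSI-X each queue vector gets its own NAPI context polled
 * by ixgbe_clean_rxonly(), so receive cleanup can run in parallel
 * across CPUs.  In MSI/legacy interrupt mode a single NAPI context
 * using ixgbe_poll() services everything.  64 is the NAPI weight, the
 * maximum number of packets consumed per poll round. */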
/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbe_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbe_adapter *adapter = NULL;
	struct ixgbe_hw *hw;
	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	static int cards_found;
	int i, err, pci_using_dac;
	u16 link_status, link_speed, link_width;
	u32 part_num;

	err = pci_enable_device(pdev);
	if (err)
		return err;
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			err = pci_set_consistent_dma_mask(pdev,
			                                  DMA_32BIT_MASK);
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
				        "configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}
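	/* Note: the driver first tries a full 64-bit DMA mask for both
	 * streaming and coherent mappings; if either fails it falls back
	 * to 32-bit addressing and records the result in pci_using_dac,
	 * which later decides whether NETIF_F_HIGHDMA (DMA directly
	 * to/from highmem pages) can be advertised. */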
	err = pci_request_regions(pdev, ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for (i = 1; i <= 5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
	}
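	/* Note: the loop above walks BARs 1-5 but its body only skips
	 * empty resources, so it is effectively a no-op placeholder;
	 * only BAR 0 (the register space mapped just above) is actually
	 * used by this driver. */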
	netdev->open = &ixgbe_open;
	netdev->stop = &ixgbe_close;
	netdev->hard_start_xmit = &ixgbe_xmit_frame;
	netdev->get_stats = &ixgbe_get_stats;
	netdev->set_rx_mode = &ixgbe_set_rx_mode;
	netdev->set_multicast_list = &ixgbe_set_rx_mode;
	netdev->set_mac_address = &ixgbe_set_mac;
	netdev->change_mtu = &ixgbe_change_mtu;
	ixgbe_set_ethtool_ops(netdev);
	netdev->tx_timeout = &ixgbe_tx_timeout;
	netdev->watchdog_timeo = 5 * HZ;
	netdev->vlan_rx_register = ixgbe_vlan_rx_register;
	netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid;
	netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = ixgbe_netpoll;
#endif
	strcpy(netdev->name, pci_name(pdev));

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;
	adapter->bd_number = cards_found;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	err = ii->get_invariants(hw);
	if (err)
		goto err_hw_init;

	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
	if (err)
		goto err_sw_init;
	netdev->features = NETIF_F_SG |
	                   NETIF_F_HW_CSUM |
	                   NETIF_F_HW_VLAN_TX |
	                   NETIF_F_HW_VLAN_RX |
	                   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_LRO;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_HW_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;
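	/* Note: vlan_features mirrors the offload flags so that VLAN
	 * devices stacked on top of this interface inherit
	 * scatter-gather, checksum and TSO support instead of falling
	 * back to software processing. */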
	/* make sure the EEPROM is good */
	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
		dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);

	if (ixgbe_validate_mac_addr(netdev->dev_addr)) {
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &ixgbe_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	/* print bus type/speed/width info */
	pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status);
	link_speed = link_status & IXGBE_PCI_LINK_SPEED;
	link_width = link_status & IXGBE_PCI_LINK_WIDTH;
	dev_info(&pdev->dev, "(PCI Express:%s:%s) "
	         "%02x:%02x:%02x:%02x:%02x:%02x\n",
	        ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
	         (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
	         "Unknown"),
	        ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
	         (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
	         (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
	         (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
	         "Unknown"),
	        netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
	        netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
	ixgbe_read_part_num(hw, &part_num);
	dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
	         hw->mac.type, hw->phy.type,
	         (part_num >> 8), (part_num & 0xff));
	if (link_width <= IXGBE_PCI_LINK_WIDTH_4) {
		dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
		         "this card is not sufficient for optimal "
		         "performance.\n");
		dev_warn(&pdev->dev, "For optimal performance a x8 "
		         "PCI-Express slot is required.\n");
	}
	/* reset the hardware with the new settings */
	ixgbe_start_hw(hw);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	ixgbe_napi_add_all(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

#ifdef CONFIG_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		/* always use CB2 mode, difference is masked
		 * in the CB driver */
		IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
		ixgbe_setup_dca(adapter);
	}
#endif

	dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
	cards_found++;
	return 0;
err_register:
	ixgbe_release_hw_control(adapter);
err_hw_init:
err_sw_init:
	ixgbe_reset_interrupt_capability(adapter);
err_eeprom:
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBE_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	flush_scheduled_work();

#ifdef CONFIG_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
	}
#endif

	unregister_netdev(netdev);

	ixgbe_reset_interrupt_capability(adapter);

	ixgbe_release_hw_control(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	DPRINTK(PROBE, INFO, "complete\n");
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		DPRINTK(PROBE, ERR,
		        "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	ixgbe_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (ixgbe_up(adapter)) {
			DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}
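/* Note: these three callbacks implement the PCI error-recovery
 * sequence.  The core calls error_detected() when a bus error is
 * reported (detach and quiesce), slot_reset() after the link has been
 * reset (re-enable the device, restore the config space saved by
 * pci_save_state() in probe, and reset the hardware), and finally
 * resume() once traffic may flow again (bring the interface back up). */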
static struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
	.name     = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe    = ixgbe_probe,
	.remove   = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
	.suspend  = ixgbe_suspend,
	.resume   = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
};
/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
	       ixgbe_driver_string, ixgbe_driver_version);
	printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);

#ifdef CONFIG_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&ixgbe_driver);
	return ret;
}

module_init(ixgbe_init_module);
/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_DCA
	dca_unregister_notify(&dca_notifier);
#endif

	pci_unregister_driver(&ixgbe_driver);
}
#ifdef CONFIG_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
                            void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
	                                 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_DCA */
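/* Note: the DCA notifier is registered once per driver in
 * ixgbe_init_module(), so provider add/remove events arrive at the
 * driver level; driver_for_each_device() then fans each event out to
 * every bound ixgbe device via __ixgbe_notify_dca(). */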
module_exit(ixgbe_exit_module);

/* ixgbe_main.c */