/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include "net_driver.h"
#include "ethtool.h"
#include "tx.h"
#include "rx.h"
#include "efx.h"
#include "mdio_10g.h"
#include "falcon.h"

#define EFX_MAX_MTU (9 * 1024)
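/* 9 * 1024 = 9216 bytes; efx_change_mtu() below rejects anything larger
 * with -EINVAL, so this is the jumbo-frame ceiling for the driver. */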
/* RX slow fill workqueue. If memory allocation fails in the fast path,
 * a work item is pushed onto this work queue to retry the allocation later,
 * to avoid the NIC being starved of RX buffers. Since this is a per-CPU
 * workqueue, there is nothing to be gained in making it per-NIC.
 */
static struct workqueue_struct *refill_workqueue;

/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-NIC work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/
/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
static unsigned int separate_tx_channels;
module_param(separate_tx_channels, uint, 0644);
MODULE_PARM_DESC(separate_tx_channels,
		 "Use separate channels for TX and RX");
/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor, which checks for known hardware bugs and resets the
 * hardware and driver as necessary.
 */
unsigned int efx_monitor_interval = 1 * HZ;

/* This controls whether or not the driver will initialise devices
 * with invalid MAC addresses stored in the EEPROM or flash. If true,
 * such devices will be initialised with a random locally-generated
 * MAC address. This allows for loading the sfc_mtd driver to
 * reprogram the flash, even if the flash contents (including the MAC
 * address) have previously been erased.
 */
static unsigned int allow_bad_hwaddr;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;
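/* Working for the figure above: half of a 1024-entry ring is 512
 * descriptors, or ~170 packets at 3 descriptors each; a 1500-byte frame
 * occupies ~1.2 usec of wire time at 10Gb/s, giving 170 * 1.2 ~= 205 usec
 * to drain back below the restart threshold. */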
/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each package (level II cache)
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

static int phy_flash_cfg;
module_param(phy_flash_cfg, int, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
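/* When set, efx_probe_port() leaves the PHY in PHY_MODE_SPECIAL and
 * efx_net_open() refuses to bring the interface up (-EBUSY), keeping the
 * PHY quiescent while its flash is reprogrammed. */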
static unsigned irq_adapt_low_thresh = 10000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 20000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");
2008-04-27 15:55:59 +04:00
/**************************************************************************
*
* Utility functions and prototypes
*
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
static void efx_remove_channel ( struct efx_channel * channel ) ;
static void efx_remove_port ( struct efx_nic * efx ) ;
static void efx_fini_napi ( struct efx_nic * efx ) ;
static void efx_fini_channels ( struct efx_nic * efx ) ;
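/* Once the interface is up, resets are serialised by the rtnl lock (see the
 * reset_workqueue comment above); assert that it is held whenever the NIC
 * is RUNNING. Earlier in the probe path the device is not yet visible, so
 * no lock is required. */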
#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if (efx->state == STATE_RUNNING)	\
			ASSERT_RTNL();			\
	} while (0)
/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel.  The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int rx_quota)
{
	struct efx_nic *efx = channel->efx;
	int rx_packets;

	if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
		     !channel->enabled))
		return 0;

	rx_packets = falcon_process_eventq(channel, rx_quota);
	if (rx_packets == 0)
		return 0;

	/* Deliver last RX packet. */
	if (channel->rx_pkt) {
		__efx_rx_packet(channel, channel->rx_pkt,
				channel->rx_pkt_csummed);
		channel->rx_pkt = NULL;
	}

	efx_rx_strategy(channel);

	efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);

	return rx_packets;
}
/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
	/* The interrupt handler for this channel may set work_pending
	 * as soon as we acknowledge the events we've seen.  Make sure
	 * it's cleared before then. */
	channel->work_pending = false;
	smp_wmb();

	falcon_eventq_read_ack(channel);
}
/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	int rx_packets;

	EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
		  channel->channel, raw_smp_processor_id());

	rx_packets = efx_process_channel(channel, budget);

	if (rx_packets < budget) {
		struct efx_nic *efx = channel->efx;

		if (channel->used_flags & EFX_USED_BY_RX &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			unsigned old_irq_moderation = channel->irq_moderation;

			if (unlikely(channel->irq_mod_score <
				     irq_adapt_low_thresh)) {
				channel->irq_moderation =
					max_t(int,
					      channel->irq_moderation -
					      FALCON_IRQ_MOD_RESOLUTION,
					      FALCON_IRQ_MOD_RESOLUTION);
			} else if (unlikely(channel->irq_mod_score >
					    irq_adapt_high_thresh)) {
				channel->irq_moderation =
					min(channel->irq_moderation +
					    FALCON_IRQ_MOD_RESOLUTION,
					    efx->irq_rx_moderation);
			}

			if (channel->irq_moderation != old_irq_moderation)
				falcon_set_int_moderation(channel);

			channel->irq_count = 0;
			channel->irq_mod_score = 0;
		}

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_channel_processed() will have no effect if
		 * interrupts have already been disabled.
		 */
		napi_complete(napi);
		efx_channel_processed(channel);
	}

	return rx_packets;
}
/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack) the eventq.
 * Finally reenable NAPI and interrupts.
 *
 * Since we are touching interrupts the caller should hold the suspend lock
 */
void efx_process_channel_now(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	BUG_ON(!channel->used_flags);
	BUG_ON(!channel->enabled);

	/* Disable interrupts and wait for ISRs to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	if (channel->irq)
		synchronize_irq(channel->irq);

	/* Wait for any NAPI processing to complete */
	napi_disable(&channel->napi_str);

	/* Poll the channel */
	efx_process_channel(channel, efx->type->evq_size);

	/* Ack the eventq.  This may cause an interrupt to be generated
	 * when they are reenabled */
	efx_channel_processed(channel);

	napi_enable(&channel->napi_str);
	falcon_enable_interrupts(efx);
}
/* Create event queue
 * Event queue memory allocations are done only once.  If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);

	return falcon_probe_eventq(channel);
}

/* Prepare channel's event queue */
static void efx_init_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);

	channel->eventq_read_ptr = 0;

	falcon_init_eventq(channel);
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);

	falcon_fini_eventq(channel);
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);

	falcon_remove_eventq(channel);
}
/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail1;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail2;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail3;
	}

	channel->n_rx_frm_trunc = 0;

	return 0;

 fail3:
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
 fail2:
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
 fail1:
	return rc;
}
static void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;
	const char *type = "";
	int number;

	efx_for_each_channel(channel, efx) {
		number = channel->channel;
		if (efx->n_channels > efx->n_rx_queues) {
			if (channel->channel < efx->n_rx_queues) {
				type = "-rx";
			} else {
				type = "-tx";
				number -= efx->n_rx_queues;
			}
		}
		snprintf(channel->name, sizeof(channel->name),
			 "%s%s-%d", efx->name, type, number);
	}
}
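/* For example: with separate TX channels and two RX queues the names come
 * out as "eth0-rx-0", "eth0-rx-1", "eth0-tx-0" (interface name
 * hypothetical); with shared channels they are plain "eth0-0", "eth0-1". */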
/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void efx_init_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
			      EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			      efx->type->rx_buffer_padding);
	efx->rx_buffer_order = get_order(efx->rx_buffer_len);

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "init chan %d\n", channel->channel);

		efx_init_eventq(channel);

		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue(tx_queue);

		/* The rx buffer allocation strategy is MTU dependent */
		efx_rx_strategy(channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_init_rx_queue(rx_queue);

		WARN_ON(channel->rx_pkt != NULL);
		efx_rx_strategy(channel);
	}
}
/* This enables event queue processing and packet transmission.
 *
 * Note that this function is not allowed to fail, since that would
 * introduce too much complexity into the suspend/resume path.
 */
static void efx_start_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);

	/* The interrupt handler for this channel may set work_pending
	 * as soon as we enable it.  Make sure it's cleared before
	 * then.  Similarly, make sure it sees the enabled flag set. */
	channel->work_pending = false;
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);

	/* Load up RX descriptors */
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_fast_push_rx_descriptors(rx_queue);
}
/* This disables event queue processing and packet transmission.
 * This function does not guarantee that all queue processing
 * (e.g. RX refill) is complete.
 */
static void efx_stop_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	if (!channel->enabled)
		return;

	EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);

	channel->enabled = false;
	napi_disable(&channel->napi_str);

	/* Ensure that any worker threads have exited or will be no-ops */
	efx_for_each_channel_rx_queue(rx_queue, channel) {
		spin_lock_bh(&rx_queue->add_lock);
		spin_unlock_bh(&rx_queue->add_lock);
	}
}
static void efx_fini_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	rc = falcon_flush_queues(efx);
	if (rc)
		EFX_ERR(efx, "failed to flush queues\n");
	else
		EFX_LOG(efx, "successfully flushed all queues\n");

	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
		efx_fini_eventq(channel);
	}
}

static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);

	channel->used_flags = 0;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
{
	queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
}
/**************************************************************************
 *
 * Port handling
 *
 *************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and also maintains the
 * link status's stop on the port's TX queue.
 */
static void efx_link_status_changed(struct efx_nic *efx)
{
	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (efx->port_inhibited) {
		netif_carrier_off(efx->net_dev);
		return;
	}

	if (efx->link_up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (efx->link_up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (efx->link_up) {
		EFX_INFO(efx, "link up at %uMbps %s-duplex (MTU %d)%s\n",
			 efx->link_speed, efx->link_fd ? "full" : "half",
			 efx->net_dev->mtu,
			 (efx->promiscuous ? " [PROMISC]" : ""));
	} else {
		EFX_INFO(efx, "link down\n");
	}
}
static void efx_fini_port(struct efx_nic *efx);

/* This call reinitialises the MAC to pick up new PHY settings. The
 * caller must hold the mac_lock */
void __efx_reconfigure_port(struct efx_nic *efx)
{
	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
		raw_smp_processor_id());

	/* Serialise the promiscuous flag with efx_set_multicast_list. */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}

	falcon_deconfigure_mac_wrapper(efx);

	/* Reconfigure the PHY, disabling transmit in mac level loopback. */
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
	efx->phy_op->reconfigure(efx);

	if (falcon_switch_mac(efx))
		goto fail;

	efx->mac_op->reconfigure(efx);

	/* Inform kernel of loss/gain of carrier */
	efx_link_status_changed(efx);
	return;

fail:
	EFX_ERR(efx, "failed to reconfigure MAC\n");
	efx->port_enabled = false;
	efx_fini_port(efx);
}
/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
void efx_reconfigure_port(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}

/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
 * we don't efx_reconfigure_port() if the port is disabled. Care is taken
 * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
static void efx_phy_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, phy_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}

static void efx_mac_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		efx->mac_op->irq(efx);
	mutex_unlock(&efx->mac_lock);
}
static int efx_probe_port(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "create port\n");

	/* Connect up MAC/PHY operations table and read MAC address */
	rc = falcon_probe_port(efx);
	if (rc)
		goto err;

	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	/* Sanity check MAC address */
	if (is_valid_ether_addr(efx->mac_address)) {
		memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
	} else {
		EFX_ERR(efx, "invalid MAC address %pM\n",
			efx->mac_address);
		if (!allow_bad_hwaddr) {
			rc = -EINVAL;
			goto err;
		}
		random_ether_addr(efx->net_dev->dev_addr);
		EFX_INFO(efx, "using locally-generated MAC %pM\n",
			 efx->net_dev->dev_addr);
	}

	return 0;

 err:
	efx_remove_port(efx);
	return rc;
}
static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "init port\n");

	rc = efx->phy_op->init(efx);
	if (rc)
		return rc;
	mutex_lock(&efx->mac_lock);
	efx->phy_op->reconfigure(efx);
	rc = falcon_switch_mac(efx);
	mutex_unlock(&efx->mac_lock);
	if (rc)
		goto fail;
	efx->mac_op->reconfigure(efx);

	efx->port_initialized = true;
	efx_stats_enable(efx);
	return 0;

fail:
	efx->phy_op->fini(efx);
	return rc;
}
/* Allow efx_reconfigure_port() to be scheduled, and close the window
 * between efx_stop_port and efx_flush_all whereby a previously scheduled
 * efx_phy_work()/efx_mac_work() may have been cancelled */
static void efx_start_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;
	__efx_reconfigure_port(efx);
	efx->mac_op->irq(efx);
	mutex_unlock(&efx->mac_lock);
}

/* Prevent efx_phy_work, efx_mac_work, and efx_monitor() from executing,
 * and efx_set_multicast_list() from scheduling efx_phy_work. efx_phy_work
 * and efx_mac_work may still be scheduled via NAPI processing until
 * efx_flush_all() is called */
static void efx_stop_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "stop port\n");

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}
}
static void efx_fini_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "shut down port\n");

	if (!efx->port_initialized)
		return;

	efx_stats_disable(efx);
	efx->phy_op->fini(efx);
	efx->port_initialized = false;

	efx->link_up = false;
	efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying port\n");

	falcon_remove_port(efx);
}
/**************************************************************************
 *
 * NIC handling
 *
 *************************************************************************/

/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	int rc;

	EFX_LOG(efx, "initialising I/O\n");

	rc = pci_enable_device(pci_dev);
	if (rc) {
		EFX_ERR(efx, "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask.  Try all possibilities from our
	 * genuine mask down to 32 bits, because some architectures
	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		if (pci_dma_supported(pci_dev, dma_mask) &&
		    ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
			break;
		dma_mask >>= 1;
	}
	if (rc) {
		EFX_ERR(efx, "could not find a suitable DMA mask\n");
		goto fail2;
	}
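	/* For example: starting from e.g. a 46-bit genuine mask (the comment
	 * above implies 46 bits for this hardware), each iteration halves
	 * the mask, so a platform limited to 40-bit DMA settles on
	 * 0xffffffffff. */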
	EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);

	rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
	if (rc) {
		/* pci_set_consistent_dma_mask() is not *allowed* to
		 * fail with a mask that pci_set_dma_mask() accepted,
		 * but just in case...
		 */
		EFX_ERR(efx, "failed to set consistent DMA mask\n");
		goto fail2;
	}

	efx->membase_phys = pci_resource_start(efx->pci_dev,
					       efx->type->mem_bar);
	rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
	if (rc) {
		EFX_ERR(efx, "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys,
				       efx->type->mem_map_size);
	if (!efx->membase) {
		EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n",
			efx->type->mem_bar,
			(unsigned long long)efx->membase_phys,
			efx->type->mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n",
		efx->type->mem_bar, (unsigned long long)efx->membase_phys,
		efx->type->mem_map_size, efx->membase);

	return 0;

 fail4:
	pci_release_region(efx->pci_dev, efx->type->mem_bar);
 fail3:
	efx->membase_phys = 0;
 fail2:
	pci_disable_device(efx->pci_dev);
 fail1:
	return rc;
}
static void efx_fini_io(struct efx_nic *efx)
{
	EFX_LOG(efx, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		pci_release_region(efx->pci_dev, efx->type->mem_bar);
		efx->membase_phys = 0;
	}

	pci_disable_device(efx->pci_dev);
}
/* Get number of RX queues wanted.  Return number of online CPU
 * packages in the expectation that an IRQ balancer will spread
 * interrupts across them. */
static int efx_wanted_rx_queues(void)
{
	cpumask_var_t core_mask;
	int count;
	int cpu;

	if (unlikely(!alloc_cpumask_var(&core_mask, GFP_KERNEL))) {
		printk(KERN_WARNING
		       "sfc: RSS disabled due to allocation failure\n");
		return 1;
	}

	cpumask_clear(core_mask);
	count = 0;
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, core_mask)) {
			++count;
			cpumask_or(core_mask, core_mask,
				   topology_core_cpumask(cpu));
		}
	}

	free_cpumask_var(core_mask);
	return count;
}
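/* For example: on a two-package, quad-core system the first CPU seen in
 * each package increments the count and ORs its siblings into core_mask,
 * so the remaining cores are skipped and the function returns 2. */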
/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static void efx_probe_interrupts(struct efx_nic *efx)
{
	int max_channels =
		min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
	int rc, i;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		int wanted_ints;
		int rx_queues;

		/* We want one RX queue and interrupt per CPU package
		 * (or as specified by the rss_cpus module parameter).
		 * We will need one channel per interrupt.
		 */
		rx_queues = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
		wanted_ints = rx_queues + (separate_tx_channels ? 1 : 0);
		wanted_ints = min(wanted_ints, max_channels);

		for (i = 0; i < wanted_ints; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix(efx->pci_dev, xentries, wanted_ints);
		if (rc > 0) {
			EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors"
				" available (%d < %d).\n", rc, wanted_ints);
			EFX_ERR(efx, "WARNING: Performance may be reduced.\n");
			EFX_BUG_ON_PARANOID(rc >= wanted_ints);
			wanted_ints = rc;
			rc = pci_enable_msix(efx->pci_dev, xentries,
					     wanted_ints);
		}

		if (rc == 0) {
			efx->n_rx_queues = min(rx_queues, wanted_ints);
			efx->n_channels = wanted_ints;
			for (i = 0; i < wanted_ints; i++)
				efx->channel[i].irq = xentries[i].vector;
		} else {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EFX_INT_MODE_MSI;
			EFX_ERR(efx, "could not enable MSI-X\n");
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_rx_queues = 1;
		efx->n_channels = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx->channel[0].irq = efx->pci_dev->irq;
		} else {
			EFX_ERR(efx, "could not enable MSI\n");
			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_rx_queues = 1;
		efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
		efx->legacy_irq = efx->pci_dev->irq;
	}
}
static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

static void efx_set_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	efx_for_each_tx_queue(tx_queue, efx) {
		if (separate_tx_channels)
			tx_queue->channel = &efx->channel[efx->n_channels - 1];
		else
			tx_queue->channel = &efx->channel[0];
		tx_queue->channel->used_flags |= EFX_USED_BY_TX;
	}

	efx_for_each_rx_queue(rx_queue, efx) {
		rx_queue->channel = &efx->channel[rx_queue->queue];
		rx_queue->channel->used_flags |= EFX_USED_BY_RX;
	}
}
static int efx_probe_nic(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = falcon_probe_nic(efx);
	if (rc)
		return rc;

	/* Determine the number of channels and RX queues by trying to hook
	 * in MSI-X interrupts. */
	efx_probe_interrupts(efx);

	efx_set_channels(efx);

	/* Initialise the interrupt moderation settings */
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);

	return 0;
}

static void efx_remove_nic(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying NIC\n");

	efx_remove_interrupts(efx);
	falcon_remove_nic(efx);
}
/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

static int efx_probe_all(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Create NIC */
	rc = efx_probe_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create NIC\n");
		goto fail1;
	}

	/* Create port */
	rc = efx_probe_port(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create port\n");
		goto fail2;
	}

	/* Create channels */
	efx_for_each_channel(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			EFX_ERR(efx, "failed to create channel %d\n",
				channel->channel);
			goto fail3;
		}
	}
	efx_set_channel_names(efx);

	return 0;

 fail3:
	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
	efx_remove_port(efx);
 fail2:
	efx_remove_nic(efx);
 fail1:
	return rc;
}
/* Called after previous invocation(s) of efx_stop_all, restarts the
 * port, kernel transmit queue, NAPI processing and hardware interrupts,
 * and ensures that the port is scheduled to be reconfigured.
 * This function is safe to call multiple times when the NIC is in any
 * state. */
static void efx_start_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled)
		return;
	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
		return;
	if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
		return;

	/* Mark the port as enabled so port reconfigurations can start, then
	 * restart the transmit interface early so the watchdog timer stops */
	efx_start_port(efx);
	if (efx_dev_registered(efx))
		efx_wake_queue(efx);

	efx_for_each_channel(channel, efx)
		efx_start_channel(channel);

	falcon_enable_interrupts(efx);

	/* Start hardware monitor if we're in RUNNING */
	if (efx->state == STATE_RUNNING)
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
}
/* Flush all delayed work. Should only be called when no more delayed work
 * will be scheduled. This doesn't flush pending online resets (efx_reset),
 * since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
	struct efx_rx_queue *rx_queue;

	/* Make sure the hardware monitor is stopped */
	cancel_delayed_work_sync(&efx->monitor_work);

	/* Ensure that all RX slow refills are complete. */
	efx_for_each_rx_queue(rx_queue, efx)
		cancel_delayed_work_sync(&rx_queue->work);

	/* Stop scheduled port reconfigurations */
	cancel_work_sync(&efx->mac_work);
	cancel_work_sync(&efx->phy_work);
}
/* Quiesce hardware and software without bringing the link down.
 * Safe to call multiple times, when the NIC and interface are in any
 * state. The caller is guaranteed to subsequently be in a position
 * to modify any hardware and software state they see fit without
 * taking locks. */
static void efx_stop_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	/* Disable interrupts and wait for ISR to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);
	}

	/* Stop all NAPI processing and synchronous rx refills */
	efx_for_each_channel(channel, efx)
		efx_stop_channel(channel);

	/* Stop all asynchronous port reconfigurations. Since all
	 * event processing has already been stopped, there is no
	 * window to lose PHY events */
	efx_stop_port(efx);

	/* Flush efx_phy_work, efx_mac_work, refill_workqueue, monitor_work */
	efx_flush_all(efx);

	/* Isolate the MAC from the TX and RX engines, so that queue
	 * flushes will complete in a timely fashion. */
	falcon_drain_tx_fifo(efx);

	/* Stop the kernel transmit interface late, so the watchdog
	 * timer isn't ticking over the flush */
	if (efx_dev_registered(efx)) {
		efx_stop_queue(efx);
		netif_tx_lock_bh(efx->net_dev);
		netif_tx_unlock_bh(efx->net_dev);
	}
}
static void efx_remove_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
	efx_remove_port(efx);
	efx_remove_nic(efx);
}

/* A convenience function to safely flush all the queues */
void efx_flush_queues(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_stop_all(efx);
	efx_fini_channels(efx);
	efx_init_channels(efx);
	efx_start_all(efx);
}
/**************************************************************************
 *
 * Interrupt moderation
 *
 *************************************************************************/

/* Set interrupt moderation parameters */
void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
			     bool rx_adaptive)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_for_each_tx_queue(tx_queue, efx)
		tx_queue->channel->irq_moderation = tx_usecs;

	efx->irq_rx_adaptive = rx_adaptive;
	efx->irq_rx_moderation = rx_usecs;
	efx_for_each_rx_queue(rx_queue, efx)
		rx_queue->channel->irq_moderation = rx_usecs;
}
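/* The initial values come from the rx_irq_mod_usec/tx_irq_mod_usec module
 * parameters (see top of file); as those comments note, the settings can
 * be changed after load through the ethtool coalescing interface, e.g.
 * "ethtool -C eth0 rx-usecs 60" (illustrative command). */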
/**************************************************************************
 *
 * Hardware monitor
 *
 *************************************************************************/

/* Run periodically off the general workqueue. Serialised against
 * efx_reconfigure_port via the mac_lock */
static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);
	int rc;

	EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
		  raw_smp_processor_id());

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in place, which will likely do
	 * most of the work of check_hw() anyway. */
	if (!mutex_trylock(&efx->mac_lock))
		goto out_requeue;
	if (!efx->port_enabled)
		goto out_unlock;
	rc = efx->board_info.monitor(efx);
	if (rc) {
		EFX_ERR(efx, "Board sensor %s; shutting down PHY\n",
			(rc == -ERANGE) ? "reported fault" : "failed");
		efx->phy_mode |= PHY_MODE_LOW_POWER;
		falcon_sim_phy_event(efx);
	}
	efx->phy_op->poll(efx);
	efx->mac_op->poll(efx);

out_unlock:
	mutex_unlock(&efx->mac_lock);
out_requeue:
	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   efx_monitor_interval);
}
/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	EFX_ASSERT_RESET_SERIALISED(efx);
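	/* Note on the conversion below (inferred from the masks used): the
	 * older packed format is recognised by bit 10 (0x0400) being set
	 * with bits 11-15 clear; the XOR clears that marker and sets
	 * MDIO_PHY_ID_C45 instead, which is the flag mdio_mii_ioctl()
	 * expects for clause-45 PHYs. */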
	/* Convert phy_id from older PRTAD/DEVAD format */
	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
	    (data->phy_id & 0xfc00) == 0x0400)
		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;

	return mdio_mii_ioctl(&efx->mdio, data, cmd);
}
/**************************************************************************
 *
 * NAPI interface
 *
 *************************************************************************/

static int efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		channel->napi_dev = efx->net_dev;
		netif_napi_add(channel->napi_dev, &channel->napi_str,
			       efx_poll, napi_weight);
	}
	return 0;
}

static void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		if (channel->napi_dev)
			netif_napi_del(&channel->napi_str);
		channel->napi_dev = NULL;
	}
}
/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

#endif
/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	EFX_ASSERT_RESET_SERIALISED(efx);

	EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());

	if (efx->state == STATE_DISABLED)
		return -EIO;
	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;

	efx_start_all(efx);
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int efx_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());

	if (efx->state != STATE_DISABLED) {
		/* Stop the device and flush all the channels */
		efx_stop_all(efx);
		efx_fini_channels(efx);
		efx_init_channels(efx);
	}

	return 0;
}
void efx_stats_disable(struct efx_nic *efx)
{
	spin_lock(&efx->stats_lock);
	++efx->stats_disable_count;
	spin_unlock(&efx->stats_lock);
}

void efx_stats_enable(struct efx_nic *efx)
{
	spin_lock(&efx->stats_lock);
	--efx->stats_disable_count;
	spin_unlock(&efx->stats_lock);
}
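/* stats_disable_count nests: each efx_stats_disable() must be paired with
 * an efx_stats_enable(). While it is non-zero, efx_net_stats() below skips
 * the MAC/NIC stats fetch and reports the last snapshot. */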
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_mac_stats *mac_stats = &efx->mac_stats;
	struct net_device_stats *stats = &net_dev->stats;

	/* Update stats if possible, but do not wait if another thread
	 * is updating them or if MAC stats fetches are temporarily
	 * disabled; slightly stale stats are acceptable.
	 */
	if (!spin_trylock(&efx->stats_lock))
		return stats;
	if (!efx->stats_disable_count) {
		efx->mac_op->update_stats(efx);
		falcon_update_nic_stats(efx);
	}
	spin_unlock(&efx->stats_lock);

	stats->rx_packets = mac_stats->rx_packets;
	stats->tx_packets = mac_stats->tx_packets;
	stats->rx_bytes = mac_stats->rx_bytes;
	stats->tx_bytes = mac_stats->tx_bytes;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_collision;
	stats->rx_length_errors = (mac_stats->rx_gtjumbo +
				   mac_stats->rx_length_error);
	stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
	stats->rx_crc_errors = mac_stats->rx_bad;
	stats->rx_frame_errors = mac_stats->rx_align_error;
	stats->rx_fifo_errors = mac_stats->rx_overflow;
	stats->rx_missed_errors = mac_stats->rx_missed;
	stats->tx_window_errors = mac_stats->tx_late_collision;

	stats->rx_errors = (stats->rx_length_errors +
			    stats->rx_over_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    stats->rx_fifo_errors +
			    stats->rx_missed_errors +
			    mac_stats->rx_symbol_error);
	stats->tx_errors = (stats->tx_window_errors +
			    mac_stats->tx_bad);

	return stats;
}
/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d:"
		" resetting channels\n",
		atomic_read(&efx->netif_stop_count), efx->port_enabled);

	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}

/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc = 0;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (new_mtu > EFX_MAX_MTU)
		return -EINVAL;

	efx_stop_all(efx);

	EFX_LOG(efx, "changing MTU to %d\n", new_mtu);

	efx_fini_channels(efx);
	net_dev->mtu = new_mtu;
	efx_init_channels(efx);

	efx_start_all(efx);
	return rc;
}
static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct sockaddr *addr = data;
	char *new_addr = addr->sa_data;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (!is_valid_ether_addr(new_addr)) {
		EFX_ERR(efx, "invalid ethernet MAC address requested: %pM\n",
			new_addr);
		return -EINVAL;
	}

	memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);

	/* Reconfigure the MAC */
	efx_reconfigure_port(efx);

	return 0;
}
/* Context: netif_addr_lock held, BHs disabled. */
static void efx_set_multicast_list(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct dev_mc_list *mc_list = net_dev->mc_list;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	bool promiscuous = !!(net_dev->flags & IFF_PROMISC);
	bool changed = (efx->promiscuous != promiscuous);
	u32 crc;
	int bit;
	int i;

	efx->promiscuous = promiscuous;

	/* Build multicast hash table */
	if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		for (i = 0; i < net_dev->mc_count; i++) {
			crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			set_bit_le(bit, mc_hash->byte);
			mc_list = mc_list->next;
		}
	}
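	/* Each list entry set one bit above: the bit index is the low-order
	 * bits of the little-endian CRC32 of the 6-byte MAC address (the
	 * mask implies EFX_MCAST_HASH_ENTRIES is a power of two). The
	 * hardware is assumed to apply the same hash to incoming frames
	 * and accept those whose bit is set. */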
	if (!efx->port_enabled)
		/* Delay pushing settings until efx_start_port() */
		return;

	if (changed)
		queue_work(efx->workqueue, &efx->phy_work);

	/* Create and activate new global multicast hash table */
	falcon_set_multicast_hash(efx);
}
static const struct net_device_ops efx_netdev_ops = {
	.ndo_open		= efx_net_open,
	.ndo_stop		= efx_net_stop,
	.ndo_get_stats		= efx_net_stats,
	.ndo_tx_timeout		= efx_watchdog,
	.ndo_start_xmit		= efx_hard_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= efx_ioctl,
	.ndo_change_mtu		= efx_change_mtu,
	.ndo_set_mac_address	= efx_set_mac_address,
	.ndo_set_multicast_list = efx_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= efx_netpoll,
#endif
};

static void efx_update_name(struct efx_nic *efx)
{
	strcpy(efx->name, efx->net_dev->name);
	efx_mtd_rename(efx);
	efx_set_channel_names(efx);
}
static int efx_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = ptr;

	if (net_dev->netdev_ops == &efx_netdev_ops &&
	    event == NETDEV_CHANGENAME)
		efx_update_name(netdev_priv(net_dev));

	return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
	.notifier_call = efx_netdev_event,
};

static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
	return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL);
static int efx_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &efx_netdev_ops;
	SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(efx->net_dev);

	/* Clear MAC statistics */
	efx->mac_op->update_stats(efx);
	memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));

	rc = register_netdev(net_dev);
	if (rc) {
		EFX_ERR(efx, "could not register net dev\n");
		return rc;
	}

	rtnl_lock();
	efx_update_name(efx);
	rtnl_unlock();

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
	if (rc) {
		EFX_ERR(efx, "failed to init net dev attributes\n");
		goto fail_registered;
	}

	return 0;

fail_registered:
	unregister_netdev(net_dev);
	return rc;
}
static void efx_unregister_netdev(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;

	if (!efx->net_dev)
		return;

	BUG_ON(netdev_priv(efx->net_dev) != efx);

	/* Free up any skbs still remaining. This has to happen before
	 * we try to unregister the netdev as running their destructors
	 * may be needed to get the device ref. count to 0. */
	efx_for_each_tx_queue(tx_queue, efx)
		efx_release_tx_buffers(tx_queue);

	if (efx_dev_registered(efx)) {
		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
		device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
		unregister_netdev(efx->net_dev);
	}
}
/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/
2008-09-01 15:48:50 +04:00
/* Tears down the entire software state and most of the hardware state
 * before reset. */
2009-01-29 20:50:51 +03:00
void efx_reset_down(struct efx_nic *efx, enum reset_type method,
		    struct ethtool_cmd *ecmd)
2008-04-27 15:55:59 +04:00
{
	EFX_ASSERT_RESET_SERIALISED(efx);

2009-01-29 21:00:07 +03:00
	efx_stats_disable(efx);
2008-09-01 15:48:50 +04:00
	efx_stop_all(efx);
	mutex_lock(&efx->mac_lock);
2008-11-04 23:34:28 +03:00
	mutex_lock(&efx->spi_lock);
2008-09-01 15:48:50 +04:00
2008-12-13 08:50:08 +03:00
	efx->phy_op->get_settings(efx, ecmd);
2008-04-27 15:55:59 +04:00
	efx_fini_channels(efx);
2009-01-29 20:50:51 +03:00
	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
		efx->phy_op->fini(efx);
2008-04-27 15:55:59 +04:00
}
2008-09-01 15:48:50 +04:00
/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released.  A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled.  If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_TYPE_DISABLE. */
2009-01-29 20:50:51 +03:00
int efx_reset_up(struct efx_nic *efx, enum reset_type method,
		 struct ethtool_cmd *ecmd, bool ok)
2008-04-27 15:55:59 +04:00
{
	int rc;

2008-09-01 15:48:50 +04:00
	EFX_ASSERT_RESET_SERIALISED(efx);
2008-04-27 15:55:59 +04:00
2008-09-01 15:48:50 +04:00
	rc = falcon_init_nic(efx);
2008-04-27 15:55:59 +04:00
	if (rc) {
2008-09-01 15:48:50 +04:00
		EFX_ERR(efx, "failed to initialise NIC\n");
		ok = false;
2008-04-27 15:55:59 +04:00
	}

2009-01-29 20:50:51 +03:00
	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
		if (ok) {
			rc = efx->phy_op->init(efx);
			if (rc)
				ok = false;
2009-03-04 12:52:52 +03:00
		}
		if (!ok)
2009-01-29 20:50:51 +03:00
			efx->port_initialized = false;
	}

2008-09-01 15:48:50 +04:00
	if (ok) {
		efx_init_channels(efx);
2008-04-27 15:55:59 +04:00
2008-12-13 08:50:08 +03:00
		if (efx->phy_op->set_settings(efx, ecmd))
2008-09-01 15:48:50 +04:00
			EFX_ERR(efx, "could not restore PHY settings\n");
	}

2008-11-04 23:34:28 +03:00
	mutex_unlock(&efx->spi_lock);
2008-09-01 15:48:50 +04:00
	mutex_unlock(&efx->mac_lock);

2008-09-01 15:49:02 +04:00
	if (ok) {
2008-09-01 15:48:50 +04:00
		efx_start_all(efx);
2009-01-29 21:00:07 +03:00
		efx_stats_enable(efx);
2008-09-01 15:49:02 +04:00
	}
2008-04-27 15:55:59 +04:00
	return rc;
}
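/* The two halves above are intended to bracket the actual hardware
 * reset, as efx_reset() below demonstrates (sketch):
 *
 *	efx_reset_down(efx, method, &ecmd);
 *	rc = falcon_reset_hw(efx, method);
 *	...
 *	rc = efx_reset_up(efx, method, &ecmd, ok);
 *
 * Passing ok = false releases the locks taken in efx_reset_down()
 * without restarting the RX/TX engines, which is what the
 * RESET_TYPE_DISABLE path relies on. */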
/* Reset the NIC as transparently as possible.  Does not reset the PHY.
 * Note that the reset may fail, in which case the card will be left
 * in a most-probably-unusable state.
 *
 * This function will sleep.  You cannot reset from within an atomic
 * context; use efx_schedule_reset() instead.
 *
 * Grabs the rtnl_lock.
 */
static int efx_reset(struct efx_nic *efx)
{
	struct ethtool_cmd ecmd;
	enum reset_type method = efx->reset_pending;
2008-12-27 00:48:51 +03:00
	int rc = 0;
2008-04-27 15:55:59 +04:00
	/* Serialise with kernel interfaces */
	rtnl_lock();

	/* If we're not RUNNING then don't reset. Leave the reset_pending
	 * flag set so that efx_pci_probe_main will be retried */
	if (efx->state != STATE_RUNNING) {
		EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
2008-12-27 00:48:51 +03:00
		goto out_unlock;
2008-04-27 15:55:59 +04:00
	}

	EFX_INFO(efx, "resetting (%d)\n", method);

2009-01-29 20:50:51 +03:00
	efx_reset_down(efx, method, &ecmd);
2008-04-27 15:55:59 +04:00
	rc = falcon_reset_hw(efx, method);
	if (rc) {
		EFX_ERR(efx, "failed to reset hardware\n");
2008-12-27 00:48:51 +03:00
		goto out_disable;
2008-04-27 15:55:59 +04:00
	}

	/* Allow resets to be rescheduled. */
	efx->reset_pending = RESET_TYPE_NONE;

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE case, since this driver generally assumes the
	 * hardware can respond to requests. */
	pci_set_master(efx->pci_dev);

	/* Leave device stopped if necessary */
	if (method == RESET_TYPE_DISABLE) {
2009-01-29 20:50:51 +03:00
		efx_reset_up(efx, method, &ecmd, false);
2008-04-27 15:55:59 +04:00
		rc = -EIO;
2008-12-27 00:48:51 +03:00
	} else {
2009-01-29 20:50:51 +03:00
		rc = efx_reset_up(efx, method, &ecmd, true);
2008-04-27 15:55:59 +04:00
	}

2008-12-27 00:48:51 +03:00
 out_disable:
	if (rc) {
		EFX_ERR(efx, "has been disabled\n");
		efx->state = STATE_DISABLED;
		dev_close(efx->net_dev);
	} else {
		EFX_LOG(efx, "reset complete\n");
	}
2008-04-27 15:55:59 +04:00
2008-12-27 00:48:51 +03:00
 out_unlock:
2008-04-27 15:55:59 +04:00
	rtnl_unlock();
	return rc;
}
/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
	struct efx_nic *nic = container_of(data, struct efx_nic, reset_work);

	efx_reset(nic);
}

void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
	enum reset_type method;

	if (efx->reset_pending != RESET_TYPE_NONE) {
		EFX_INFO(efx, "quenching already scheduled reset\n");
		return;
	}

	switch (type) {
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
		method = type;
		break;
	case RESET_TYPE_RX_RECOVERY:
	case RESET_TYPE_RX_DESC_FETCH:
	case RESET_TYPE_TX_DESC_FETCH:
	case RESET_TYPE_TX_SKIP:
		method = RESET_TYPE_INVISIBLE;
		break;
	default:
		method = RESET_TYPE_ALL;
		break;
	}

	if (method != type)
		EFX_LOG(efx, "scheduling reset (%d:%d)\n", type, method);
	else
		EFX_LOG(efx, "scheduling reset (%d)\n", method);

	efx->reset_pending = method;

2008-12-13 08:33:02 +03:00
	queue_work(reset_workqueue, &efx->reset_work);
2008-04-27 15:55:59 +04:00
}
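/* Example (hypothetical caller, not from this file): an interrupt
 * handler that detects an RX descriptor-fetch error cannot sleep, so
 * it requests recovery asynchronously:
 *
 *	efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
 *
 * The switch above downgrades this to RESET_TYPE_INVISIBLE, and the
 * reset itself then runs in process context on reset_workqueue. */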
/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static struct pci_device_id efx_pci_table[] __devinitdata = {
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
	 .driver_data = (unsigned long) &falcon_a_nic_type},
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
	 .driver_data = (unsigned long) &falcon_b_nic_type},
	{0}			/* end of list */
};
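/* The driver_data cookie stored in each entry above is how the probe
 * code recovers the board type: efx_pci_probe() below casts
 * entry->driver_data back to a struct efx_nic_type pointer, so one
 * probe routine serves both the Falcon A and Falcon B parts. */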
/**************************************************************************
 *
 * Dummy PHY/MAC/Board operations
 *
2008-09-01 15:48:36 +04:00
 * Can be used for some unimplemented operations
2008-04-27 15:55:59 +04:00
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/
int efx_port_dummy_op_int(struct efx_nic *efx)
{
	return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}
2008-09-01 15:46:50 +04:00
void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink) {}
2008-04-27 15:55:59 +04:00

2008-12-13 08:50:08 +03:00
static struct efx_mac_operations efx_dummy_mac_operations = {
	.reconfigure	= efx_port_dummy_op_void,
2008-12-13 08:59:24 +03:00
	.poll		= efx_port_dummy_op_void,
	.irq		= efx_port_dummy_op_void,
2008-12-13 08:50:08 +03:00
};

2008-04-27 15:55:59 +04:00
static struct efx_phy_operations efx_dummy_phy_operations = {
	.init		 = efx_port_dummy_op_int,
	.reconfigure	 = efx_port_dummy_op_void,
2008-12-13 08:59:24 +03:00
	.poll		 = efx_port_dummy_op_void,
2008-04-27 15:55:59 +04:00
	.fini		 = efx_port_dummy_op_void,
	.clear_interrupt = efx_port_dummy_op_void,
};

static struct efx_board efx_dummy_board_info = {
2008-09-01 15:48:36 +04:00
	.init		= efx_port_dummy_op_int,
2009-02-27 16:08:03 +03:00
	.init_leds	= efx_port_dummy_op_void,
	.set_id_led	= efx_port_dummy_op_blink,
2008-12-13 08:28:20 +03:00
	.monitor	= efx_port_dummy_op_int,
2008-09-01 15:48:36 +04:00
	.blink		= efx_port_dummy_op_blink,
	.fini		= efx_port_dummy_op_void,
2008-04-27 15:55:59 +04:00
};
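/* With these dummy tables installed by efx_init_struct(), callers may
 * invoke any hook unconditionally, e.g.
 *
 *	efx->phy_op->fini(efx);
 *
 * without first testing the pointer for NULL; until a real PHY, MAC or
 * board binding replaces them, such calls are harmless no-ops. */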
/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/
/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 */
static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
			   struct pci_dev *pci_dev, struct net_device *net_dev)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
2008-12-13 08:33:02 +03:00
	int i;
2008-04-27 15:55:59 +04:00
	/* Initialise common structures */
	memset(efx, 0, sizeof(*efx));
	spin_lock_init(&efx->biu_lock);
	spin_lock_init(&efx->phy_lock);
2008-11-04 23:34:28 +03:00
	mutex_init(&efx->spi_lock);
2008-04-27 15:55:59 +04:00
	INIT_WORK(&efx->reset_work, efx_reset_work);
	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
	efx->pci_dev = pci_dev;
	efx->state = STATE_INIT;
	efx->reset_pending = RESET_TYPE_NONE;
	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
	efx->board_info = efx_dummy_board_info;

	efx->net_dev = net_dev;
2008-09-01 15:46:50 +04:00
	efx->rx_checksum_enabled = true;
2008-04-27 15:55:59 +04:00
	spin_lock_init(&efx->netif_stop_lock);
	spin_lock_init(&efx->stats_lock);
2009-01-29 21:00:07 +03:00
	efx->stats_disable_count = 1;
2008-04-27 15:55:59 +04:00
	mutex_init(&efx->mac_lock);
2008-12-13 08:50:08 +03:00
	efx->mac_op = &efx_dummy_mac_operations;
2008-04-27 15:55:59 +04:00
	efx->phy_op = &efx_dummy_phy_operations;
2009-04-29 12:05:08 +04:00
	efx->mdio.dev = net_dev;
2008-12-13 08:59:24 +03:00
	INIT_WORK(&efx->phy_work, efx_phy_work);
	INIT_WORK(&efx->mac_work, efx_mac_work);
2008-04-27 15:55:59 +04:00
	atomic_set(&efx->netif_stop_count, 1);

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		channel = &efx->channel[i];
		channel->efx = efx;
		channel->channel = i;
2008-09-01 15:46:50 +04:00
		channel->work_pending = false;
2008-04-27 15:55:59 +04:00
	}
2008-09-01 15:44:59 +04:00
	for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
2008-04-27 15:55:59 +04:00
		tx_queue = &efx->tx_queue[i];
		tx_queue->efx = efx;
		tx_queue->queue = i;
		tx_queue->buffer = NULL;
		tx_queue->channel = &efx->channel[0]; /* for safety */
2008-05-07 15:51:12 +04:00
		tx_queue->tso_headers_free = NULL;
2008-04-27 15:55:59 +04:00
	}
	for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
		rx_queue = &efx->rx_queue[i];
		rx_queue->efx = efx;
		rx_queue->queue = i;
		rx_queue->channel = &efx->channel[0]; /* for safety */
		rx_queue->buffer = NULL;
		spin_lock_init(&rx_queue->add_lock);
		INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
	}
	efx->type = type;

	/* Sanity-check NIC type */
	EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
			    (efx->type->txd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
			    (efx->type->rxd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->evq_size &
			    (efx->type->evq_size - 1));
	/* As close as we can get to guaranteeing that we don't overflow */
	EFX_BUG_ON_PARANOID(efx->type->evq_size <
			    (efx->type->txd_ring_mask + 1 +
			     efx->type->rxd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);
2008-12-27 00:44:39 +03:00
	/* Would be good to use the net_dev name, but we're too early */
	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
		 pci_name(pci_dev));
	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
2008-12-13 08:33:02 +03:00
	if (!efx->workqueue)
		return -ENOMEM;
2008-07-18 22:01:20 +04:00
2008-04-27 15:55:59 +04:00
	return 0;
}
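/* A note on the sanity checks above: for an unsigned mask m, the test
 * (m & (m + 1)) == 0 holds exactly when m is one less than a power of
 * two, e.g. m = 0xFFF gives m + 1 = 0x1000 and the AND is zero.  The
 * evq_size test (s & (s - 1)) == 0 is the usual power-of-two check. */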
static void efx_fini_struct(struct efx_nic *efx)
{
	if (efx->workqueue) {
		destroy_workqueue(efx->workqueue);
		efx->workqueue = NULL;
	}
}
/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/
/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Skip everything if we never obtained a valid membase */
	if (!efx->membase)
		return;

	efx_fini_channels(efx);
	efx_fini_port(efx);

	/* Shutdown the board, then the NIC and board state */
2008-05-31 01:27:04 +04:00
	efx->board_info.fini(efx);
2008-04-27 15:55:59 +04:00
	falcon_fini_interrupt(efx);
	efx_fini_napi(efx);
	efx_remove_all(efx);
}
/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove(struct pci_dev *pci_dev)
{
	struct efx_nic *efx;

	efx = pci_get_drvdata(pci_dev);
	if (!efx)
		return;

	/* Mark the NIC as fini, then stop the interface */
	rtnl_lock();
	efx->state = STATE_FINI;
	dev_close(efx->net_dev);

	/* Allow any queued efx_resets() to complete */
	rtnl_unlock();

	if (efx->membase == NULL)
		goto out;

	efx_unregister_netdev(efx);
2008-12-13 09:09:38 +03:00
	efx_mtd_remove(efx);
2008-04-27 15:55:59 +04:00
	/* Wait for any scheduled resets to complete. No more will be
	 * scheduled from this point because efx_stop_all() has been
	 * called, we are no longer registered with driverlink, and
	 * the net_devices have been removed. */
2008-12-13 08:33:02 +03:00
	cancel_work_sync(&efx->reset_work);
2008-04-27 15:55:59 +04:00
	efx_pci_remove_main(efx);

 out:
	efx_fini_io(efx);
	EFX_LOG(efx, "shutdown successful\n");

	pci_set_drvdata(pci_dev, NULL);
	efx_fini_struct(efx);
	free_netdev(efx->net_dev);
}
/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
	int rc;

	/* Do start-of-day initialisation */
	rc = efx_probe_all(efx);
	if (rc)
		goto fail1;

	rc = efx_init_napi(efx);
	if (rc)
		goto fail2;

	/* Initialise the board */
	rc = efx->board_info.init(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise board\n");
		goto fail3;
	}

	rc = falcon_init_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise NIC\n");
		goto fail4;
	}

	rc = efx_init_port(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise port\n");
		goto fail5;
	}

2008-09-01 15:48:46 +04:00
	efx_init_channels(efx);
2008-04-27 15:55:59 +04:00
	rc = falcon_init_interrupt(efx);
	if (rc)
2008-09-01 15:48:46 +04:00
		goto fail6;
2008-04-27 15:55:59 +04:00
	return 0;

 fail6:
2008-09-01 15:48:46 +04:00
	efx_fini_channels(efx);
2008-04-27 15:55:59 +04:00
	efx_fini_port(efx);
 fail5:
 fail4:
2008-12-13 08:28:20 +03:00
	efx->board_info.fini(efx);
2008-04-27 15:55:59 +04:00
 fail3:
	efx_fini_napi(efx);
 fail2:
	efx_remove_all(efx);
 fail1:
	return rc;
}
/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically).  It sets up PCI mappings, tests and resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine.  It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
				   const struct pci_device_id *entry)
{
	struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
	struct net_device *net_dev;
	struct efx_nic *efx;
	int i, rc;

	/* Allocate and initialise a struct net_device and struct efx_nic */
	net_dev = alloc_etherdev(sizeof(*efx));
	if (!net_dev)
		return -ENOMEM;
2008-05-07 15:51:12 +04:00
	net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
2009-05-20 03:19:08 +04:00
			      NETIF_F_HIGHDMA | NETIF_F_TSO |
			      NETIF_F_GRO);
2008-09-01 15:46:54 +04:00
	/* Mask for features that also apply to VLAN devices */
	net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
2008-09-01 15:48:23 +04:00
				   NETIF_F_HIGHDMA | NETIF_F_TSO);
2008-09-01 15:43:14 +04:00
	efx = netdev_priv(net_dev);
2008-04-27 15:55:59 +04:00
	pci_set_drvdata(pci_dev, efx);
	rc = efx_init_struct(efx, type, pci_dev, net_dev);
	if (rc)
		goto fail1;

	EFX_INFO(efx, "Solarflare Communications NIC detected\n");

	/* Set up basic I/O (BAR mappings etc) */
	rc = efx_init_io(efx);
	if (rc)
		goto fail2;
	/* No serialisation is required with the reset path because
	 * we're in STATE_INIT. */
	for (i = 0; i < 5; i++) {
		rc = efx_pci_probe_main(efx);

		/* Serialise against efx_reset(). No more resets will be
		 * scheduled since efx_stop_all() has been called, and we
		 * have not and never have been registered with either
		 * the rtnetlink or driverlink layers. */
2008-12-13 08:33:02 +03:00
		cancel_work_sync(&efx->reset_work);
2008-04-27 15:55:59 +04:00
2008-12-13 09:08:16 +03:00
		if (rc == 0) {
			if (efx->reset_pending != RESET_TYPE_NONE) {
				/* If there was a scheduled reset during
				 * probe, the NIC is probably hosed anyway */
				efx_pci_remove_main(efx);
				rc = -EIO;
			} else {
				break;
			}
		}
2008-04-27 15:55:59 +04:00
		/* Retry if a recoverable reset event has been scheduled */
		if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
		    (efx->reset_pending != RESET_TYPE_ALL))
			goto fail3;

		efx->reset_pending = RESET_TYPE_NONE;
	}

	if (rc) {
		EFX_ERR(efx, "Could not reset NIC\n");
		goto fail4;
	}
	/* Switch to the running state before we expose the device to
	 * the OS.  This is to ensure that the initial gathering of
	 * MAC stats succeeds. */
	efx->state = STATE_RUNNING;
2008-12-13 09:09:38 +03:00
	efx_mtd_probe(efx); /* allowed to fail */
2008-04-27 15:55:59 +04:00
	rc = efx_register_netdev(efx);
	if (rc)
		goto fail5;

	EFX_LOG(efx, "initialisation successful\n");
	return 0;

 fail5:
	efx_pci_remove_main(efx);
 fail4:
 fail3:
	efx_fini_io(efx);
 fail2:
	efx_fini_struct(efx);
 fail1:
	EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
	free_netdev(net_dev);
	return rc;
}
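/* A note on the retry loop in efx_pci_probe(): a recoverable reset
 * (RESET_TYPE_INVISIBLE or RESET_TYPE_ALL) scheduled while probing
 * simply restarts efx_pci_probe_main(), up to five attempts in all;
 * any other pending reset type aborts the probe, since the hardware
 * is assumed to be beyond self-repair at that point. */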
static struct pci_driver efx_pci_driver = {
	.name		= EFX_DRIVER_NAME,
	.id_table	= efx_pci_table,
	.probe		= efx_pci_probe,
	.remove		= efx_pci_remove,
};
/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/

module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
static int __init efx_init_module(void)
{
	int rc;

	printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");

	rc = register_netdevice_notifier(&efx_netdev_notifier);
	if (rc)
		goto err_notifier;

	refill_workqueue = create_workqueue("sfc_refill");
	if (!refill_workqueue) {
		rc = -ENOMEM;
		goto err_refill;
	}
2008-12-13 08:33:02 +03:00
	reset_workqueue = create_singlethread_workqueue("sfc_reset");
	if (!reset_workqueue) {
		rc = -ENOMEM;
		goto err_reset;
	}
2008-04-27 15:55:59 +04:00
	rc = pci_register_driver(&efx_pci_driver);
	if (rc < 0)
		goto err_pci;

	return 0;

 err_pci:
2008-12-13 08:33:02 +03:00
	destroy_workqueue(reset_workqueue);
 err_reset:
2008-04-27 15:55:59 +04:00
	destroy_workqueue(refill_workqueue);
 err_refill:
	unregister_netdevice_notifier(&efx_netdev_notifier);
 err_notifier:
	return rc;
}
static void __exit efx_exit_module(void)
{
	printk(KERN_INFO "Solarflare NET driver unloading\n");

	pci_unregister_driver(&efx_pci_driver);
2008-12-13 08:33:02 +03:00
	destroy_workqueue(reset_workqueue);
2008-04-27 15:55:59 +04:00
	destroy_workqueue(refill_workqueue);
	unregister_netdevice_notifier(&efx_netdev_notifier);
}
module_init(efx_init_module);
module_exit(efx_exit_module);

MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and "
	      "Solarflare Communications");
MODULE_DESCRIPTION("Solarflare Communications network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);