Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next

commit d503859678
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -25,6 +25,7 @@
 #include "net_driver.h"
 #include "efx.h"
 #include "nic.h"
+#include "selftest.h"
 
 #include "mcdi.h"
 #include "workarounds.h"
@@ -163,12 +164,12 @@ static int phy_flash_cfg;
 module_param(phy_flash_cfg, int, 0644);
 MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
 
-static unsigned irq_adapt_low_thresh = 10000;
+static unsigned irq_adapt_low_thresh = 8000;
 module_param(irq_adapt_low_thresh, uint, 0644);
 MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");
 
-static unsigned irq_adapt_high_thresh = 20000;
+static unsigned irq_adapt_high_thresh = 16000;
 module_param(irq_adapt_high_thresh, uint, 0644);
 MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");
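
The threshold defaults drop from 10000/20000 to 8000/16000 in step with the nic.c hunk further down, which stops counting TX completions towards the moderation score, so the same traffic now produces a smaller score. Per the parameter descriptions, a score under the low threshold reduces IRQ moderation and one over the high threshold increases it. A simplified sketch of that comparison (illustrative only, not the driver's exact stepping logic):

    static unsigned irq_adapt_low_thresh = 8000;
    static unsigned irq_adapt_high_thresh = 16000;

    /* Called once per sampling window with the accumulated event score. */
    static void adapt_irq_moderation(unsigned score, unsigned *moderation)
    {
            if (score < irq_adapt_low_thresh && *moderation > 1)
                    (*moderation)--;        /* quiet channel: moderate less */
            else if (score > irq_adapt_high_thresh)
                    (*moderation)++;        /* busy channel: moderate more */
    }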
@@ -1564,8 +1565,9 @@ static void efx_start_all(struct efx_nic *efx)
  * since we're holding the rtnl_lock at this point. */
 static void efx_flush_all(struct efx_nic *efx)
 {
-	/* Make sure the hardware monitor is stopped */
+	/* Make sure the hardware monitor and event self-test are stopped */
 	cancel_delayed_work_sync(&efx->monitor_work);
+	efx_selftest_async_cancel(efx);
 	/* Stop scheduled port reconfigurations */
 	cancel_work_sync(&efx->mac_work);
 }
@@ -1825,6 +1827,7 @@ static int efx_net_open(struct net_device *net_dev)
 	efx_link_status_changed(efx);
 
 	efx_start_all(efx);
+	efx_selftest_async_start(efx);
 	return 0;
 }
 
@@ -2375,6 +2378,7 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
 #endif
 	INIT_WORK(&efx->reset_work, efx_reset_work);
 	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
+	INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
 	efx->pci_dev = pci_dev;
 	efx->msg_enable = debug;
 	efx->state = STATE_INIT;
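
The three hunks above give the asynchronous self-test the standard delayed-work lifecycle: the work item is initialised once in efx_init_struct(), scheduled from efx_net_open(), and cancelled synchronously in efx_flush_all() so the callback can never run against a device being torn down. A minimal self-contained sketch of that pattern (hypothetical demo_* names, not driver code):

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct demo_dev {
            struct delayed_work probe_work;
    };

    static void demo_work_fn(struct work_struct *data)
    {
            struct demo_dev *dd = container_of(data, struct demo_dev,
                                               probe_work.work);
            /* examine whatever was set in motion when the work was queued */
            (void)dd;
    }

    static void demo_init(struct demo_dev *dd)
    {
            INIT_DELAYED_WORK(&dd->probe_work, demo_work_fn);
    }

    static void demo_open(struct demo_dev *dd)
    {
            schedule_delayed_work(&dd->probe_work, HZ);     /* fire in ~1s */
    }

    static void demo_stop(struct demo_dev *dd)
    {
            /* waits for a queued or already-running instance to finish */
            cancel_delayed_work_sync(&dd->probe_work);
    }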
@@ -2493,6 +2497,57 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
 	free_netdev(efx->net_dev);
 };
 
+/* NIC VPD information
+ * Called during probe to display the part number of the
+ * installed NIC. VPD is potentially very large but this should
+ * always appear within the first 512 bytes.
+ */
+#define SFC_VPD_LEN 512
+static void efx_print_product_vpd(struct efx_nic *efx)
+{
+	struct pci_dev *dev = efx->pci_dev;
+	char vpd_data[SFC_VPD_LEN];
+	ssize_t vpd_size;
+	int i, j;
+
+	/* Get the vpd data from the device */
+	vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
+	if (vpd_size <= 0) {
+		netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
+		return;
+	}
+
+	/* Get the Read only section */
+	i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
+	if (i < 0) {
+		netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
+		return;
+	}
+
+	j = pci_vpd_lrdt_size(&vpd_data[i]);
+	i += PCI_VPD_LRDT_TAG_SIZE;
+	if (i + j > vpd_size)
+		j = vpd_size - i;
+
+	/* Get the Part number */
+	i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
+	if (i < 0) {
+		netif_err(efx, drv, efx->net_dev, "Part number not found\n");
+		return;
+	}
+
+	j = pci_vpd_info_field_size(&vpd_data[i]);
+	i += PCI_VPD_INFO_FLD_HDR_SIZE;
+	if (i + j > vpd_size) {
+		netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
+		return;
+	}
+
+	netif_info(efx, drv, efx->net_dev,
+		   "Part Number : %.*s\n", j, &vpd_data[i]);
+}
+
+
 /* Main body of NIC initialisation
  * This is called at module load (or hotplug insertion, theoretically).
  */
@@ -2582,6 +2637,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 	netif_info(efx, probe, efx->net_dev,
 		   "Solarflare NIC detected\n");
 
+	efx_print_product_vpd(efx);
+
 	/* Set up basic I/O (BAR mappings etc) */
 	rc = efx_init_io(efx);
 	if (rc)
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -148,7 +148,7 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
 
 static inline void efx_schedule_channel_irq(struct efx_channel *channel)
 {
-	channel->last_irq_cpu = raw_smp_processor_id();
+	channel->event_test_cpu = raw_smp_processor_id();
 	efx_schedule_channel(channel);
 }
 
--- a/drivers/net/ethernet/sfc/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon_boards.c
@@ -709,8 +709,6 @@ static int sfe4003_init(struct efx_nic *efx)
 static const struct falcon_board_type board_types[] = {
 	{
 		.id = FALCON_BOARD_SFE4001,
-		.ref_model = "SFE4001",
-		.gen_type = "10GBASE-T adapter",
 		.init = sfe4001_init,
 		.init_phy = efx_port_dummy_op_void,
 		.fini = sfe4001_fini,
@@ -719,8 +717,6 @@ static const struct falcon_board_type board_types[] = {
 	},
 	{
 		.id = FALCON_BOARD_SFE4002,
-		.ref_model = "SFE4002",
-		.gen_type = "XFP adapter",
 		.init = sfe4002_init,
 		.init_phy = sfe4002_init_phy,
 		.fini = efx_fini_lm87,
@@ -729,8 +725,6 @@ static const struct falcon_board_type board_types[] = {
 	},
 	{
 		.id = FALCON_BOARD_SFE4003,
-		.ref_model = "SFE4003",
-		.gen_type = "10GBASE-CX4 adapter",
 		.init = sfe4003_init,
 		.init_phy = sfe4003_init_phy,
 		.fini = efx_fini_lm87,
@@ -739,8 +733,6 @@ static const struct falcon_board_type board_types[] = {
 	},
 	{
 		.id = FALCON_BOARD_SFN4112F,
-		.ref_model = "SFN4112F",
-		.gen_type = "SFP+ adapter",
 		.init = sfn4112f_init,
 		.init_phy = sfn4112f_init_phy,
 		.fini = efx_fini_lm87,
@@ -763,11 +755,6 @@ int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
 			board->type = &board_types[i];
 
 	if (board->type) {
-		netif_info(efx, probe, efx->net_dev, "board is %s rev %c%d\n",
-			   (efx->pci_dev->subsystem_vendor ==
-			    PCI_VENDOR_ID_SOLARFLARE)
-			   ? board->type->ref_model : board->type->gen_type,
-			   'A' + board->major, board->minor);
 		return 0;
 	} else {
 		netif_err(efx, probe, efx->net_dev, "unknown board type %d\n",
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -324,8 +324,7 @@ enum efx_rx_alloc_method {
  * @eventq: Event queue buffer
  * @eventq_mask: Event queue pointer mask
  * @eventq_read_ptr: Event queue read pointer
- * @last_eventq_read_ptr: Last event queue read pointer value.
- * @last_irq_cpu: Last CPU to handle interrupt for this channel
+ * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
  * @irq_count: Number of IRQs since last adaptive moderation decision
  * @irq_mod_score: IRQ moderation score
  * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@ -355,9 +354,8 @@ struct efx_channel {
 	struct efx_special_buffer eventq;
 	unsigned int eventq_mask;
 	unsigned int eventq_read_ptr;
-	unsigned int last_eventq_read_ptr;
+	int event_test_cpu;
 
-	int last_irq_cpu;
 	unsigned int irq_count;
 	unsigned int irq_mod_score;
 #ifdef CONFIG_RFS_ACCEL
@@ -678,6 +676,7 @@ struct vfdi_status;
  * @irq_status: Interrupt status buffer
  * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
  * @irq_level: IRQ level/index for IRQs not triggered by an event queue
+ * @selftest_work: Work item for asynchronous self-test
  * @mtd_list: List of MTDs attached to the NIC
  * @nic_data: Hardware dependent state
  * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
@@ -791,6 +790,7 @@ struct efx_nic {
 	struct efx_buffer irq_status;
 	unsigned irq_zero_count;
 	unsigned irq_level;
+	struct delayed_work selftest_work;
 
 #ifdef CONFIG_SFC_MTD
 	struct list_head mtd_list;
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -822,7 +822,6 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
 			channel, tx_ev_q_label % EFX_TXQ_TYPES);
 		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
 			      tx_queue->ptr_mask);
-		channel->irq_mod_score += tx_packets;
 		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
 	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
 		/* Rewrite the FIFO write pointer */
@@ -1084,7 +1083,7 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
 	code = _EFX_CHANNEL_MAGIC_CODE(magic);
 
 	if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
-		/* ignore */
+		channel->event_test_cpu = raw_smp_processor_id();
 	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
 		/* The queue must be empty, so we won't receive any rx
 		 * events, so efx_process_channel() won't refill the
@@ -1333,8 +1332,10 @@ void efx_nic_remove_eventq(struct efx_channel *channel)
 }
 
 
-void efx_nic_generate_test_event(struct efx_channel *channel)
+void efx_nic_event_test_start(struct efx_channel *channel)
 {
+	channel->event_test_cpu = -1;
+	smp_wmb();
 	efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
 }
 
@@ -1383,8 +1384,10 @@ void efx_nic_disable_interrupts(struct efx_nic *efx)
  * Interrupt must already have been enabled, otherwise nasty things
  * may happen.
  */
-void efx_nic_generate_interrupt(struct efx_nic *efx)
+void efx_nic_irq_test_start(struct efx_nic *efx)
 {
+	efx->last_irq_cpu = -1;
+	smp_wmb();
 	efx_nic_interrupts(efx, true, true);
 }
 
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -35,10 +35,6 @@ static inline int efx_nic_rev(struct efx_nic *efx)
 
 extern u32 efx_nic_fpga_ver(struct efx_nic *efx);
 
-static inline bool efx_nic_has_mc(struct efx_nic *efx)
-{
-	return efx_nic_rev(efx) >= EFX_REV_SIENA_A0;
-}
 /* NIC has two interlinked PCI functions for the same port. */
 static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
 {
@@ -73,8 +69,6 @@ enum {
 /**
  * struct falcon_board_type - board operations and type information
  * @id: Board type id, as found in NVRAM
- * @ref_model: Model number of Solarflare reference design
- * @gen_type: Generic board type description
  * @init: Allocate resources and initialise peripheral hardware
  * @init_phy: Do board-specific PHY initialisation
  * @fini: Shut down hardware and free resources
@@ -83,8 +77,6 @@ enum {
  */
 struct falcon_board_type {
 	u8 id;
-	const char *ref_model;
-	const char *gen_type;
 	int (*init) (struct efx_nic *nic);
 	void (*init_phy) (struct efx_nic *efx);
 	void (*fini) (struct efx_nic *nic);
@@ -305,14 +297,23 @@ extern void falcon_update_stats_xmac(struct efx_nic *efx);
 /* Interrupts and test events */
 extern int efx_nic_init_interrupt(struct efx_nic *efx);
 extern void efx_nic_enable_interrupts(struct efx_nic *efx);
-extern void efx_nic_generate_test_event(struct efx_channel *channel);
-extern void efx_nic_generate_interrupt(struct efx_nic *efx);
+extern void efx_nic_event_test_start(struct efx_channel *channel);
+extern void efx_nic_irq_test_start(struct efx_nic *efx);
 extern void efx_nic_disable_interrupts(struct efx_nic *efx);
 extern void efx_nic_fini_interrupt(struct efx_nic *efx);
 extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx);
 extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id);
 extern void falcon_irq_ack_a1(struct efx_nic *efx);
+
+static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
+{
+	return ACCESS_ONCE(channel->event_test_cpu);
+}
+static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
+{
+	return ACCESS_ONCE(efx->last_irq_cpu);
+}
+
 /* Global Resources */
 extern int efx_nic_flush_queues(struct efx_nic *efx);
 extern void falcon_start_nic_stats(struct efx_nic *efx);
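
Together with the writers in nic.c above, these accessors form a small cross-CPU handshake: the requester resets a CPU-id field to -1 and issues smp_wmb() before raising the interrupt, the IRQ path records raw_smp_processor_id(), and the poller reads the field with ACCESS_ONCE(). A stripped-down sketch of the same handshake (illustrative, not driver code):

    #include <linux/compiler.h>
    #include <linux/smp.h>

    static int test_cpu = -1;       /* -1 means "no test interrupt seen yet" */

    static void test_start(void)
    {
            test_cpu = -1;
            smp_wmb();              /* make the reset visible before the trigger */
            /* ...raise the test interrupt or magic event here... */
    }

    static void test_irq_handler(void)
    {
            test_cpu = raw_smp_processor_id();      /* record the handling CPU */
    }

    static int test_poll(void)
    {
            return ACCESS_ONCE(test_cpu);   /* >= 0 once the IRQ has arrived */
    }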
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -449,10 +449,8 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
 		efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
 }
 
-/* Pass a received packet up through the generic GRO stack
- *
- * Handles driverlink veto, and passes the fragment up via
- * the appropriate GRO method
+/* Pass a received packet up through GRO.  GRO can handle pages
+ * regardless of checksum state and skbs with a good checksum.
  */
 static void efx_rx_packet_gro(struct efx_channel *channel,
 			      struct efx_rx_buffer *rx_buf,
@@ -461,7 +459,6 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
 	struct napi_struct *napi = &channel->napi_str;
 	gro_result_t gro_result;
 
-	/* Pass the skb/page into the GRO engine */
 	if (rx_buf->flags & EFX_RX_BUF_PAGE) {
 		struct efx_nic *efx = channel->efx;
 		struct page *page = rx_buf->u.page;
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -25,6 +25,16 @@
 #include "selftest.h"
 #include "workarounds.h"
 
+/* IRQ latency can be enormous because:
+ * - All IRQs may be disabled on a CPU for a *long* time by e.g. a
+ *   slow serial console or an old IDE driver doing error recovery
+ * - The PREEMPT_RT patches mostly deal with this, but also allow a
+ *   tasklet or normal task to be given higher priority than our IRQ
+ *   threads
+ * Try to avoid blaming the hardware for this.
+ */
+#define IRQ_TIMEOUT HZ
+
 /*
  * Loopback test packet structure
  *
@@ -77,6 +87,9 @@ struct efx_loopback_state {
 	struct efx_loopback_payload payload;
 };
 
+/* How long to wait for all the packets to arrive (in ms) */
+#define LOOPBACK_TIMEOUT_MS 1000
+
 /**************************************************************************
 *
 * MII, NVRAM and register tests
@@ -130,23 +143,25 @@ static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
 static int efx_test_interrupts(struct efx_nic *efx,
 			       struct efx_self_tests *tests)
 {
+	unsigned long timeout, wait;
 	int cpu;
 
 	netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
 	tests->interrupt = -1;
 
-	/* Reset interrupt flag */
-	efx->last_irq_cpu = -1;
-	smp_wmb();
-
-	efx_nic_generate_interrupt(efx);
+	efx_nic_irq_test_start(efx);
+	timeout = jiffies + IRQ_TIMEOUT;
+	wait = 1;
 
 	/* Wait for arrival of test interrupt. */
 	netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
-	schedule_timeout_uninterruptible(HZ / 10);
-	cpu = ACCESS_ONCE(efx->last_irq_cpu);
-	if (cpu >= 0)
-		goto success;
+	do {
+		schedule_timeout_uninterruptible(wait);
+		cpu = efx_nic_irq_test_irq_cpu(efx);
+		if (cpu >= 0)
+			goto success;
+		wait *= 2;
+	} while (time_before(jiffies, timeout));
 
 	netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
 	return -ETIMEDOUT;
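
The rewritten wait replaces a single fixed sleep (HZ / 10) with exponential back-off under an overall deadline: a fast interrupt is detected after one jiffy, while a genuinely slow one still gets the whole IRQ_TIMEOUT to arrive. The same loop in isolation (a sketch; cond is a caller-supplied predicate):

    #include <linux/jiffies.h>
    #include <linux/sched.h>
    #include <linux/types.h>

    /* Returns true if cond() became true before the ~1s deadline. */
    static bool poll_with_backoff(bool (*cond)(void))
    {
            unsigned long timeout = jiffies + HZ;
            unsigned long wait = 1;         /* first sleep: one jiffy */

            do {
                    schedule_timeout_uninterruptible(wait);
                    if (cond())
                            return true;
                    wait *= 2;              /* back off: 1, 2, 4, ... jiffies */
            } while (time_before(jiffies, timeout));

            return false;
    }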
@@ -159,61 +174,86 @@ static int efx_test_interrupts(struct efx_nic *efx,
 }
 
 /* Test generation and receipt of interrupting events */
-static int efx_test_eventq_irq(struct efx_channel *channel,
+static int efx_test_eventq_irq(struct efx_nic *efx,
 			       struct efx_self_tests *tests)
 {
-	struct efx_nic *efx = channel->efx;
-	unsigned int read_ptr;
-	bool napi_ran, dma_seen, int_seen;
+	struct efx_channel *channel;
+	unsigned int read_ptr[EFX_MAX_CHANNELS];
+	unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0;
+	unsigned long timeout, wait;
 
-	read_ptr = channel->eventq_read_ptr;
-	channel->last_irq_cpu = -1;
-	smp_wmb();
+	BUILD_BUG_ON(EFX_MAX_CHANNELS > BITS_PER_LONG);
 
-	efx_nic_generate_test_event(channel);
+	efx_for_each_channel(channel, efx) {
+		read_ptr[channel->channel] = channel->eventq_read_ptr;
+		set_bit(channel->channel, &dma_pend);
+		set_bit(channel->channel, &int_pend);
+		efx_nic_event_test_start(channel);
+	}
+
+	timeout = jiffies + IRQ_TIMEOUT;
+	wait = 1;
 
-	/* Wait for arrival of interrupt.  NAPI processing may or may
+	/* Wait for arrival of interrupts.  NAPI processing may or may
 	 * not complete in time, but we can cope in any case.
 	 */
-	msleep(10);
-	napi_disable(&channel->napi_str);
-	if (channel->eventq_read_ptr != read_ptr) {
-		napi_ran = true;
-		dma_seen = true;
-		int_seen = true;
-	} else {
-		napi_ran = false;
-		dma_seen = efx_nic_event_present(channel);
-		int_seen = ACCESS_ONCE(channel->last_irq_cpu) >= 0;
-	}
-	napi_enable(&channel->napi_str);
-	efx_nic_eventq_read_ack(channel);
-
-	tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
-	tests->eventq_int[channel->channel] = int_seen ? 1 : -1;
+	do {
+		schedule_timeout_uninterruptible(wait);
+
+		efx_for_each_channel(channel, efx) {
+			napi_disable(&channel->napi_str);
+			if (channel->eventq_read_ptr !=
+			    read_ptr[channel->channel]) {
+				set_bit(channel->channel, &napi_ran);
+				clear_bit(channel->channel, &dma_pend);
+				clear_bit(channel->channel, &int_pend);
+			} else {
+				if (efx_nic_event_present(channel))
+					clear_bit(channel->channel, &dma_pend);
+				if (efx_nic_event_test_irq_cpu(channel) >= 0)
+					clear_bit(channel->channel, &int_pend);
+			}
+			napi_enable(&channel->napi_str);
+			efx_nic_eventq_read_ack(channel);
+		}
+
+		wait *= 2;
+	} while ((dma_pend || int_pend) && time_before(jiffies, timeout));
 
-	if (dma_seen && int_seen) {
-		netif_dbg(efx, drv, efx->net_dev,
-			  "channel %d event queue passed (with%s NAPI)\n",
-			  channel->channel, napi_ran ? "" : "out");
-		return 0;
-	} else {
-		/* Report failure and whether either interrupt or DMA worked */
-		netif_err(efx, drv, efx->net_dev,
-			  "channel %d timed out waiting for event queue\n",
-			  channel->channel);
-		if (int_seen)
-			netif_err(efx, drv, efx->net_dev,
-				  "channel %d saw interrupt "
-				  "during event queue test\n",
-				  channel->channel);
-		if (dma_seen)
-			netif_err(efx, drv, efx->net_dev,
-				  "channel %d event was generated, but "
-				  "failed to trigger an interrupt\n",
-				  channel->channel);
-		return -ETIMEDOUT;
+	efx_for_each_channel(channel, efx) {
+		bool dma_seen = !test_bit(channel->channel, &dma_pend);
+		bool int_seen = !test_bit(channel->channel, &int_pend);
+
+		tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
+		tests->eventq_int[channel->channel] = int_seen ? 1 : -1;
+
+		if (dma_seen && int_seen) {
+			netif_dbg(efx, drv, efx->net_dev,
+				  "channel %d event queue passed (with%s NAPI)\n",
+				  channel->channel,
+				  test_bit(channel->channel, &napi_ran) ?
+				  "" : "out");
+		} else {
+			/* Report failure and whether either interrupt or DMA
+			 * worked
+			 */
+			netif_err(efx, drv, efx->net_dev,
+				  "channel %d timed out waiting for event queue\n",
+				  channel->channel);
+			if (int_seen)
+				netif_err(efx, drv, efx->net_dev,
+					  "channel %d saw interrupt "
+					  "during event queue test\n",
+					  channel->channel);
+			if (dma_seen)
+				netif_err(efx, drv, efx->net_dev,
+					  "channel %d event was generated, but "
+					  "failed to trigger an interrupt\n",
+					  channel->channel);
+		}
 	}
+
+	return (dma_pend || int_pend) ? -ETIMEDOUT : 0;
 }
 
 static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
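
The reworked efx_test_eventq_irq() fires the test on every channel first and then polls them all together, tracking per-channel progress in single-word bitmaps rather than per-call booleans; hence the BUILD_BUG_ON() that EFX_MAX_CHANNELS fits in BITS_PER_LONG. A self-contained sketch of that bookkeeping (MAX_UNITS is a stand-in for EFX_MAX_CHANNELS):

    #include <linux/bitops.h>
    #include <linux/bug.h>
    #include <linux/types.h>

    #define MAX_UNITS 32    /* stand-in for EFX_MAX_CHANNELS */

    static unsigned long dma_pend, int_pend;

    static void mark_all_pending(unsigned int n)
    {
            unsigned int i;

            BUILD_BUG_ON(MAX_UNITS > BITS_PER_LONG);        /* must fit one word */
            for (i = 0; i < n; i++) {
                    set_bit(i, &dma_pend);  /* DMA event not yet seen */
                    set_bit(i, &int_pend);  /* interrupt not yet seen */
            }
    }

    static bool unit_passed(unsigned int i)
    {
            /* a cleared bit means the corresponding evidence arrived */
            return !test_bit(i, &dma_pend) && !test_bit(i, &int_pend);
    }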
@@ -516,10 +556,10 @@ efx_test_loopback(
 	begin_rc = efx_begin_loopback(tx_queue);
 
 	/* This will normally complete very quickly, but be
-	 * prepared to wait up to 100 ms. */
+	 * prepared to wait much longer. */
 	msleep(1);
 	if (!efx_poll_loopback(efx)) {
-		msleep(100);
+		msleep(LOOPBACK_TIMEOUT_MS);
 		efx_poll_loopback(efx);
 	}
 
@@ -660,9 +700,10 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
 	enum efx_loopback_mode loopback_mode = efx->loopback_mode;
 	int phy_mode = efx->phy_mode;
 	enum reset_type reset_method = RESET_TYPE_INVISIBLE;
-	struct efx_channel *channel;
 	int rc_test = 0, rc_reset = 0, rc;
 
+	efx_selftest_async_cancel(efx);
+
 	/* Online (i.e. non-disruptive) testing
 	 * This checks interrupt generation, event delivery and PHY presence. */
 
@@ -678,11 +719,9 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
 	if (rc && !rc_test)
 		rc_test = rc;
 
-	efx_for_each_channel(channel, efx) {
-		rc = efx_test_eventq_irq(channel, tests);
-		if (rc && !rc_test)
-			rc_test = rc;
-	}
+	rc = efx_test_eventq_irq(efx, tests);
+	if (rc && !rc_test)
+		rc_test = rc;
 
 	if (rc_test)
 		return rc_test;
@@ -757,3 +796,36 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
 	return rc_test;
 }
+
+void efx_selftest_async_start(struct efx_nic *efx)
+{
+	struct efx_channel *channel;
+
+	efx_for_each_channel(channel, efx)
+		efx_nic_event_test_start(channel);
+	schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT);
+}
+
+void efx_selftest_async_cancel(struct efx_nic *efx)
+{
+	cancel_delayed_work_sync(&efx->selftest_work);
+}
+
+void efx_selftest_async_work(struct work_struct *data)
+{
+	struct efx_nic *efx = container_of(data, struct efx_nic,
+					   selftest_work.work);
+	struct efx_channel *channel;
+	int cpu;
+
+	efx_for_each_channel(channel, efx) {
+		cpu = efx_nic_event_test_irq_cpu(channel);
+		if (cpu < 0)
+			netif_err(efx, ifup, efx->net_dev,
+				  "channel %d failed to trigger an interrupt\n",
+				  channel->channel);
+		else
+			netif_dbg(efx, ifup, efx->net_dev,
+				  "channel %d triggered interrupt on CPU %d\n",
+				  channel->channel, cpu);
+	}
+}
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -48,5 +48,8 @@ extern void efx_loopback_rx_packet(struct efx_nic *efx,
 extern int efx_selftest(struct efx_nic *efx,
 			struct efx_self_tests *tests,
 			unsigned flags);
+extern void efx_selftest_async_start(struct efx_nic *efx);
+extern void efx_selftest_async_cancel(struct efx_nic *efx);
+extern void efx_selftest_async_work(struct work_struct *data);
 
 #endif /* EFX_SELFTEST_H */
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -409,8 +409,7 @@ static void siena_remove_nic(struct efx_nic *efx)
 	siena_reset_hw(efx, RESET_TYPE_ALL);
 
 	/* Relinquish the device back to the BMC */
-	if (efx_nic_has_mc(efx))
-		efx_mcdi_drv_attach(efx, false, NULL);
+	efx_mcdi_drv_attach(efx, false, NULL);
 
 	/* Tear down the private nic state */
 	kfree(efx->nic_data);
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -514,7 +514,7 @@ static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
 
 	if (abs_index < EFX_VI_BASE)
 		return true;
-	vf_i = (abs_index - EFX_VI_BASE) * efx_vf_size(efx);
+	vf_i = (abs_index - EFX_VI_BASE) / efx_vf_size(efx);
 	if (vf_i >= efx->vf_init_count)
 		return true;
 
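
The siena_sriov.c change is a plain arithmetic fix: each VF owns efx_vf_size(efx) VIs, so the owning VF is found by dividing the VI offset, not multiplying it. A tiny user-space demonstration with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned vf_size = 8;           /* VIs per VF (hypothetical) */
            unsigned rel_index = 17;        /* abs_index - EFX_VI_BASE */

            /* correct: VI offset 17 falls in VF 2 (VIs 16..23) */
            printf("fixed: vf_i = %u\n", rel_index / vf_size);
            /* old bug: 17 * 8 = 136, far beyond any vf_init_count */
            printf("buggy: vf_i = %u\n", rel_index * vf_size);
            return 0;
    }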