e1000: fix whitespace issues and multi-line comments
Fixes whitespace issues, such as lines exceeding 80 chars, needless blank lines and the use of spaces where tabs are needed. In addition, fix multi-line comments to align with the networking standard.

Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
This commit is contained in:
parent efd9450e7e
commit 6cfbd97b3e
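As an illustration of the two cleanups the message describes, below is a minimal, self-contained C sketch. It is not taken from the driver, and the function names (alloc_counters_old, alloc_counters_net) are invented for the example. The first function carries the old comment style the patch removes, where the closing marker shares the final line of text; the second shows the networking style the patch applies, with the closing marker alone on its own line, plus a parameter list wrapped so no line passes 80 columns, mirroring the kcalloc() wrapping in the diff.

#include <stdio.h>
#include <stdlib.h>

/* old style being removed: the comment closes at the end of
 * the last line of text */
static int *alloc_counters_old(size_t n)
{
	return calloc(n, sizeof(int));
}

/* networking style being applied: text starts on the opening
 * line and the closing marker sits alone on its own line
 */
static int *alloc_counters_net(size_t num_counters,
			       size_t counter_size)
{
	/* the parameter list above is wrapped across two lines so the
	 * prototype stays under 80 columns, mirroring the kcalloc()
	 * wrapping done in e1000_set_ringparam()
	 */
	return calloc(num_counters, counter_size);
}

int main(void)
{
	int *a = alloc_counters_old(4);
	int *b = alloc_counters_net(4, sizeof(int));

	printf("%d %d\n", a ? a[0] : -1, b ? b[0] : -1);
	free(a);
	free(b);
	return 0;
}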
@@ -142,7 +142,8 @@ struct e1000_adapter;
 #define E1000_MNG_VLAN_NONE	(-1)
 
 /* wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer */
+ * so a DMA handle can be stored along with the buffer
+ */
 struct e1000_buffer {
 	struct sk_buff *skb;
 	dma_addr_t dma;
@@ -161,8 +161,8 @@ static int e1000_get_settings(struct net_device *netdev,
 		ethtool_cmd_speed_set(ecmd, adapter->link_speed);
 
 		/* unfortunately FULL_DUPLEX != DUPLEX_FULL
-		 * and HALF_DUPLEX != DUPLEX_HALF */
-
+		 * and HALF_DUPLEX != DUPLEX_HALF
+		 */
 		if (adapter->link_duplex == FULL_DUPLEX)
 			ecmd->duplex = DUPLEX_FULL;
 		else
@@ -179,8 +179,7 @@ static int e1000_get_settings(struct net_device *netdev,
 	if ((hw->media_type == e1000_media_type_copper) &&
 	    netif_carrier_ok(netdev))
 		ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
-				     ETH_TP_MDI_X :
-				     ETH_TP_MDI);
+				     ETH_TP_MDI_X : ETH_TP_MDI);
 	else
 		ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
 
@@ -197,8 +196,7 @@ static int e1000_set_settings(struct net_device *netdev,
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
-	/*
-	 * MDI setting is only allowed when autoneg enabled because
+	/* MDI setting is only allowed when autoneg enabled because
 	 * some hardware doesn't allow MDI setting when speed or
 	 * duplex is forced.
 	 */
@@ -260,8 +258,7 @@ static u32 e1000_get_link(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
-	/*
-	 * If the link is not reported up to netdev, interrupts are disabled,
+	/* If the link is not reported up to netdev, interrupts are disabled,
 	 * and so the physical link state may have changed since we last
 	 * looked. Set get_link_status to make sure that the true link
 	 * state is interrogated, rather than pulling a cached and possibly
@@ -517,15 +514,17 @@ static int e1000_set_eeprom(struct net_device *netdev,
 	ptr = (void *)eeprom_buff;
 
 	if (eeprom->offset & 1) {
-		/* need read/modify/write of first changed EEPROM word */
-		/* only the second byte of the word is being modified */
+		/* need read/modify/write of first changed EEPROM word
+		 * only the second byte of the word is being modified
+		 */
 		ret_val = e1000_read_eeprom(hw, first_word, 1,
 					    &eeprom_buff[0]);
 		ptr++;
 	}
 	if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
-		/* need read/modify/write of last changed EEPROM word */
-		/* only the first byte of the word is being modified */
+		/* need read/modify/write of last changed EEPROM word
+		 * only the first byte of the word is being modified
+		 */
 		ret_val = e1000_read_eeprom(hw, last_word, 1,
 					    &eeprom_buff[last_word - first_word]);
 	}
@@ -606,11 +605,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
 	rx_old = adapter->rx_ring;
 
 	err = -ENOMEM;
-	txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), GFP_KERNEL);
+	txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring),
+		       GFP_KERNEL);
 	if (!txdr)
 		goto err_alloc_tx;
 
-	rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), GFP_KERNEL);
+	rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring),
+		       GFP_KERNEL);
 	if (!rxdr)
 		goto err_alloc_rx;
 
@@ -642,7 +643,8 @@ static int e1000_set_ringparam(struct net_device *netdev,
 		goto err_setup_tx;
 
 	/* save the new, restore the old in order to free it,
-	 * then restore the new back again */
+	 * then restore the new back again
+	 */
 
 	adapter->rx_ring = rx_old;
 	adapter->tx_ring = tx_old;
@@ -784,7 +786,6 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
 	REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
 
 	if (hw->mac_type >= e1000_82543) {
-
 		REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
 		REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
 		REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
@@ -795,14 +796,11 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
 			REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
 					 0xFFFFFFFF);
 		}
-
 	} else {
-
 		REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF);
 		REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF);
 		REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF);
 		REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF);
-
 	}
 
 	value = E1000_MC_TBL_SIZE;
@@ -858,8 +856,9 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
 
 	*data = 0;
 
-	/* NOTE: we don't test MSI interrupts here, yet */
-	/* Hook up test interrupt handler just for this test */
+	/* NOTE: we don't test MSI interrupts here, yet
+	 * Hook up test interrupt handler just for this test
+	 */
 	if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
 			 netdev))
 		shared_int = false;
@@ -1260,7 +1259,8 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 		ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
 	else {
 		/* Set the ILOS bit on the fiber Nic is half
-		 * duplex link is detected. */
+		 * duplex link is detected.
+		 */
 		stat_reg = er32(STATUS);
 		if ((stat_reg & E1000_STATUS_FD) == 0)
 			ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
@@ -1493,7 +1493,8 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
 		hw->serdes_has_link = false;
 
 		/* On some blade server designs, link establishment
-		 * could take as long as 2-3 minutes */
+		 * could take as long as 2-3 minutes
+		 */
 		do {
 			e1000_check_for_link(hw);
 			if (hw->serdes_has_link)
@@ -1545,7 +1546,8 @@ static void e1000_diag_test(struct net_device *netdev,
 		e_info(hw, "offline testing starting\n");
 
 		/* Link test performed before hardware reset so autoneg doesn't
-		 * interfere with test result */
+		 * interfere with test result
+		 */
 		if (e1000_link_test(adapter, &data[4]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
@@ -1639,7 +1641,8 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter,
 	default:
 		/* dual port cards only support WoL on port A from now on
 		 * unless it was enabled in the eeprom for port B
-		 * so exclude FUNC_1 ports from having WoL enabled */
+		 * so exclude FUNC_1 ports from having WoL enabled
+		 */
 		if (er32(STATUS) & E1000_STATUS_FUNC_1 &&
 		    !adapter->eeprom_wol) {
 			wol->supported = 0;
@@ -1663,7 +1666,8 @@ static void e1000_get_wol(struct net_device *netdev,
 	wol->wolopts = 0;
 
 	/* this function will set ->supported = 0 and return 1 if wol is not
-	 * supported by this hardware */
+	 * supported by this hardware
+	 */
 	if (e1000_wol_exclusion(adapter, wol) ||
 	    !device_can_wakeup(&adapter->pdev->dev))
 		return;
@@ -1859,7 +1863,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
 			       ETH_GSTRING_LEN);
 			p += ETH_GSTRING_LEN;
 		}
-		/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
+		/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
 		break;
 	}
 }
File diff suppressed because it is too large
@@ -239,7 +239,6 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
  * e1000_init_module is the first routine called when the driver is
  * loaded. All it does is register with the PCI subsystem.
  **/
-
 static int __init e1000_init_module(void)
 {
 	int ret;
@@ -266,7 +265,6 @@ module_init(e1000_init_module);
  * e1000_exit_module is called just before the driver is removed
  * from memory.
  **/
-
 static void __exit e1000_exit_module(void)
 {
 	pci_unregister_driver(&e1000_driver);
@@ -301,7 +299,6 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
  * e1000_irq_disable - Mask off interrupt generation on the NIC
  * @adapter: board private structure
  **/
-
 static void e1000_irq_disable(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
@@ -315,7 +312,6 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
  * e1000_irq_enable - Enable default interrupt generation settings
  * @adapter: board private structure
  **/
-
 static void e1000_irq_enable(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
@ -398,7 +394,8 @@ static void e1000_configure(struct e1000_adapter *adapter)
|
||||
e1000_configure_rx(adapter);
|
||||
/* call E1000_DESC_UNUSED which always leaves
|
||||
* at least 1 descriptor unused to make sure
|
||||
* next_to_use != next_to_clean */
|
||||
* next_to_use != next_to_clean
|
||||
*/
|
||||
for (i = 0; i < adapter->num_rx_queues; i++) {
|
||||
struct e1000_rx_ring *ring = &adapter->rx_ring[i];
|
||||
adapter->alloc_rx_buf(adapter, ring,
|
||||
@ -433,9 +430,7 @@ int e1000_up(struct e1000_adapter *adapter)
|
||||
* The phy may be powered down to save power and turn off link when the
|
||||
* driver is unloaded and wake on lan is not enabled (among others)
|
||||
* *** this routine MUST be followed by a call to e1000_reset ***
|
||||
*
|
||||
**/
|
||||
|
||||
void e1000_power_up_phy(struct e1000_adapter *adapter)
|
||||
{
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
@ -444,7 +439,8 @@ void e1000_power_up_phy(struct e1000_adapter *adapter)
|
||||
/* Just clear the power down bit to wake the phy back up */
|
||||
if (hw->media_type == e1000_media_type_copper) {
|
||||
/* according to the manual, the phy will retain its
|
||||
* settings across a power-down/up cycle */
|
||||
* settings across a power-down/up cycle
|
||||
*/
|
||||
e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
|
||||
mii_reg &= ~MII_CR_POWER_DOWN;
|
||||
e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
|
||||
@ -459,7 +455,8 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
|
||||
* The PHY cannot be powered down if any of the following is true *
|
||||
* (a) WoL is enabled
|
||||
* (b) AMT is active
|
||||
* (c) SoL/IDER session is active */
|
||||
* (c) SoL/IDER session is active
|
||||
*/
|
||||
if (!adapter->wol && hw->mac_type >= e1000_82540 &&
|
||||
hw->media_type == e1000_media_type_copper) {
|
||||
u16 mii_reg = 0;
|
||||
@ -529,8 +526,7 @@ void e1000_down(struct e1000_adapter *adapter)
|
||||
|
||||
e1000_irq_disable(adapter);
|
||||
|
||||
/*
|
||||
* Setting DOWN must be after irq_disable to prevent
|
||||
/* Setting DOWN must be after irq_disable to prevent
|
||||
* a screaming interrupt. Setting DOWN also prevents
|
||||
* tasks from rescheduling.
|
||||
*/
|
||||
@ -627,14 +623,14 @@ void e1000_reset(struct e1000_adapter *adapter)
|
||||
* rounded up to the next 1KB and expressed in KB. Likewise,
|
||||
* the Rx FIFO should be large enough to accommodate at least
|
||||
* one full receive packet and is similarly rounded up and
|
||||
* expressed in KB. */
|
||||
* expressed in KB.
|
||||
*/
|
||||
pba = er32(PBA);
|
||||
/* upper 16 bits has Tx packet buffer allocation size in KB */
|
||||
tx_space = pba >> 16;
|
||||
/* lower 16 bits has Rx packet buffer allocation size in KB */
|
||||
pba &= 0xffff;
|
||||
/*
|
||||
* the tx fifo also stores 16 bytes of information about the tx
|
||||
/* the Tx fifo also stores 16 bytes of information about the Tx
|
||||
* but don't include ethernet FCS because hardware appends it
|
||||
*/
|
||||
min_tx_space = (hw->max_frame_size +
|
||||
@ -649,7 +645,8 @@ void e1000_reset(struct e1000_adapter *adapter)
|
||||
|
||||
/* If current Tx allocation is less than the min Tx FIFO size,
|
||||
* and the min Tx FIFO size is less than the current Rx FIFO
|
||||
* allocation, take space away from current Rx allocation */
|
||||
* allocation, take space away from current Rx allocation
|
||||
*/
|
||||
if (tx_space < min_tx_space &&
|
||||
((min_tx_space - tx_space) < pba)) {
|
||||
pba = pba - (min_tx_space - tx_space);
|
||||
@ -663,8 +660,9 @@ void e1000_reset(struct e1000_adapter *adapter)
|
||||
break;
|
||||
}
|
||||
|
||||
/* if short on rx space, rx wins and must trump tx
|
||||
* adjustment or use Early Receive if available */
|
||||
/* if short on Rx space, Rx wins and must trump Tx
|
||||
* adjustment or use Early Receive if available
|
||||
*/
|
||||
if (pba < min_rx_space)
|
||||
pba = min_rx_space;
|
||||
}
|
||||
@ -672,8 +670,7 @@ void e1000_reset(struct e1000_adapter *adapter)
|
||||
|
||||
ew32(PBA, pba);
|
||||
|
||||
/*
|
||||
* flow control settings:
|
||||
/* flow control settings:
|
||||
* The high water mark must be low enough to fit one full frame
|
||||
* (or the size used for early receive) above it in the Rx FIFO.
|
||||
* Set it to the lower of:
|
||||
@ -707,7 +704,8 @@ void e1000_reset(struct e1000_adapter *adapter)
|
||||
u32 ctrl = er32(CTRL);
|
||||
/* clear phy power management bit if we are in gig only mode,
|
||||
* which if enabled will attempt negotiation to 100Mb, which
|
||||
* can cause a loss of link at power off or driver unload */
|
||||
* can cause a loss of link at power off or driver unload
|
||||
*/
|
||||
ctrl &= ~E1000_CTRL_SWDPIN3;
|
||||
ew32(CTRL, ctrl);
|
||||
}
|
||||
@ -808,9 +806,8 @@ static int e1000_is_need_ioport(struct pci_dev *pdev)
|
||||
static netdev_features_t e1000_fix_features(struct net_device *netdev,
|
||||
netdev_features_t features)
|
||||
{
|
||||
/*
|
||||
* Since there is no support for separate rx/tx vlan accel
|
||||
* enable/disable make sure tx flag is always in same state as rx.
|
||||
/* Since there is no support for separate Rx/Tx vlan accel
|
||||
* enable/disable make sure Tx flag is always in same state as Rx.
|
||||
*/
|
||||
if (features & NETIF_F_HW_VLAN_RX)
|
||||
features |= NETIF_F_HW_VLAN_TX;
|
||||
@ -1012,16 +1009,14 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
if (err)
|
||||
goto err_sw_init;
|
||||
|
||||
/*
|
||||
* there is a workaround being applied below that limits
|
||||
/* there is a workaround being applied below that limits
|
||||
* 64-bit DMA addresses to 64-bit hardware. There are some
|
||||
* 32-bit adapters that Tx hang when given 64-bit DMA addresses
|
||||
*/
|
||||
pci_using_dac = 0;
|
||||
if ((hw->bus_type == e1000_bus_type_pcix) &&
|
||||
!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
|
||||
/*
|
||||
* according to DMA-API-HOWTO, coherent calls will always
|
||||
/* according to DMA-API-HOWTO, coherent calls will always
|
||||
* succeed if the set call did
|
||||
*/
|
||||
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
|
||||
@ -1099,7 +1094,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
}
|
||||
|
||||
/* before reading the EEPROM, reset the controller to
|
||||
* put the device in a known good starting state */
|
||||
* put the device in a known good starting state
|
||||
*/
|
||||
|
||||
e1000_reset_hw(hw);
|
||||
|
||||
@ -1107,8 +1103,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
if (e1000_validate_eeprom_checksum(hw) < 0) {
|
||||
e_err(probe, "The EEPROM Checksum Is Not Valid\n");
|
||||
e1000_dump_eeprom(adapter);
|
||||
/*
|
||||
* set MAC address to all zeroes to invalidate and temporary
|
||||
/* set MAC address to all zeroes to invalidate and temporary
|
||||
* disable this device for the user. This blocks regular
|
||||
* traffic while still permitting ethtool ioctls from reaching
|
||||
* the hardware as well as allowing the user to run the
|
||||
@ -1169,7 +1164,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
|
||||
/* now that we have the eeprom settings, apply the special cases
|
||||
* where the eeprom may be wrong or the board simply won't support
|
||||
* wake on lan on a particular port */
|
||||
* wake on lan on a particular port
|
||||
*/
|
||||
switch (pdev->device) {
|
||||
case E1000_DEV_ID_82546GB_PCIE:
|
||||
adapter->eeprom_wol = 0;
|
||||
@ -1177,7 +1173,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
case E1000_DEV_ID_82546EB_FIBER:
|
||||
case E1000_DEV_ID_82546GB_FIBER:
|
||||
/* Wake events only supported on port A for dual fiber
|
||||
* regardless of eeprom setting */
|
||||
* regardless of eeprom setting
|
||||
*/
|
||||
if (er32(STATUS) & E1000_STATUS_FUNC_1)
|
||||
adapter->eeprom_wol = 0;
|
||||
break;
|
||||
@ -1270,7 +1267,6 @@ err_pci_reg:
|
||||
* Hot-Plug event, or because the driver is going to be removed from
|
||||
* memory.
|
||||
**/
|
||||
|
||||
static void e1000_remove(struct pci_dev *pdev)
|
||||
{
|
||||
struct net_device *netdev = pci_get_drvdata(pdev);
|
||||
@ -1306,7 +1302,6 @@ static void e1000_remove(struct pci_dev *pdev)
|
||||
* e1000_sw_init initializes the Adapter private data structure.
|
||||
* e1000_init_hw_struct MUST be called before this function
|
||||
**/
|
||||
|
||||
static int e1000_sw_init(struct e1000_adapter *adapter)
|
||||
{
|
||||
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
|
||||
@ -1337,7 +1332,6 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
|
||||
* We allocate one ring per queue at run-time since we don't know the
|
||||
* number of queues at compile-time.
|
||||
**/
|
||||
|
||||
static int e1000_alloc_queues(struct e1000_adapter *adapter)
|
||||
{
|
||||
adapter->tx_ring = kcalloc(adapter->num_tx_queues,
|
||||
@ -1367,7 +1361,6 @@ static int e1000_alloc_queues(struct e1000_adapter *adapter)
|
||||
* handler is registered with the OS, the watchdog task is started,
|
||||
* and the stack is notified that the interface is ready.
|
||||
**/
|
||||
|
||||
static int e1000_open(struct net_device *netdev)
|
||||
{
|
||||
struct e1000_adapter *adapter = netdev_priv(netdev);
|
||||
@ -1401,7 +1394,8 @@ static int e1000_open(struct net_device *netdev)
|
||||
/* before we allocate an interrupt, we must be ready to handle it.
|
||||
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
|
||||
* as soon as we call pci_request_irq, so we have to setup our
|
||||
* clean_rx handler before we do so. */
|
||||
* clean_rx handler before we do so.
|
||||
*/
|
||||
e1000_configure(adapter);
|
||||
|
||||
err = e1000_request_irq(adapter);
|
||||
@ -1444,7 +1438,6 @@ err_setup_tx:
|
||||
* needs to be disabled. A global MAC reset is issued to stop the
|
||||
* hardware, and all transmit and receive resources are freed.
|
||||
**/
|
||||
|
||||
static int e1000_close(struct net_device *netdev)
|
||||
{
|
||||
struct e1000_adapter *adapter = netdev_priv(netdev);
|
||||
@ -1459,7 +1452,8 @@ static int e1000_close(struct net_device *netdev)
|
||||
e1000_free_all_rx_resources(adapter);
|
||||
|
||||
/* kill manageability vlan ID if supported, but not if a vlan with
|
||||
* the same ID is registered on the host OS (let 8021q kill it) */
|
||||
* the same ID is registered on the host OS (let 8021q kill it)
|
||||
*/
|
||||
if ((hw->mng_cookie.status &
|
||||
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
|
||||
!test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
|
||||
@ -1483,7 +1477,8 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
|
||||
unsigned long end = begin + len;
|
||||
|
||||
/* First rev 82545 and 82546 need to not allow any memory
|
||||
* write location to cross 64k boundary due to errata 23 */
|
||||
* write location to cross 64k boundary due to errata 23
|
||||
*/
|
||||
if (hw->mac_type == e1000_82545 ||
|
||||
hw->mac_type == e1000_ce4100 ||
|
||||
hw->mac_type == e1000_82546) {
|
||||
@ -1500,7 +1495,6 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
|
||||
*
|
||||
* Return 0 on success, negative on failure
|
||||
**/
|
||||
|
||||
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
|
||||
struct e1000_tx_ring *txdr)
|
||||
{
|
||||
@ -1574,7 +1568,6 @@ setup_tx_desc_die:
|
||||
*
|
||||
* Return 0 on success, negative on failure
|
||||
**/
|
||||
|
||||
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
|
||||
{
|
||||
int i, err = 0;
|
||||
@ -1599,7 +1592,6 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
|
||||
*
|
||||
* Configure the Tx unit of the MAC after a reset.
|
||||
**/
|
||||
|
||||
static void e1000_configure_tx(struct e1000_adapter *adapter)
|
||||
{
|
||||
u64 tdba;
|
||||
@ -1620,8 +1612,10 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
|
||||
ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
|
||||
ew32(TDT, 0);
|
||||
ew32(TDH, 0);
|
||||
adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
|
||||
adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
|
||||
adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
|
||||
E1000_TDH : E1000_82542_TDH);
|
||||
adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
|
||||
E1000_TDT : E1000_82542_TDT);
|
||||
break;
|
||||
}
|
||||
|
||||
@ -1676,7 +1670,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
|
||||
adapter->txd_cmd |= E1000_TXD_CMD_RS;
|
||||
|
||||
/* Cache if we're 82544 running in PCI-X because we'll
|
||||
* need this to apply a workaround later in the send path. */
|
||||
* need this to apply a workaround later in the send path.
|
||||
*/
|
||||
if (hw->mac_type == e1000_82544 &&
|
||||
hw->bus_type == e1000_bus_type_pcix)
|
||||
adapter->pcix_82544 = true;
|
||||
@ -1692,7 +1687,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
|
||||
*
|
||||
* Returns 0 on success, negative on failure
|
||||
**/
|
||||
|
||||
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
|
||||
struct e1000_rx_ring *rxdr)
|
||||
{
|
||||
@ -1771,7 +1765,6 @@ setup_rx_desc_die:
|
||||
*
|
||||
* Return 0 on success, negative on failure
|
||||
**/
|
||||
|
||||
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
|
||||
{
|
||||
int i, err = 0;
|
||||
@ -1840,7 +1833,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
|
||||
/* This is useful for sniffing bad packets. */
|
||||
if (adapter->netdev->features & NETIF_F_RXALL) {
|
||||
/* UPE and MPE will be handled by normal PROMISC logic
|
||||
* in e1000e_set_rx_mode */
|
||||
* in e1000e_set_rx_mode
|
||||
*/
|
||||
rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
|
||||
E1000_RCTL_BAM | /* RX All Bcast Pkts */
|
||||
E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
|
||||
@ -1862,7 +1856,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
|
||||
*
|
||||
* Configure the Rx unit of the MAC after a reset.
|
||||
**/
|
||||
|
||||
static void e1000_configure_rx(struct e1000_adapter *adapter)
|
||||
{
|
||||
u64 rdba;
|
||||
@ -1895,7 +1888,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
|
||||
}
|
||||
|
||||
/* Setup the HW Rx Head and Tail Descriptor Pointers and
|
||||
* the Base and Length of the Rx Descriptor Ring */
|
||||
* the Base and Length of the Rx Descriptor Ring
|
||||
*/
|
||||
switch (adapter->num_rx_queues) {
|
||||
case 1:
|
||||
default:
|
||||
@ -1905,8 +1899,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
|
||||
ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
|
||||
ew32(RDT, 0);
|
||||
ew32(RDH, 0);
|
||||
adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
|
||||
adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
|
||||
adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
|
||||
E1000_RDH : E1000_82542_RDH);
|
||||
adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
|
||||
E1000_RDT : E1000_82542_RDT);
|
||||
break;
|
||||
}
|
||||
|
||||
@ -1932,7 +1928,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
|
||||
*
|
||||
* Free all transmit software resources
|
||||
**/
|
||||
|
||||
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
|
||||
struct e1000_tx_ring *tx_ring)
|
||||
{
|
||||
@ -1955,7 +1950,6 @@ static void e1000_free_tx_resources(struct e1000_adapter *adapter,
|
||||
*
|
||||
* Free all transmit software resources
|
||||
**/
|
||||
|
||||
void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
|
||||
{
|
||||
int i;
|
||||
@ -1990,7 +1984,6 @@ static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
|
||||
* @adapter: board private structure
|
||||
* @tx_ring: ring to be cleaned
|
||||
**/
|
||||
|
||||
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
|
||||
struct e1000_tx_ring *tx_ring)
|
||||
{
|
||||
@ -2026,7 +2019,6 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
|
||||
* e1000_clean_all_tx_rings - Free Tx Buffers for all queues
|
||||
* @adapter: board private structure
|
||||
**/
|
||||
|
||||
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
|
||||
{
|
||||
int i;
|
||||
@ -2042,7 +2034,6 @@ static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
|
||||
*
|
||||
* Free all receive software resources
|
||||
**/
|
||||
|
||||
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
|
||||
struct e1000_rx_ring *rx_ring)
|
||||
{
|
||||
@ -2065,7 +2056,6 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter,
|
||||
*
|
||||
* Free all receive software resources
|
||||
**/
|
||||
|
||||
void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
|
||||
{
|
||||
int i;
|
||||
@ -2079,7 +2069,6 @@ void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
|
||||
* @adapter: board private structure
|
||||
* @rx_ring: ring to free buffers from
|
||||
**/
|
||||
|
||||
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
|
||||
struct e1000_rx_ring *rx_ring)
|
||||
{
|
||||
@ -2138,7 +2127,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
|
||||
* e1000_clean_all_rx_rings - Free Rx Buffers for all queues
|
||||
* @adapter: board private structure
|
||||
**/
|
||||
|
||||
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
|
||||
{
|
||||
int i;
|
||||
@ -2198,7 +2186,6 @@ static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
|
||||
*
|
||||
* Returns 0 on success, negative on failure
|
||||
**/
|
||||
|
||||
static int e1000_set_mac(struct net_device *netdev, void *p)
|
||||
{
|
||||
struct e1000_adapter *adapter = netdev_priv(netdev);
|
||||
@ -2233,7 +2220,6 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
|
||||
* responsible for configuring the hardware for proper unicast, multicast,
|
||||
* promiscuous mode, and all-multi behavior.
|
||||
**/
|
||||
|
||||
static void e1000_set_rx_mode(struct net_device *netdev)
|
||||
{
|
||||
struct e1000_adapter *adapter = netdev_priv(netdev);
|
||||
@ -2317,10 +2303,10 @@ static void e1000_set_rx_mode(struct net_device *netdev)
|
||||
}
|
||||
|
||||
/* write the hash table completely, write from bottom to avoid
|
||||
* both stupid write combining chipsets, and flushing each write */
|
||||
* both stupid write combining chipsets, and flushing each write
|
||||
*/
|
||||
for (i = mta_reg_count - 1; i >= 0 ; i--) {
|
||||
/*
|
||||
* If we are on an 82544 has an errata where writing odd
|
||||
/* If we are on an 82544 has an errata where writing odd
|
||||
* offsets overwrites the previous even offset, but writing
|
||||
* backwards over the range solves the issue by always
|
||||
* writing the odd offset first
|
||||
@ -2533,7 +2519,8 @@ link_up:
|
||||
/* We've lost link, so the controller stops DMA,
|
||||
* but we've got queued Tx work that's never going
|
||||
* to get done, so reset controller to flush Tx.
|
||||
* (Do the reset outside of interrupt context). */
|
||||
* (Do the reset outside of interrupt context).
|
||||
*/
|
||||
adapter->tx_timeout_count++;
|
||||
schedule_work(&adapter->reset_task);
|
||||
/* exit immediately since reset is imminent */
|
||||
@ -2543,8 +2530,7 @@ link_up:
|
||||
|
||||
/* Simple mode for Interrupt Throttle Rate (ITR) */
|
||||
if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
|
||||
/*
|
||||
* Symmetric Tx/Rx gets a reduced ITR=2000;
|
||||
/* Symmetric Tx/Rx gets a reduced ITR=2000;
|
||||
* Total asymmetrical Tx or Rx gets ITR=8000;
|
||||
* everyone else is between 2000-8000.
|
||||
*/
|
||||
@ -2659,16 +2645,14 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
|
||||
goto set_itr_now;
|
||||
}
|
||||
|
||||
adapter->tx_itr = e1000_update_itr(adapter,
|
||||
adapter->tx_itr,
|
||||
adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
|
||||
adapter->total_tx_packets,
|
||||
adapter->total_tx_bytes);
|
||||
/* conservative mode (itr 3) eliminates the lowest_latency setting */
|
||||
if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
|
||||
adapter->tx_itr = low_latency;
|
||||
|
||||
adapter->rx_itr = e1000_update_itr(adapter,
|
||||
adapter->rx_itr,
|
||||
adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
|
||||
adapter->total_rx_packets,
|
||||
adapter->total_rx_bytes);
|
||||
/* conservative mode (itr 3) eliminates the lowest_latency setting */
|
||||
@ -2696,7 +2680,8 @@ set_itr_now:
|
||||
if (new_itr != adapter->itr) {
|
||||
/* this attempts to bias the interrupt rate towards Bulk
|
||||
* by adding intermediate steps when interrupt rate is
|
||||
* increasing */
|
||||
* increasing
|
||||
*/
|
||||
new_itr = new_itr > adapter->itr ?
|
||||
min(adapter->itr + (new_itr >> 2), new_itr) :
|
||||
new_itr;
|
||||
@ -2861,7 +2846,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
|
||||
/* Workaround for Controller erratum --
|
||||
* descriptor for non-tso packet in a linear SKB that follows a
|
||||
* tso gets written back prematurely before the data is fully
|
||||
* DMA'd to the controller */
|
||||
* DMA'd to the controller
|
||||
*/
|
||||
if (!skb->data_len && tx_ring->last_tx_tso &&
|
||||
!skb_is_gso(skb)) {
|
||||
tx_ring->last_tx_tso = false;
|
||||
@ -2869,7 +2855,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
|
||||
}
|
||||
|
||||
/* Workaround for premature desc write-backs
|
||||
* in TSO mode. Append 4-byte sentinel desc */
|
||||
* in TSO mode. Append 4-byte sentinel desc
|
||||
*/
|
||||
if (unlikely(mss && !nr_frags && size == len && size > 8))
|
||||
size -= 4;
|
||||
/* work-around for errata 10 and it applies
|
||||
@ -2882,7 +2869,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
|
||||
size = 2015;
|
||||
|
||||
/* Workaround for potential 82544 hang in PCI-X. Avoid
|
||||
* terminating buffers within evenly-aligned dwords. */
|
||||
* terminating buffers within evenly-aligned dwords.
|
||||
*/
|
||||
if (unlikely(adapter->pcix_82544 &&
|
||||
!((unsigned long)(skb->data + offset + size - 1) & 4) &&
|
||||
size > 4))
|
||||
@ -2925,12 +2913,15 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
|
||||
buffer_info = &tx_ring->buffer_info[i];
|
||||
size = min(len, max_per_txd);
|
||||
/* Workaround for premature desc write-backs
|
||||
* in TSO mode. Append 4-byte sentinel desc */
|
||||
if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
|
||||
* in TSO mode. Append 4-byte sentinel desc
|
||||
*/
|
||||
if (unlikely(mss && f == (nr_frags-1) &&
|
||||
size == len && size > 8))
|
||||
size -= 4;
|
||||
/* Workaround for potential 82544 hang in PCI-X.
|
||||
* Avoid terminating buffers within evenly-aligned
|
||||
* dwords. */
|
||||
* dwords.
|
||||
*/
|
||||
bufend = (unsigned long)
|
||||
page_to_phys(skb_frag_page(frag));
|
||||
bufend += offset + size - 1;
|
||||
@ -3035,13 +3026,15 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
|
||||
/* Force memory writes to complete before letting h/w
|
||||
* know there are new descriptors to fetch. (Only
|
||||
* applicable for weak-ordered memory model archs,
|
||||
* such as IA-64). */
|
||||
* such as IA-64).
|
||||
*/
|
||||
wmb();
|
||||
|
||||
tx_ring->next_to_use = i;
|
||||
writel(i, hw->hw_addr + tx_ring->tdt);
|
||||
/* we need this if more than one processor can write to our tail
|
||||
* at a time, it syncronizes IO on IA64/Altix systems */
|
||||
* at a time, it synchronizes IO on IA64/Altix systems
|
||||
*/
|
||||
mmiowb();
|
||||
}
|
||||
|
||||
@ -3090,11 +3083,13 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
|
||||
netif_stop_queue(netdev);
|
||||
/* Herbert's original patch had:
|
||||
* smp_mb__after_netif_stop_queue();
|
||||
* but since that doesn't exist yet, just open code it. */
|
||||
* but since that doesn't exist yet, just open code it.
|
||||
*/
|
||||
smp_mb();
|
||||
|
||||
/* We need to check again in a case another CPU has just
|
||||
* made room available. */
|
||||
* made room available.
|
||||
*/
|
||||
if (likely(E1000_DESC_UNUSED(tx_ring) < size))
|
||||
return -EBUSY;
|
||||
|
||||
@ -3129,10 +3124,11 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
|
||||
int tso;
|
||||
unsigned int f;
|
||||
|
||||
/* This goes back to the question of how to logically map a tx queue
|
||||
/* This goes back to the question of how to logically map a Tx queue
|
||||
* to a flow. Right now, performance is impacted slightly negatively
|
||||
* if using multiple tx queues. If the stack breaks away from a
|
||||
* single qdisc implementation, we can look at this again. */
|
||||
* if using multiple Tx queues. If the stack breaks away from a
|
||||
* single qdisc implementation, we can look at this again.
|
||||
*/
|
||||
tx_ring = adapter->tx_ring;
|
||||
|
||||
if (unlikely(skb->len <= 0)) {
|
||||
@ -3157,7 +3153,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
|
||||
* initiating the DMA for each buffer. The calc is:
|
||||
* 4 = ceil(buffer len/mss). To make sure we don't
|
||||
* overrun the FIFO, adjust the max buffer len if mss
|
||||
* drops. */
|
||||
* drops.
|
||||
*/
|
||||
if (mss) {
|
||||
u8 hdr_len;
|
||||
max_per_txd = min(mss << 2, max_per_txd);
|
||||
@ -3173,8 +3170,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
|
||||
* this hardware's requirements
|
||||
* NOTE: this is a TSO only workaround
|
||||
* if end byte alignment not correct move us
|
||||
* into the next dword */
|
||||
if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
|
||||
* into the next dword
|
||||
*/
|
||||
if ((unsigned long)(skb_tail_pointer(skb) - 1)
|
||||
& 4)
|
||||
break;
|
||||
/* fall through */
|
||||
pull_size = min((unsigned int)4, skb->data_len);
|
||||
@ -3222,7 +3221,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
|
||||
count += nr_frags;
|
||||
|
||||
/* need: count + 2 desc gap to keep tail from touching
|
||||
* head, otherwise try next time */
|
||||
* head, otherwise try next time
|
||||
*/
|
||||
if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
|
||||
return NETDEV_TX_BUSY;
|
||||
|
||||
@ -3363,9 +3363,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
|
||||
/* Print Registers */
|
||||
e1000_regdump(adapter);
|
||||
|
||||
/*
|
||||
* transmit dump
|
||||
*/
|
||||
/* transmit dump */
|
||||
pr_info("TX Desc ring0 dump\n");
|
||||
|
||||
/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
|
||||
@ -3426,9 +3424,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
|
||||
}
|
||||
|
||||
rx_ring_summary:
|
||||
/*
|
||||
* receive dump
|
||||
*/
|
||||
/* receive dump */
|
||||
pr_info("\nRX Desc ring dump\n");
|
||||
|
||||
/* Legacy Receive Descriptor Format
|
||||
@ -3493,7 +3489,6 @@ exit:
|
||||
* e1000_tx_timeout - Respond to a Tx Hang
|
||||
* @netdev: network interface device structure
|
||||
**/
|
||||
|
||||
static void e1000_tx_timeout(struct net_device *netdev)
|
||||
{
|
||||
struct e1000_adapter *adapter = netdev_priv(netdev);
|
||||
@ -3521,7 +3516,6 @@ static void e1000_reset_task(struct work_struct *work)
|
||||
* Returns the address of the device statistics structure.
|
||||
* The statistics are actually updated from the watchdog.
|
||||
**/
|
||||
|
||||
static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
|
||||
{
|
||||
/* only return the current stats */
|
||||
@ -3535,7 +3529,6 @@ static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
|
||||
*
|
||||
* Returns 0 on success, negative on failure
|
||||
**/
|
||||
|
||||
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
|
||||
{
|
||||
struct e1000_adapter *adapter = netdev_priv(netdev);
|
||||
@ -3573,7 +3566,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
|
||||
* larger slab size.
|
||||
* i.e. RXBUFFER_2048 --> size-4096 slab
|
||||
* however with the new *_jumbo_rx* routines, jumbo receives will use
|
||||
* fragmented skbs */
|
||||
* fragmented skbs
|
||||
*/
|
||||
|
||||
if (max_frame <= E1000_RXBUFFER_2048)
|
||||
adapter->rx_buffer_len = E1000_RXBUFFER_2048;
|
||||
@ -3608,7 +3602,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
|
||||
* e1000_update_stats - Update the board statistics counters
|
||||
* @adapter: board private structure
|
||||
**/
|
||||
|
||||
void e1000_update_stats(struct e1000_adapter *adapter)
|
||||
{
|
||||
struct net_device *netdev = adapter->netdev;
|
||||
@ -3619,8 +3612,7 @@ void e1000_update_stats(struct e1000_adapter *adapter)
|
||||
|
||||
#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
|
||||
|
||||
/*
|
||||
* Prevent stats update while adapter is being reset, or if the pci
|
||||
/* Prevent stats update while adapter is being reset, or if the pci
|
||||
* connection is down.
|
||||
*/
|
||||
if (adapter->link_speed == 0)
|
||||
@ -3710,7 +3702,8 @@ void e1000_update_stats(struct e1000_adapter *adapter)
|
||||
/* Rx Errors */
|
||||
|
||||
/* RLEC on some newer hardware can be incorrect so build
|
||||
* our own version based on RUC and ROC */
|
||||
* our own version based on RUC and ROC
|
||||
*/
|
||||
netdev->stats.rx_errors = adapter->stats.rxerrc +
|
||||
adapter->stats.crcerrs + adapter->stats.algnerrc +
|
||||
adapter->stats.ruc + adapter->stats.roc +
|
||||
@ -3764,7 +3757,6 @@ void e1000_update_stats(struct e1000_adapter *adapter)
|
||||
* @irq: interrupt number
|
||||
* @data: pointer to a network interface device structure
|
||||
**/
|
||||
|
||||
static irqreturn_t e1000_intr(int irq, void *data)
|
||||
{
|
||||
struct net_device *netdev = data;
|
||||
@ -3775,8 +3767,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
|
||||
if (unlikely((!icr)))
|
||||
return IRQ_NONE; /* Not our interrupt */
|
||||
|
||||
/*
|
||||
* we might have caused the interrupt, but the above
|
||||
/* we might have caused the interrupt, but the above
|
||||
* read cleared it, and just in case the driver is
|
||||
* down there is nothing to do so return handled
|
||||
*/
|
||||
@ -3802,7 +3793,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
|
||||
__napi_schedule(&adapter->napi);
|
||||
} else {
|
||||
/* this really should not happen! if it does it is basically a
|
||||
* bug, but not a hard error, so enable ints and continue */
|
||||
* bug, but not a hard error, so enable ints and continue
|
||||
*/
|
||||
if (!test_bit(__E1000_DOWN, &adapter->flags))
|
||||
e1000_irq_enable(adapter);
|
||||
}
|
||||
@ -3816,7 +3808,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
|
||||
**/
|
||||
static int e1000_clean(struct napi_struct *napi, int budget)
|
||||
{
|
||||
struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
|
||||
struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
|
||||
napi);
|
||||
int tx_clean_complete = 0, work_done = 0;
|
||||
|
||||
tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
|
||||
@ -3907,7 +3900,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
|
||||
|
||||
if (adapter->detect_tx_hung) {
|
||||
/* Detect a transmit hang in hardware, this serializes the
|
||||
* check with the clearing of time_stamp and movement of i */
|
||||
* check with the clearing of time_stamp and movement of i
|
||||
*/
|
||||
adapter->detect_tx_hung = false;
|
||||
if (tx_ring->buffer_info[eop].time_stamp &&
|
||||
time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
|
||||
@ -3954,7 +3948,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
|
||||
* @csum: receive descriptor csum field
|
||||
* @sk_buff: socket buffer with received data
|
||||
**/
|
||||
|
||||
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
|
||||
u32 csum, struct sk_buff *skb)
|
||||
{
|
||||
@ -4098,7 +4091,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
|
||||
/* recycle both page and skb */
|
||||
buffer_info->skb = skb;
|
||||
/* an error means any chain goes out the window
|
||||
* too */
|
||||
* too
|
||||
*/
|
||||
if (rx_ring->rx_skb_top)
|
||||
dev_kfree_skb(rx_ring->rx_skb_top);
|
||||
rx_ring->rx_skb_top = NULL;
|
||||
@ -4132,22 +4126,26 @@ process_skb:
|
||||
skb_shinfo(rxtop)->nr_frags,
|
||||
buffer_info->page, 0, length);
|
||||
/* re-use the current skb, we only consumed the
|
||||
* page */
|
||||
* page
|
||||
*/
|
||||
buffer_info->skb = skb;
|
||||
skb = rxtop;
|
||||
rxtop = NULL;
|
||||
e1000_consume_page(buffer_info, skb, length);
|
||||
} else {
|
||||
/* no chain, got EOP, this buf is the packet
|
||||
* copybreak to save the put_page/alloc_page */
|
||||
* copybreak to save the put_page/alloc_page
|
||||
*/
|
||||
if (length <= copybreak &&
|
||||
skb_tailroom(skb) >= length) {
|
||||
u8 *vaddr;
|
||||
vaddr = kmap_atomic(buffer_info->page);
|
||||
memcpy(skb_tail_pointer(skb), vaddr, length);
|
||||
memcpy(skb_tail_pointer(skb), vaddr,
|
||||
length);
|
||||
kunmap_atomic(vaddr);
|
||||
/* re-use the page, so don't erase
|
||||
* buffer_info->page */
|
||||
* buffer_info->page
|
||||
*/
|
||||
skb_put(skb, length);
|
||||
} else {
|
||||
skb_fill_page_desc(skb, 0,
|
||||
@ -4205,8 +4203,7 @@ next_desc:
|
||||
return cleaned;
|
||||
}
|
||||
|
||||
/*
|
||||
* this should improve performance for small packets with large amounts
|
||||
/* this should improve performance for small packets with large amounts
|
||||
* of reassembly being done in the stack
|
||||
*/
|
||||
static void e1000_check_copybreak(struct net_device *netdev,
|
||||
@ -4377,7 +4374,6 @@ next_desc:
|
||||
* @rx_ring: pointer to receive ring structure
|
||||
* @cleaned_count: number of buffers to allocate this pass
|
||||
**/
|
||||
|
||||
static void
|
||||
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
|
||||
struct e1000_rx_ring *rx_ring, int cleaned_count)
|
||||
@ -4451,7 +4447,8 @@ check_page:
|
||||
/* Force memory writes to complete before letting h/w
|
||||
* know there are new descriptors to fetch. (Only
|
||||
* applicable for weak-ordered memory model archs,
|
||||
* such as IA-64). */
|
||||
* such as IA-64).
|
||||
*/
|
||||
wmb();
|
||||
writel(i, adapter->hw.hw_addr + rx_ring->rdt);
|
||||
}
|
||||
@ -4461,7 +4458,6 @@ check_page:
|
||||
* e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
|
||||
* @adapter: address of board private structure
|
||||
**/
|
||||
|
||||
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
|
||||
struct e1000_rx_ring *rx_ring,
|
||||
int cleaned_count)
|
||||
@ -4532,8 +4528,7 @@ map_skb:
|
||||
break; /* while !buffer_info->skb */
|
||||
}
|
||||
|
||||
/*
|
||||
* XXX if it was allocated cleanly it will never map to a
|
||||
/* XXX if it was allocated cleanly it will never map to a
|
||||
* boundary crossing
|
||||
*/
|
||||
|
||||
@ -4571,7 +4566,8 @@ map_skb:
|
||||
/* Force memory writes to complete before letting h/w
|
||||
* know there are new descriptors to fetch. (Only
|
||||
* applicable for weak-ordered memory model archs,
|
||||
* such as IA-64). */
|
||||
* such as IA-64).
|
||||
*/
|
||||
wmb();
|
||||
writel(i, hw->hw_addr + rx_ring->rdt);
|
||||
}
|
||||
@ -4581,7 +4577,6 @@ map_skb:
|
||||
* e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
|
||||
* @adapter:
|
||||
**/
|
||||
|
||||
static void e1000_smartspeed(struct e1000_adapter *adapter)
|
||||
{
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
@ -4594,7 +4589,8 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
|
||||
|
||||
if (adapter->smartspeed == 0) {
|
||||
/* If Master/Slave config fault is asserted twice,
|
||||
* we assume back-to-back */
|
||||
* we assume back-to-back
|
||||
*/
|
||||
e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
|
||||
if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
|
||||
e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
|
||||
@ -4638,7 +4634,6 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
|
||||
* @ifreq:
|
||||
* @cmd:
|
||||
**/
|
||||
|
||||
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
|
||||
{
|
||||
switch (cmd) {
|
||||
@ -4657,7 +4652,6 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
|
||||
* @ifreq:
|
||||
* @cmd:
|
||||
**/
|
||||
|
||||
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
|
||||
int cmd)
|
||||
{
|
||||
@ -4919,7 +4913,8 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
|
||||
hw->autoneg = 0;
|
||||
|
||||
/* Make sure dplx is at most 1 bit and lsb of speed is not set
|
||||
* for the switch() below to work */
|
||||
* for the switch() below to work
|
||||
*/
|
||||
if ((spd & 1) || (dplx & ~1))
|
||||
goto err_inval;
|
||||
|
||||
@ -5122,8 +5117,7 @@ static void e1000_shutdown(struct pci_dev *pdev)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NET_POLL_CONTROLLER
|
||||
/*
|
||||
* Polling 'interrupt' - used by things like netconsole to send skbs
|
||||
/* Polling 'interrupt' - used by things like netconsole to send skbs
|
||||
* without having to re-enable interrupts. It's not called while
|
||||
* the interrupt routine is executing.
|
||||
*/
|
||||
|
@ -267,7 +267,6 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter);
|
||||
* value exists, a default value is used. The final value is stored
|
||||
* in a variable in the adapter structure.
|
||||
**/
|
||||
|
||||
void e1000_check_options(struct e1000_adapter *adapter)
|
||||
{
|
||||
struct e1000_option opt;
|
||||
@ -319,7 +318,8 @@ void e1000_check_options(struct e1000_adapter *adapter)
|
||||
.def = E1000_DEFAULT_RXD,
|
||||
.arg = { .r = {
|
||||
.min = E1000_MIN_RXD,
|
||||
.max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD
|
||||
.max = mac_type < e1000_82544 ? E1000_MAX_RXD :
|
||||
E1000_MAX_82544_RXD
|
||||
}}
|
||||
};
|
||||
|
||||
@ -488,7 +488,8 @@ void e1000_check_options(struct e1000_adapter *adapter)
|
||||
/* save the setting, because the dynamic bits
|
||||
* change itr.
|
||||
* clear the lower two bits because they are
|
||||
* used as control */
|
||||
* used as control
|
||||
*/
|
||||
adapter->itr_setting = adapter->itr & ~3;
|
||||
break;
|
||||
}
|
||||
@ -533,7 +534,6 @@ void e1000_check_options(struct e1000_adapter *adapter)
|
||||
*
|
||||
* Handles speed and duplex options on fiber adapters
|
||||
**/
|
||||
|
||||
static void e1000_check_fiber_options(struct e1000_adapter *adapter)
|
||||
{
|
||||
int bd = adapter->bd_number;
|
||||
@ -559,7 +559,6 @@ static void e1000_check_fiber_options(struct e1000_adapter *adapter)
|
||||
*
|
||||
* Handles speed and duplex options on copper adapters
|
||||
**/
|
||||
|
||||
static void e1000_check_copper_options(struct e1000_adapter *adapter)
|
||||
{
|
||||
struct e1000_option opt;
|
||||