Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1745 commits)
  dp83640: free packet queues on remove
  dp83640: use proper function to free transmit time stamping packets
  ipv6: Do not use routes from locally generated RAs
  [PATCH net-next] tg3: add tx_dropped counter
  be2net: don't create multiple RX/TX rings in multi channel mode
  be2net: don't create multiple TXQs in BE2
  be2net: refactor VF setup/teardown code into be_vf_setup/clear()
  be2net: add vlan/rx-mode/flow-control config to be_setup()
  net_sched: cls_flow: use skb_header_pointer()
  ipv4: avoid useless call of the function check_peer_pmtu
  TCP: remove TCP_DEBUG
  net: Fix driver name for mdio-gpio.c
  ipv4: tcp: fix TOS value in ACK messages sent from TIME_WAIT
  rtnetlink: Add missing manual netlink notification in dev_change_net_namespaces
  ipv4: fix ipsec forward performance regression
  jme: fix irq storm after suspend/resume
  route: fix ICMP redirect validation
  net: hold sock reference while processing tx timestamps
  tcp: md5: add more const attributes
  Add ethtool -g support to virtio_net
  ...

Fix up conflicts in:
 - drivers/net/Kconfig:
	The split-up generated a trivial conflict with removal of a
	stale reference to Documentation/networking/net-modules.txt.
	Remove it from the new location instead.
 - fs/sysfs/dir.c:
	Fairly nasty conflicts with the sysfs rb-tree usage, conflicting
	with Eric Biederman's changes for tagged directories.
Linus Torvalds
2011-10-25 13:25:22 +02:00
2121 changed files with 200383 additions and 104486 deletions

@@ -0,0 +1,222 @@
#
# Intel network device configuration
#
config NET_VENDOR_INTEL
bool "Intel devices"
default y
depends on PCI || PCI_MSI
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
the questions about Intel cards. If you say Y, you will be asked for
your specific card in the following questions.
if NET_VENDOR_INTEL
config E100
tristate "Intel(R) PRO/100+ support"
depends on PCI
select NET_CORE
select MII
---help---
This driver supports Intel(R) PRO/100 family of adapters.
To verify that your adapter is supported, find the board ID number
on the adapter. Look for a label that has a barcode and a number
in the format 123456-001 (six digits hyphen three digits).
Use the above information and the Adapter & Driver ID Guide at:
<http://support.intel.com/support/network/adapter/pro100/21397.htm>
to identify the adapter.
For the latest Intel PRO/100 network driver for Linux, see:
<http://www.intel.com/p/en_US/support/highlights/network/pro100plus>
More specific information on configuring the driver is in
<file:Documentation/networking/e100.txt>.
To compile this driver as a module, choose M here. The module
will be called e100.
config E1000
tristate "Intel(R) PRO/1000 Gigabit Ethernet support"
depends on PCI
---help---
This driver supports Intel(R) PRO/1000 gigabit ethernet family of
adapters. For more information on how to identify your adapter, go
to the Adapter & Driver ID Guide at:
<http://support.intel.com/support/network/adapter/pro100/21397.htm>
For general information and support, go to the Intel support
website at:
<http://support.intel.com>
More specific information on configuring the driver is in
<file:Documentation/networking/e1000.txt>.
To compile this driver as a module, choose M here. The module
will be called e1000.
config E1000E
tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
depends on PCI && (!SPARC32 || BROKEN)
select CRC32
---help---
This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
ethernet family of adapters. For PCI or PCI-X e1000 adapters,
use the regular e1000 driver. For more information on how to
identify your adapter, go to the Adapter & Driver ID Guide at:
<http://support.intel.com/support/network/adapter/pro100/21397.htm>
For general information and support, go to the Intel support
website at:
<http://support.intel.com>
To compile this driver as a module, choose M here. The module
will be called e1000e.
config IGB
tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
depends on PCI
---help---
This driver supports Intel(R) 82575/82576 gigabit ethernet family of
adapters. For more information on how to identify your adapter, go
to the Adapter & Driver ID Guide at:
<http://support.intel.com/support/network/adapter/pro100/21397.htm>
For general information and support, go to the Intel support
website at:
<http://support.intel.com>
More specific information on configuring the driver is in
<file:Documentation/networking/e1000.txt>.
To compile this driver as a module, choose M here. The module
will be called igb.
config IGB_DCA
bool "Direct Cache Access (DCA) Support"
default y
depends on IGB && DCA && !(IGB=y && DCA=m)
---help---
Say Y here if you want to use Direct Cache Access (DCA) in the
driver. DCA is a method for warming the CPU cache before data
is used, with the intent of lessening the impact of cache misses.
config IGBVF
tristate "Intel(R) 82576 Virtual Function Ethernet support"
depends on PCI
---help---
This driver supports Intel(R) 82576 virtual functions. For more
information on how to identify your adapter, go to the Adapter &
Driver ID Guide at:
<http://support.intel.com/support/network/adapter/pro100/21397.htm>
For general information and support, go to the Intel support
website at:
<http://support.intel.com>
More specific information on configuring the driver is in
<file:Documentation/networking/e1000.txt>.
To compile this driver as a module, choose M here. The module
will be called igbvf.
config IXGB
tristate "Intel(R) PRO/10GbE support"
depends on PCI
---help---
This driver supports Intel(R) PRO/10GbE family of adapters for
PCI-X type cards. For PCI-E type cards, use the "ixgbe" driver
instead. For more information on how to identify your adapter, go
to the Adapter & Driver ID Guide at:
<http://support.intel.com/support/network/adapter/pro100/21397.htm>
For general information and support, go to the Intel support
website at:
<http://support.intel.com>
More specific information on configuring the driver is in
<file:Documentation/networking/ixgb.txt>.
To compile this driver as a module, choose M here. The module
will be called ixgb.
config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
depends on PCI && INET
select MDIO
---help---
This driver supports Intel(R) 10GbE PCI Express family of
adapters. For more information on how to identify your adapter, go
to the Adapter & Driver ID Guide at:
<http://support.intel.com/support/network/adapter/pro100/21397.htm>
For general information and support, go to the Intel support
website at:
<http://support.intel.com>
To compile this driver as a module, choose M here. The module
will be called ixgbe.
config IXGBE_DCA
bool "Direct Cache Access (DCA) Support"
default y
depends on IXGBE && DCA && !(IXGBE=y && DCA=m)
---help---
Say Y here if you want to use Direct Cache Access (DCA) in the
driver. DCA is a method for warming the CPU cache before data
is used, with the intent of lessening the impact of cache misses.
config IXGBE_DCB
bool "Data Center Bridging (DCB) Support"
default n
depends on IXGBE && DCB
---help---
Say Y here if you want to use Data Center Bridging (DCB) in the
driver.
If unsure, say N.
config IXGBEVF
tristate "Intel(R) 82599 Virtual Function Ethernet support"
depends on PCI_MSI
---help---
This driver supports Intel(R) 82599 virtual functions. For more
information on how to identify your adapter, go to the Adapter &
Driver ID Guide at:
<http://support.intel.com/support/network/sb/CS-008441.htm>
For general information and support, go to the Intel support
website at:
<http://support.intel.com>
More specific information on configuring the driver is in
<file:Documentation/networking/ixgbevf.txt>.
To compile this driver as a module, choose M here. The module
will be called ixgbevf. MSI-X interrupt support is required
for this driver to work correctly.
endif # NET_VENDOR_INTEL

@@ -0,0 +1,12 @@
#
# Makefile for the Intel network device drivers.
#
obj-$(CONFIG_E100) += e100.o
obj-$(CONFIG_E1000) += e1000/
obj-$(CONFIG_E1000E) += e1000e/
obj-$(CONFIG_IGB) += igb/
obj-$(CONFIG_IGBVF) += igbvf/
obj-$(CONFIG_IXGBE) += ixgbe/
obj-$(CONFIG_IXGBEVF) += ixgbevf/
obj-$(CONFIG_IXGB) += ixgb/

File diff suppressed because it is too large

@@ -0,0 +1,35 @@
################################################################################
#
# Intel PRO/1000 Linux driver
# Copyright(c) 1999 - 2006 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
#
# Contact Information:
# Linux NICS <linux.nics@intel.com>
# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
#
################################################################################
#
# Makefile for the Intel(R) PRO/1000 ethernet driver
#
obj-$(CONFIG_E1000) += e1000.o
e1000-objs := e1000_main.o e1000_hw.o e1000_ethtool.o e1000_param.o

@@ -0,0 +1,363 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
Copyright(c) 1999 - 2006 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
/* Linux PRO/1000 Ethernet Driver main header file */
#ifndef _E1000_H_
#define _E1000_H_
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/capability.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/pkt_sched.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <net/checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#define BAR_0 0
#define BAR_1 1
#define BAR_5 5
#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\
PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
struct e1000_adapter;
#include "e1000_hw.h"
#define E1000_MAX_INTR 10
/* TX/RX descriptor defines */
#define E1000_DEFAULT_TXD 256
#define E1000_MAX_TXD 256
#define E1000_MIN_TXD 48
#define E1000_MAX_82544_TXD 4096
#define E1000_DEFAULT_RXD 256
#define E1000_MAX_RXD 256
#define E1000_MIN_RXD 48
#define E1000_MAX_82544_RXD 4096
#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */
/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* Supported Rx Buffer Sizes */
#define E1000_RXBUFFER_128 128 /* Used for packet split */
#define E1000_RXBUFFER_256 256 /* Used for packet split */
#define E1000_RXBUFFER_512 512
#define E1000_RXBUFFER_1024 1024
#define E1000_RXBUFFER_2048 2048
#define E1000_RXBUFFER_4096 4096
#define E1000_RXBUFFER_8192 8192
#define E1000_RXBUFFER_16384 16384
/* SmartSpeed delimiters */
#define E1000_SMARTSPEED_DOWNSHIFT 3
#define E1000_SMARTSPEED_MAX 15
/* Packet Buffer allocations */
#define E1000_PBA_BYTES_SHIFT 0xA
#define E1000_TX_HEAD_ADDR_SHIFT 7
#define E1000_PBA_TX_MASK 0xFFFF0000
/* Flow Control Watermarks */
#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */
#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */
#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */
/* How many Tx Descriptors do we need to call netif_wake_queue ? */
#define E1000_TX_QUEUE_WAKE 16
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
#define AUTO_ALL_MODES 0
#define E1000_EEPROM_82544_APM 0x0004
#define E1000_EEPROM_APME 0x0400
#ifndef E1000_MASTER_SLAVE
/* Switch to override PHY master/slave setting */
#define E1000_MASTER_SLAVE e1000_ms_hw_default
#endif
#define E1000_MNG_VLAN_NONE (-1)
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct e1000_buffer {
struct sk_buff *skb;
dma_addr_t dma;
struct page *page;
unsigned long time_stamp;
u16 length;
u16 next_to_watch;
unsigned int segs;
unsigned int bytecount;
u16 mapped_as_page;
};
struct e1000_tx_ring {
/* pointer to the descriptor ring memory */
void *desc;
/* physical address of the descriptor ring */
dma_addr_t dma;
/* length of descriptor ring in bytes */
unsigned int size;
/* number of descriptors in the ring */
unsigned int count;
/* next descriptor to associate a buffer with */
unsigned int next_to_use;
/* next descriptor to check for DD status bit */
unsigned int next_to_clean;
/* array of buffer information structs */
struct e1000_buffer *buffer_info;
u16 tdh;
u16 tdt;
bool last_tx_tso;
};
struct e1000_rx_ring {
/* pointer to the descriptor ring memory */
void *desc;
/* physical address of the descriptor ring */
dma_addr_t dma;
/* length of descriptor ring in bytes */
unsigned int size;
/* number of descriptors in the ring */
unsigned int count;
/* next descriptor to associate a buffer with */
unsigned int next_to_use;
/* next descriptor to check for DD status bit */
unsigned int next_to_clean;
/* array of buffer information structs */
struct e1000_buffer *buffer_info;
struct sk_buff *rx_skb_top;
/* cpu for rx queue */
int cpu;
u16 rdh;
u16 rdt;
};
#define E1000_DESC_UNUSED(R) \
((((R)->next_to_clean > (R)->next_to_use) \
? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1)
#define E1000_RX_DESC_EXT(R, i) \
(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc)
#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc)
#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc)
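As a worked illustration of the ring accounting behind E1000_DESC_UNUSED: one slot is always left unused so a full ring can be distinguished from an empty one, and the ring count is added in whenever next_to_use is at or ahead of next_to_clean so the unsigned subtraction cannot wrap. A minimal user-space sketch of the same arithmetic (hypothetical ring values, not part of the driver):

#include <assert.h>
#include <stdio.h>

/* Same accounting as E1000_DESC_UNUSED, written as a plain function. */
static unsigned int ring_unused(unsigned int count,
				unsigned int next_to_use,
				unsigned int next_to_clean)
{
	unsigned int base = (next_to_clean > next_to_use) ? 0 : count;

	/* One descriptor is always kept unused so full != empty. */
	return base + next_to_clean - next_to_use - 1;
}

int main(void)
{
	/* 256-entry ring, 10 descriptors posted, none cleaned yet: 245 free. */
	assert(ring_unused(256, 10, 0) == 245);
	/* next_to_use has wrapped to the start while cleaning lags behind. */
	assert(ring_unused(256, 2, 250) == 247);
	printf("ring accounting ok\n");
	return 0;
}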
/* board specific private data structure */
struct e1000_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u16 mng_vlan_id;
u32 bd_number;
u32 rx_buffer_len;
u32 wol;
u32 smartspeed;
u32 en_mng_pt;
u16 link_speed;
u16 link_duplex;
spinlock_t stats_lock;
unsigned int total_tx_bytes;
unsigned int total_tx_packets;
unsigned int total_rx_bytes;
unsigned int total_rx_packets;
/* Interrupt Throttle Rate */
u32 itr;
u32 itr_setting;
u16 tx_itr;
u16 rx_itr;
u8 fc_autoneg;
/* TX */
struct e1000_tx_ring *tx_ring; /* One per active queue */
unsigned int restart_queue;
u32 txd_cmd;
u32 tx_int_delay;
u32 tx_abs_int_delay;
u32 gotcl;
u64 gotcl_old;
u64 tpt_old;
u64 colc_old;
u32 tx_timeout_count;
u32 tx_fifo_head;
u32 tx_head_addr;
u32 tx_fifo_size;
u8 tx_timeout_factor;
atomic_t tx_fifo_stall;
bool pcix_82544;
bool detect_tx_hung;
/* RX */
bool (*clean_rx)(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int *work_done, int work_to_do);
void (*alloc_rx_buf)(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int cleaned_count);
struct e1000_rx_ring *rx_ring; /* One per active queue */
struct napi_struct napi;
int num_tx_queues;
int num_rx_queues;
u64 hw_csum_err;
u64 hw_csum_good;
u32 alloc_rx_buff_failed;
u32 rx_int_delay;
u32 rx_abs_int_delay;
bool rx_csum;
u32 gorcl;
u64 gorcl_old;
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
/* structs defined in e1000_hw.h */
struct e1000_hw hw;
struct e1000_hw_stats stats;
struct e1000_phy_info phy_info;
struct e1000_phy_stats phy_stats;
u32 test_icr;
struct e1000_tx_ring test_tx_ring;
struct e1000_rx_ring test_rx_ring;
int msg_enable;
/* to not mess up cache alignment, always add to the bottom */
bool tso_force;
bool smart_power_down; /* phy smart power down */
bool quad_port_a;
unsigned long flags;
u32 eeprom_wol;
/* for ioport free */
int bars;
int need_ioport;
bool discarding;
struct work_struct reset_task;
struct delayed_work watchdog_task;
struct delayed_work fifo_stall_task;
struct delayed_work phy_info_task;
struct mutex mutex;
};
enum e1000_state_t {
__E1000_TESTING,
__E1000_RESETTING,
__E1000_DOWN
};
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
#define e_dbg(format, arg...) \
netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)
#define e_err(msglvl, format, arg...) \
netif_err(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_info(msglvl, format, arg...) \
netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_warn(msglvl, format, arg...) \
netif_warn(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_notice(msglvl, format, arg...) \
netif_notice(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_dev_info(format, arg...) \
dev_info(&adapter->pdev->dev, format, ## arg)
#define e_dev_warn(format, arg...) \
dev_warn(&adapter->pdev->dev, format, ## arg)
#define e_dev_err(format, arg...) \
dev_err(&adapter->pdev->dev, format, ## arg)
extern char e1000_driver_name[];
extern const char e1000_driver_version[];
extern int e1000_up(struct e1000_adapter *adapter);
extern void e1000_down(struct e1000_adapter *adapter);
extern void e1000_reinit_locked(struct e1000_adapter *adapter);
extern void e1000_reset(struct e1000_adapter *adapter);
extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx);
extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
extern void e1000_update_stats(struct e1000_adapter *adapter);
extern bool e1000_has_link(struct e1000_adapter *adapter);
extern void e1000_power_up_phy(struct e1000_adapter *);
extern void e1000_set_ethtool_ops(struct net_device *netdev);
extern void e1000_check_options(struct e1000_adapter *adapter);
extern char *e1000_get_hw_dev_name(struct e1000_hw *hw);
#endif /* _E1000_H_ */

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,109 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
Copyright(c) 1999 - 2006 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
/* glue for the OS independent part of e1000
* includes register access macros
*/
#ifndef _E1000_OSDEP_H_
#define _E1000_OSDEP_H_
#include <asm/io.h>
#define CONFIG_RAM_BASE 0x60000
#define GBE_CONFIG_OFFSET 0x0
#define GBE_CONFIG_RAM_BASE \
((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET))
#define GBE_CONFIG_BASE_VIRT \
((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE))
#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \
(iowrite16_rep(base + offset, data, count))
#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \
(ioread16_rep(base + (offset << 1), data, count))
#define er32(reg) \
(readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \
? E1000_##reg : E1000_82542_##reg)))
#define ew32(reg, value) \
(writel((value), (hw->hw_addr + ((hw->mac_type >= e1000_82543) \
? E1000_##reg : E1000_82542_##reg))))
#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \
writel((value), ((a)->hw_addr + \
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
((offset) << 2))))
#define E1000_READ_REG_ARRAY(a, reg, offset) ( \
readl((a)->hw_addr + \
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
((offset) << 2)))
#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \
writew((value), ((a)->hw_addr + \
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
((offset) << 1))))
#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \
readw((a)->hw_addr + \
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
((offset) << 1)))
#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \
writeb((value), ((a)->hw_addr + \
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
(offset))))
#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \
readb((a)->hw_addr + \
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
(offset)))
#define E1000_WRITE_FLUSH() er32(STATUS)
#define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \
writel((value), ((a)->flash_address + reg)))
#define E1000_READ_ICH_FLASH_REG(a, reg) ( \
readl((a)->flash_address + reg))
#define E1000_WRITE_ICH_FLASH_REG16(a, reg, value) ( \
writew((value), ((a)->flash_address + reg)))
#define E1000_READ_ICH_FLASH_REG16(a, reg) ( \
readw((a)->flash_address + reg))
#endif /* _E1000_OSDEP_H_ */
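The er32()/ew32() wrappers above expect a local struct e1000_hw pointer named hw in scope and select the 82542-era register layout automatically from hw->mac_type. A hedged usage sketch, assuming kernel context where e1000.h/e1000_hw.h supply struct e1000_hw, u32, the E1000_CTRL/E1000_STATUS register offsets and the E1000_CTRL_SLU bit (the function itself is illustrative, not taken from the driver):

/* Illustrative only: force the Set Link Up bit in Device Control. */
static void example_force_link_up(struct e1000_hw *hw)
{
	u32 ctrl = er32(CTRL);		/* read Device Control */

	ctrl |= E1000_CTRL_SLU;		/* Set Link Up (Force Link) */
	ew32(CTRL, ctrl);		/* write it back */
	E1000_WRITE_FLUSH();		/* flush the posted write via a STATUS read */
}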

@@ -0,0 +1,755 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
Copyright(c) 1999 - 2006 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "e1000.h"
/* This is the only thing that needs to be changed to adjust the
* maximum number of ports that the driver can manage.
*/
#define E1000_MAX_NIC 32
#define OPTION_UNSET -1
#define OPTION_DISABLED 0
#define OPTION_ENABLED 1
/* All parameters are treated the same, as an integer array of values.
* This macro just reduces the need to repeat the same declaration code
* over and over (plus this helps to avoid typo bugs).
*/
#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
#define E1000_PARAM(X, desc) \
static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
static unsigned int num_##X; \
module_param_array_named(X, X, int, &num_##X, 0); \
MODULE_PARM_DESC(X, desc);
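To make the E1000_PARAM() wrapper concrete, expanding E1000_PARAM(TxDescriptors, "Number of transmit descriptors") by hand gives roughly the following (a mechanical sketch of the expansion; module_param_array_named() and MODULE_PARM_DESC() come from linux/module.h in kernel context):

static int __devinitdata TxDescriptors[E1000_MAX_NIC + 1] = E1000_PARAM_INIT;
static unsigned int num_TxDescriptors;
module_param_array_named(TxDescriptors, TxDescriptors, int,
			 &num_TxDescriptors, 0);
MODULE_PARM_DESC(TxDescriptors, "Number of transmit descriptors");

num_TxDescriptors then records how many per-board values the user actually supplied, which is why e1000_check_options() below compares it against the board number bd before reading TxDescriptors[bd].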
/* Transmit Descriptor Count
*
* Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
* Valid Range: 80-4096 for 82544 and newer
*
* Default Value: 256
*/
E1000_PARAM(TxDescriptors, "Number of transmit descriptors");
/* Receive Descriptor Count
*
* Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
* Valid Range: 80-4096 for 82544 and newer
*
* Default Value: 256
*/
E1000_PARAM(RxDescriptors, "Number of receive descriptors");
/* User Specified Speed Override
*
* Valid Range: 0, 10, 100, 1000
* - 0 - auto-negotiate at all supported speeds
* - 10 - only link at 10 Mbps
* - 100 - only link at 100 Mbps
* - 1000 - only link at 1000 Mbps
*
* Default Value: 0
*/
E1000_PARAM(Speed, "Speed setting");
/* User Specified Duplex Override
*
* Valid Range: 0-2
* - 0 - auto-negotiate for duplex
* - 1 - only link at half duplex
* - 2 - only link at full duplex
*
* Default Value: 0
*/
E1000_PARAM(Duplex, "Duplex setting");
/* Auto-negotiation Advertisement Override
*
* Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber)
*
* The AutoNeg value is a bit mask describing which speed and duplex
* combinations should be advertised during auto-negotiation.
* The supported speed and duplex modes are listed below
*
* Bit            7     6     5     4     3     2     1     0
* Speed (Mbps)  N/A   N/A   1000  N/A   100   100   10    10
* Duplex                    Full        Full  Half  Full  Half
*
* Default Value: 0x2F (copper); 0x20 (fiber)
*/
E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
#define AUTONEG_ADV_DEFAULT 0x2F
#define AUTONEG_ADV_MASK 0x2F
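As a worked example of the bit mask, the copper default 0x2F is simply the OR of every supported advertisement except the unsupported 1000/Half. A self-contained check using the ADVERTISE_* values that appear later in this commit in the e1000e defines (the e1000 hw header is assumed to use the same numeric values):

#include <assert.h>

#define ADVERTISE_10_HALF	0x0001
#define ADVERTISE_10_FULL	0x0002
#define ADVERTISE_100_HALF	0x0004
#define ADVERTISE_100_FULL	0x0008
#define ADVERTISE_1000_FULL	0x0020	/* 1000/Half is not supported */

int main(void)
{
	/* AUTONEG_ADV_DEFAULT (0x2F) advertises everything but 1000/Half. */
	assert((ADVERTISE_10_HALF | ADVERTISE_10_FULL |
		ADVERTISE_100_HALF | ADVERTISE_100_FULL |
		ADVERTISE_1000_FULL) == 0x2F);
	return 0;
}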
/* User Specified Flow Control Override
*
* Valid Range: 0-3
* - 0 - No Flow Control
* - 1 - Rx only, respond to PAUSE frames but do not generate them
* - 2 - Tx only, generate PAUSE frames but ignore them on receive
* - 3 - Full Flow Control Support
*
* Default Value: Read flow control settings from the EEPROM
*/
E1000_PARAM(FlowControl, "Flow Control setting");
#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
/* XsumRX - Receive Checksum Offload Enable/Disable
*
* Valid Range: 0, 1
* - 0 - disables all checksum offload
* - 1 - enables receive IP/TCP/UDP checksum offload
* on 82543 and newer NICs
*
* Default Value: 1
*/
E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
/* Transmit Interrupt Delay in units of 1.024 microseconds
* Tx interrupt delay needs to typically be set to something non zero
*
* Valid Range: 0-65535
*/
E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
#define DEFAULT_TIDV 8
#define MAX_TXDELAY 0xFFFF
#define MIN_TXDELAY 0
/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
*
* Valid Range: 0-65535
*/
E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
#define DEFAULT_TADV 32
#define MAX_TXABSDELAY 0xFFFF
#define MIN_TXABSDELAY 0
/* Receive Interrupt Delay in units of 1.024 microseconds
* hardware will likely hang if you set this to anything but zero.
*
* Valid Range: 0-65535
*/
E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
#define DEFAULT_RDTR 0
#define MAX_RXDELAY 0xFFFF
#define MIN_RXDELAY 0
/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
*
* Valid Range: 0-65535
*/
E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
#define DEFAULT_RADV 8
#define MAX_RXABSDELAY 0xFFFF
#define MIN_RXABSDELAY 0
/* Interrupt Throttle Rate (interrupts/sec)
*
* Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
*/
E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
#define DEFAULT_ITR 3
#define MAX_ITR 100000
#define MIN_ITR 100
/* Enable Smart Power Down of the PHY
*
* Valid Range: 0, 1
*
* Default Value: 0 (disabled)
*/
E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
struct e1000_option {
enum { enable_option, range_option, list_option } type;
const char *name;
const char *err;
int def;
union {
struct { /* range_option info */
int min;
int max;
} r;
struct { /* list_option info */
int nr;
const struct e1000_opt_list { int i; char *str; } *p;
} l;
} arg;
};
static int __devinit e1000_validate_option(unsigned int *value,
const struct e1000_option *opt,
struct e1000_adapter *adapter)
{
if (*value == OPTION_UNSET) {
*value = opt->def;
return 0;
}
switch (opt->type) {
case enable_option:
switch (*value) {
case OPTION_ENABLED:
e_dev_info("%s Enabled\n", opt->name);
return 0;
case OPTION_DISABLED:
e_dev_info("%s Disabled\n", opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
e_dev_info("%s set to %i\n", opt->name, *value);
return 0;
}
break;
case list_option: {
int i;
const struct e1000_opt_list *ent;
for (i = 0; i < opt->arg.l.nr; i++) {
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
e_dev_info("%s\n", ent->str);
return 0;
}
}
}
break;
default:
BUG();
}
e_dev_info("Invalid %s value specified (%i) %s\n",
opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
static void e1000_check_fiber_options(struct e1000_adapter *adapter);
static void e1000_check_copper_options(struct e1000_adapter *adapter);
/**
* e1000_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
*
* This routine checks all command line parameters for valid user
* input. If an invalid value is given, or if no user specified
* value exists, a default value is used. The final value is stored
* in a variable in the adapter structure.
**/
void __devinit e1000_check_options(struct e1000_adapter *adapter)
{
struct e1000_option opt;
int bd = adapter->bd_number;
if (bd >= E1000_MAX_NIC) {
e_dev_warn("Warning: no configuration for board #%i "
"using defaults for all values\n", bd);
}
{ /* Transmit Descriptor Count */
struct e1000_tx_ring *tx_ring = adapter->tx_ring;
int i;
e1000_mac_type mac_type = adapter->hw.mac_type;
opt = (struct e1000_option) {
.type = range_option,
.name = "Transmit Descriptors",
.err = "using default of "
__MODULE_STRING(E1000_DEFAULT_TXD),
.def = E1000_DEFAULT_TXD,
.arg = { .r = {
.min = E1000_MIN_TXD,
.max = mac_type < e1000_82544 ? E1000_MAX_TXD : E1000_MAX_82544_TXD
}}
};
if (num_TxDescriptors > bd) {
tx_ring->count = TxDescriptors[bd];
e1000_validate_option(&tx_ring->count, &opt, adapter);
tx_ring->count = ALIGN(tx_ring->count,
REQ_TX_DESCRIPTOR_MULTIPLE);
} else {
tx_ring->count = opt.def;
}
for (i = 0; i < adapter->num_tx_queues; i++)
tx_ring[i].count = tx_ring->count;
}
{ /* Receive Descriptor Count */
struct e1000_rx_ring *rx_ring = adapter->rx_ring;
int i;
e1000_mac_type mac_type = adapter->hw.mac_type;
opt = (struct e1000_option) {
.type = range_option,
.name = "Receive Descriptors",
.err = "using default of "
__MODULE_STRING(E1000_DEFAULT_RXD),
.def = E1000_DEFAULT_RXD,
.arg = { .r = {
.min = E1000_MIN_RXD,
.max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD
}}
};
if (num_RxDescriptors > bd) {
rx_ring->count = RxDescriptors[bd];
e1000_validate_option(&rx_ring->count, &opt, adapter);
rx_ring->count = ALIGN(rx_ring->count,
REQ_RX_DESCRIPTOR_MULTIPLE);
} else {
rx_ring->count = opt.def;
}
for (i = 0; i < adapter->num_rx_queues; i++)
rx_ring[i].count = rx_ring->count;
}
{ /* Checksum Offload Enable/Disable */
opt = (struct e1000_option) {
.type = enable_option,
.name = "Checksum Offload",
.err = "defaulting to Enabled",
.def = OPTION_ENABLED
};
if (num_XsumRX > bd) {
unsigned int rx_csum = XsumRX[bd];
e1000_validate_option(&rx_csum, &opt, adapter);
adapter->rx_csum = rx_csum;
} else {
adapter->rx_csum = opt.def;
}
}
{ /* Flow Control */
static const struct e1000_opt_list fc_list[] = {
{ E1000_FC_NONE, "Flow Control Disabled" },
{ E1000_FC_RX_PAUSE, "Flow Control Receive Only" },
{ E1000_FC_TX_PAUSE, "Flow Control Transmit Only" },
{ E1000_FC_FULL, "Flow Control Enabled" },
{ E1000_FC_DEFAULT, "Flow Control Hardware Default" }
};
opt = (struct e1000_option) {
.type = list_option,
.name = "Flow Control",
.err = "reading default settings from EEPROM",
.def = E1000_FC_DEFAULT,
.arg = { .l = { .nr = ARRAY_SIZE(fc_list),
.p = fc_list }}
};
if (num_FlowControl > bd) {
unsigned int fc = FlowControl[bd];
e1000_validate_option(&fc, &opt, adapter);
adapter->hw.fc = adapter->hw.original_fc = fc;
} else {
adapter->hw.fc = adapter->hw.original_fc = opt.def;
}
}
{ /* Transmit Interrupt Delay */
opt = (struct e1000_option) {
.type = range_option,
.name = "Transmit Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_TIDV),
.def = DEFAULT_TIDV,
.arg = { .r = { .min = MIN_TXDELAY,
.max = MAX_TXDELAY }}
};
if (num_TxIntDelay > bd) {
adapter->tx_int_delay = TxIntDelay[bd];
e1000_validate_option(&adapter->tx_int_delay, &opt,
adapter);
} else {
adapter->tx_int_delay = opt.def;
}
}
{ /* Transmit Absolute Interrupt Delay */
opt = (struct e1000_option) {
.type = range_option,
.name = "Transmit Absolute Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_TADV),
.def = DEFAULT_TADV,
.arg = { .r = { .min = MIN_TXABSDELAY,
.max = MAX_TXABSDELAY }}
};
if (num_TxAbsIntDelay > bd) {
adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
adapter);
} else {
adapter->tx_abs_int_delay = opt.def;
}
}
{ /* Receive Interrupt Delay */
opt = (struct e1000_option) {
.type = range_option,
.name = "Receive Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_RDTR),
.def = DEFAULT_RDTR,
.arg = { .r = { .min = MIN_RXDELAY,
.max = MAX_RXDELAY }}
};
if (num_RxIntDelay > bd) {
adapter->rx_int_delay = RxIntDelay[bd];
e1000_validate_option(&adapter->rx_int_delay, &opt,
adapter);
} else {
adapter->rx_int_delay = opt.def;
}
}
{ /* Receive Absolute Interrupt Delay */
opt = (struct e1000_option) {
.type = range_option,
.name = "Receive Absolute Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_RADV),
.def = DEFAULT_RADV,
.arg = { .r = { .min = MIN_RXABSDELAY,
.max = MAX_RXABSDELAY }}
};
if (num_RxAbsIntDelay > bd) {
adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
adapter);
} else {
adapter->rx_abs_int_delay = opt.def;
}
}
{ /* Interrupt Throttling Rate */
opt = (struct e1000_option) {
.type = range_option,
.name = "Interrupt Throttling Rate (ints/sec)",
.err = "using default of " __MODULE_STRING(DEFAULT_ITR),
.def = DEFAULT_ITR,
.arg = { .r = { .min = MIN_ITR,
.max = MAX_ITR }}
};
if (num_InterruptThrottleRate > bd) {
adapter->itr = InterruptThrottleRate[bd];
switch (adapter->itr) {
case 0:
e_dev_info("%s turned off\n", opt.name);
break;
case 1:
e_dev_info("%s set to dynamic mode\n",
opt.name);
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
break;
case 3:
e_dev_info("%s set to dynamic conservative "
"mode\n", opt.name);
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
break;
case 4:
e_dev_info("%s set to simplified "
"(2000-8000) ints mode\n", opt.name);
adapter->itr_setting = adapter->itr;
break;
default:
e1000_validate_option(&adapter->itr, &opt,
adapter);
/* save the setting, because the dynamic bits
* change itr.
* clear the lower two bits because they are
* used as control */
adapter->itr_setting = adapter->itr & ~3;
break;
}
} else {
adapter->itr_setting = opt.def;
adapter->itr = 20000;
}
}
{ /* Smart Power Down */
opt = (struct e1000_option) {
.type = enable_option,
.name = "PHY Smart Power Down",
.err = "defaulting to Disabled",
.def = OPTION_DISABLED
};
if (num_SmartPowerDownEnable > bd) {
unsigned int spd = SmartPowerDownEnable[bd];
e1000_validate_option(&spd, &opt, adapter);
adapter->smart_power_down = spd;
} else {
adapter->smart_power_down = opt.def;
}
}
switch (adapter->hw.media_type) {
case e1000_media_type_fiber:
case e1000_media_type_internal_serdes:
e1000_check_fiber_options(adapter);
break;
case e1000_media_type_copper:
e1000_check_copper_options(adapter);
break;
default:
BUG();
}
}
/**
* e1000_check_fiber_options - Range Checking for Link Options, Fiber Version
* @adapter: board private structure
*
* Handles speed and duplex options on fiber adapters
**/
static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter)
{
int bd = adapter->bd_number;
if (num_Speed > bd) {
e_dev_info("Speed not valid for fiber adapters, parameter "
"ignored\n");
}
if (num_Duplex > bd) {
e_dev_info("Duplex not valid for fiber adapters, parameter "
"ignored\n");
}
if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
e_dev_info("AutoNeg other than 1000/Full is not valid for fiber"
"adapters, parameter ignored\n");
}
}
/**
* e1000_check_copper_options - Range Checking for Link Options, Copper Version
* @adapter: board private structure
*
* Handles speed and duplex options on copper adapters
**/
static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
{
struct e1000_option opt;
unsigned int speed, dplx, an;
int bd = adapter->bd_number;
{ /* Speed */
static const struct e1000_opt_list speed_list[] = {
{ 0, "" },
{ SPEED_10, "" },
{ SPEED_100, "" },
{ SPEED_1000, "" }};
opt = (struct e1000_option) {
.type = list_option,
.name = "Speed",
.err = "parameter ignored",
.def = 0,
.arg = { .l = { .nr = ARRAY_SIZE(speed_list),
.p = speed_list }}
};
if (num_Speed > bd) {
speed = Speed[bd];
e1000_validate_option(&speed, &opt, adapter);
} else {
speed = opt.def;
}
}
{ /* Duplex */
static const struct e1000_opt_list dplx_list[] = {
{ 0, "" },
{ HALF_DUPLEX, "" },
{ FULL_DUPLEX, "" }};
opt = (struct e1000_option) {
.type = list_option,
.name = "Duplex",
.err = "parameter ignored",
.def = 0,
.arg = { .l = { .nr = ARRAY_SIZE(dplx_list),
.p = dplx_list }}
};
if (num_Duplex > bd) {
dplx = Duplex[bd];
e1000_validate_option(&dplx, &opt, adapter);
} else {
dplx = opt.def;
}
}
if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
e_dev_info("AutoNeg specified along with Speed or Duplex, "
"parameter ignored\n");
adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
} else { /* Autoneg */
static const struct e1000_opt_list an_list[] =
#define AA "AutoNeg advertising "
{{ 0x01, AA "10/HD" },
{ 0x02, AA "10/FD" },
{ 0x03, AA "10/FD, 10/HD" },
{ 0x04, AA "100/HD" },
{ 0x05, AA "100/HD, 10/HD" },
{ 0x06, AA "100/HD, 10/FD" },
{ 0x07, AA "100/HD, 10/FD, 10/HD" },
{ 0x08, AA "100/FD" },
{ 0x09, AA "100/FD, 10/HD" },
{ 0x0a, AA "100/FD, 10/FD" },
{ 0x0b, AA "100/FD, 10/FD, 10/HD" },
{ 0x0c, AA "100/FD, 100/HD" },
{ 0x0d, AA "100/FD, 100/HD, 10/HD" },
{ 0x0e, AA "100/FD, 100/HD, 10/FD" },
{ 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" },
{ 0x20, AA "1000/FD" },
{ 0x21, AA "1000/FD, 10/HD" },
{ 0x22, AA "1000/FD, 10/FD" },
{ 0x23, AA "1000/FD, 10/FD, 10/HD" },
{ 0x24, AA "1000/FD, 100/HD" },
{ 0x25, AA "1000/FD, 100/HD, 10/HD" },
{ 0x26, AA "1000/FD, 100/HD, 10/FD" },
{ 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" },
{ 0x28, AA "1000/FD, 100/FD" },
{ 0x29, AA "1000/FD, 100/FD, 10/HD" },
{ 0x2a, AA "1000/FD, 100/FD, 10/FD" },
{ 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" },
{ 0x2c, AA "1000/FD, 100/FD, 100/HD" },
{ 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" },
{ 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" },
{ 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }};
opt = (struct e1000_option) {
.type = list_option,
.name = "AutoNeg",
.err = "parameter ignored",
.def = AUTONEG_ADV_DEFAULT,
.arg = { .l = { .nr = ARRAY_SIZE(an_list),
.p = an_list }}
};
if (num_AutoNeg > bd) {
an = AutoNeg[bd];
e1000_validate_option(&an, &opt, adapter);
} else {
an = opt.def;
}
adapter->hw.autoneg_advertised = an;
}
switch (speed + dplx) {
case 0:
adapter->hw.autoneg = adapter->fc_autoneg = 1;
if ((num_Speed > bd) && (speed != 0 || dplx != 0))
e_dev_info("Speed and duplex autonegotiation "
"enabled\n");
break;
case HALF_DUPLEX:
e_dev_info("Half Duplex specified without Speed\n");
e_dev_info("Using Autonegotiation at Half Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
ADVERTISE_100_HALF;
break;
case FULL_DUPLEX:
e_dev_info("Full Duplex specified without Speed\n");
e_dev_info("Using Autonegotiation at Full Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
ADVERTISE_100_FULL |
ADVERTISE_1000_FULL;
break;
case SPEED_10:
e_dev_info("10 Mbps Speed specified without Duplex\n");
e_dev_info("Using Autonegotiation at 10 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
ADVERTISE_10_FULL;
break;
case SPEED_10 + HALF_DUPLEX:
e_dev_info("Forcing to 10 Mbps Half Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_10_half;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_10 + FULL_DUPLEX:
e_dev_info("Forcing to 10 Mbps Full Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_10_full;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_100:
e_dev_info("100 Mbps Speed specified without Duplex\n");
e_dev_info("Using Autonegotiation at 100 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
ADVERTISE_100_FULL;
break;
case SPEED_100 + HALF_DUPLEX:
e_dev_info("Forcing to 100 Mbps Half Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_100_half;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_100 + FULL_DUPLEX:
e_dev_info("Forcing to 100 Mbps Full Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_100_full;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_1000:
e_dev_info("1000 Mbps Speed specified without Duplex\n");
goto full_duplex_only;
case SPEED_1000 + HALF_DUPLEX:
e_dev_info("Half Duplex is not supported at 1000 Mbps\n");
/* fall through */
case SPEED_1000 + FULL_DUPLEX:
full_duplex_only:
e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex "
"only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
break;
default:
BUG();
}
/* Speed, AutoNeg and MDI/MDI-X must all play nice */
if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) {
e_dev_info("Speed, AutoNeg and MDI-X specs are incompatible. "
"Setting MDI-X to a compatible value.\n");
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,37 @@
################################################################################
#
# Intel PRO/1000 Linux driver
# Copyright(c) 1999 - 2011 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
#
# Contact Information:
# Linux NICS <linux.nics@intel.com>
# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
#
################################################################################
#
# Makefile for the Intel(R) PRO/1000 ethernet driver
#
obj-$(CONFIG_E1000E) += e1000e.o
e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \
lib.o phy.o param.o ethtool.o netdev.o

@@ -0,0 +1,844 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _E1000_DEFINES_H_
#define _E1000_DEFINES_H_
#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
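These command bits are OR-ed into the 32-bit cmd field of each legacy transmit descriptor; the last descriptor of a frame typically requests end-of-packet, FCS insertion and status write-back. A small self-contained sketch using the values above (illustrative, not the driver's own setup code):

#include <assert.h>
#include <stdint.h>

#define E1000_TXD_CMD_EOP	0x01000000	/* End of Packet */
#define E1000_TXD_CMD_IFCS	0x02000000	/* Insert FCS (Ethernet CRC) */
#define E1000_TXD_CMD_RS	0x08000000	/* Report Status */

int main(void)
{
	/* Command value for the final descriptor of a frame. */
	uint32_t cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;

	assert(cmd == 0x0b000000);
	return 0;
}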
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
#define REQ_TX_DESCRIPTOR_MULTIPLE 8
#define REQ_RX_DESCRIPTOR_MULTIPLE 8
/* Definitions for power management and wakeup registers */
/* Wake Up Control */
#define E1000_WUC_APME 0x00000001 /* APM Enable */
#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
/* Wake Up Filter Control */
#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
/* Wake Up Status */
#define E1000_WUS_LNKC E1000_WUFC_LNKC
#define E1000_WUS_MAG E1000_WUFC_MAG
#define E1000_WUS_EX E1000_WUFC_EX
#define E1000_WUS_MC E1000_WUFC_MC
#define E1000_WUS_BC E1000_WUFC_BC
/* Extended Device Control */
#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */
#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
#define E1000_CTRL_EXT_EIAME 0x01000000
#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
#define E1000_CTRL_EXT_LSECCK 0x00001000
#define E1000_CTRL_EXT_PHYPDEN 0x00100000
/* Receive Descriptor bit definitions */
#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
#define E1000_RXDEXT_STATERR_CE 0x01000000
#define E1000_RXDEXT_STATERR_SE 0x02000000
#define E1000_RXDEXT_STATERR_SEQ 0x04000000
#define E1000_RXDEXT_STATERR_CXE 0x10000000
#define E1000_RXDEXT_STATERR_RXE 0x80000000
/* mask to determine if packets should be dropped due to frame errors */
#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
E1000_RXD_ERR_CE | \
E1000_RXD_ERR_SE | \
E1000_RXD_ERR_SEQ | \
E1000_RXD_ERR_CXE | \
E1000_RXD_ERR_RXE)
/* Same mask, but for extended and packet split descriptors */
#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
E1000_RXDEXT_STATERR_CE | \
E1000_RXDEXT_STATERR_SE | \
E1000_RXDEXT_STATERR_SEQ | \
E1000_RXDEXT_STATERR_CXE | \
E1000_RXDEXT_STATERR_RXE)
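The two masks let the receive path reject a bad frame with a single test instead of checking each error bit in turn. A self-contained sketch of that check, reusing the legacy-descriptor error bits listed above (the helper name is hypothetical):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define E1000_RXD_ERR_CE	0x01	/* CRC Error */
#define E1000_RXD_ERR_SE	0x02	/* Symbol Error */
#define E1000_RXD_ERR_SEQ	0x04	/* Sequence Error */
#define E1000_RXD_ERR_CXE	0x10	/* Carrier Extension Error */
#define E1000_RXD_ERR_RXE	0x80	/* Rx Data Error */

#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
	E1000_RXD_ERR_CE | E1000_RXD_ERR_SE | E1000_RXD_ERR_SEQ | \
	E1000_RXD_ERR_CXE | E1000_RXD_ERR_RXE)

/* Hypothetical helper: should this descriptor's frame be dropped? */
static bool rx_frame_has_errors(uint8_t errors)
{
	return (errors & E1000_RXD_ERR_FRAME_ERR_MASK) != 0;
}

int main(void)
{
	assert(rx_frame_has_errors(E1000_RXD_ERR_CE));	/* CRC error: drop */
	assert(!rx_frame_has_errors(0x20));		/* TCPE alone is not a frame error */
	return 0;
}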
#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000
/* Management Control */
#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
/* Enable MAC address filtering */
#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
/* Enable MNG packets to host memory */
#define E1000_MANC_EN_MNG2HOST 0x00200000
#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */
#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */
#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */
#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */
/* Receive Control */
#define E1000_RCTL_EN 0x00000002 /* enable */
#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */
#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */
#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */
#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */
#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */
#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */
/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */
#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */
#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */
#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
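Note that the two-bit size code is reused: 0x00030000 selects 256-byte buffers when E1000_RCTL_BSEX is clear but 4096-byte buffers when it is set. A minimal sketch composing a receive-control value for 4 KiB buffers from the defines above (illustrative only; the real setup code also programs enable, broadcast and threshold bits):

#include <assert.h>
#include <stdint.h>

#define E1000_RCTL_SZ_256	0x00030000	/* 256-byte buffers, BSEX = 0 */
#define E1000_RCTL_SZ_4096	0x00030000	/* 4096-byte buffers, BSEX = 1 */
#define E1000_RCTL_BSEX		0x02000000	/* Buffer size extension */
#define E1000_RCTL_SECRC	0x04000000	/* Strip Ethernet CRC */

int main(void)
{
	/* 4 KiB receive buffers: size code 0x3 means 4096 only with BSEX set. */
	uint32_t rctl = E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_SECRC;

	assert(rctl == 0x06030000);
	return 0;
}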
/*
* Use byte values for the following shift parameters
* Usage:
* psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
* E1000_PSRCTL_BSIZE0_MASK) |
* ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
* E1000_PSRCTL_BSIZE1_MASK) |
* ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
* E1000_PSRCTL_BSIZE2_MASK) |
* ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
* E1000_PSRCTL_BSIZE3_MASK))
* where value0 = [128..16256], default=256
* value1 = [1024..64512], default=4096
* value2 = [0..64512], default=4096
* value3 = [0..64512], default=0
*/
#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
/* SWFW_SYNC Definitions */
#define E1000_SWFW_EEP_SM 0x1
#define E1000_SWFW_PHY0_SM 0x2
#define E1000_SWFW_PHY1_SM 0x4
#define E1000_SWFW_CSR_SM 0x8
/* Device Control */
#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */
#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
#define E1000_CTRL_RST 0x04000000 /* Global reset */
#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
/*
* Bit definitions for the Management Data IO (MDIO) and Management Data
* Clock (MDC) pins in the Device Control Register.
*/
/* Device Status */
#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
#define E1000_STATUS_FUNC_SHIFT 2
#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */
#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
/* Duplex constants */
#define HALF_DUPLEX 1
#define FULL_DUPLEX 2
#define ADVERTISE_10_HALF 0x0001
#define ADVERTISE_10_FULL 0x0002
#define ADVERTISE_100_HALF 0x0004
#define ADVERTISE_100_FULL 0x0008
#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
#define ADVERTISE_1000_FULL 0x0020
/* 1000/H is not supported, nor spec-compliant. */
#define E1000_ALL_SPEED_DUPLEX ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
ADVERTISE_1000_FULL)
#define E1000_ALL_NOT_GIG ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
ADVERTISE_100_HALF | ADVERTISE_100_FULL)
#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
/* LED Control */
#define E1000_PHY_LED0_MODE_MASK 0x00000007
#define E1000_PHY_LED0_IVRT 0x00000008
#define E1000_PHY_LED0_MASK 0x0000001F
#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
#define E1000_LEDCTL_LED0_MODE_SHIFT 0
#define E1000_LEDCTL_LED0_IVRT 0x00000040
#define E1000_LEDCTL_LED0_BLINK 0x00000080
#define E1000_LEDCTL_MODE_LINK_UP 0x2
#define E1000_LEDCTL_MODE_LED_ON 0xE
#define E1000_LEDCTL_MODE_LED_OFF 0xF
/* Transmit Descriptor bit definitions */
#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
/* Transmit Control */
#define E1000_TCTL_EN 0x00000002 /* enable Tx */
#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
/* Transmit Arbitration Count */
/* SerDes Control */
#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
/* Receive Checksum Control */
#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
/* Header split receive */
#define E1000_RFCTL_NFSW_DIS 0x00000040
#define E1000_RFCTL_NFSR_DIS 0x00000080
#define E1000_RFCTL_ACK_DIS 0x00001000
#define E1000_RFCTL_EXTEN 0x00008000
#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
/* Collision related configuration parameters */
#define E1000_COLLISION_THRESHOLD 15
#define E1000_CT_SHIFT 4
#define E1000_COLLISION_DISTANCE 63
#define E1000_COLD_SHIFT 12
/* Default values for the transmit IPG register */
#define DEFAULT_82543_TIPG_IPGT_COPPER 8
#define E1000_TIPG_IPGT_MASK 0x000003FF
#define DEFAULT_82543_TIPG_IPGR1 8
#define E1000_TIPG_IPGR1_SHIFT 10
#define DEFAULT_82543_TIPG_IPGR2 6
#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
#define E1000_TIPG_IPGR2_SHIFT 20
#define MAX_JUMBO_FRAME_SIZE 0x3F00
/* Extended Configuration Control and Size */
#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008
#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080
#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000
#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16
#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000
#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16
#define E1000_PHY_CTRL_D0A_LPLU 0x00000002
#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004
#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040
#define E1000_KABGTXD_BGSQLBIAS 0x00050000
/* PBA constants */
#define E1000_PBA_8K 0x0008 /* 8KB */
#define E1000_PBA_16K 0x0010 /* 16KB */
#define E1000_PBS_16K E1000_PBA_16K
#define IFS_MAX 80
#define IFS_MIN 40
#define IFS_RATIO 4
#define IFS_STEP 10
#define MIN_NUM_XMITS 1000
/* SW Semaphore Register */
#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */
/* Interrupt Cause Read */
#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */
#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */
#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */
/* PBA ECC Register */
#define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */
#define E1000_PBA_ECC_COUNTER_SHIFT 20 /* ECC counter shift value */
#define E1000_PBA_ECC_CORR_EN 0x00000001 /* ECC correction enable */
#define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */
#define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 for ECC */
/*
* This defines the bits that are set in the Interrupt Mask
* Set/Read Register. Each bit is documented below:
* o RXT0 = Receiver Timer Interrupt (ring 0)
* o TXDW = Transmit Descriptor Written Back
* o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
* o RXSEQ = Receive Sequence Error
* o LSC = Link Status Change
*/
#define IMS_ENABLE_MASK ( \
E1000_IMS_RXT0 | \
E1000_IMS_TXDW | \
E1000_IMS_RXDMT0 | \
E1000_IMS_RXSEQ | \
E1000_IMS_LSC)
/* Interrupt Mask Set */
#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */
#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */
#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */
#define E1000_IMS_TXQ1 E1000_ICR_TXQ1 /* Tx Queue 1 Interrupt */
#define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupts */
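/*
 * Editor's sketch (not part of the original header): enabling the default
 * interrupt set conceptually amounts to writing IMS_ENABLE_MASK to the IMS
 * register.  ew32()/e1e_flush() are the MMIO helpers defined in hw.h/e1000.h
 * elsewhere in this merge and are assumed to be visible here.
 */
static inline void example_irq_enable(struct e1000_hw *hw)
{
	ew32(IMS, IMS_ENABLE_MASK);	/* unmask RXT0, TXDW, RXDMT0, RXSEQ, LSC */
	e1e_flush();			/* force the posted write out */
}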
/* Interrupt Cause Set */
#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
/* Transmit Descriptor Control */
#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
/* Enable the counting of desc. still to be processed. */
#define E1000_TXDCTL_COUNT_DESC 0x00400000
/* Flow Control Constants */
#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
#define FLOW_CONTROL_TYPE 0x8808
/* 802.1q VLAN Packet Size */
#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
/* Receive Address */
/*
* Number of high/low register pairs in the RAR. The RAR (Receive Address
* Registers) holds the directed and multicast addresses that we monitor.
* Technically, we have 16 spots. However, we reserve one of these spots
* (RAR[15]) for our directed address used by controllers with
* manageability enabled, allowing us room for 15 multicast addresses.
*/
#define E1000_RAR_ENTRIES 15
#define E1000_RAH_AV 0x80000000 /* Receive address valid */
#define E1000_RAL_MAC_ADDR_LEN 4
#define E1000_RAH_MAC_ADDR_LEN 2
/* Error Codes */
#define E1000_ERR_NVM 1
#define E1000_ERR_PHY 2
#define E1000_ERR_CONFIG 3
#define E1000_ERR_PARAM 4
#define E1000_ERR_MAC_INIT 5
#define E1000_ERR_PHY_TYPE 6
#define E1000_ERR_RESET 9
#define E1000_ERR_MASTER_REQUESTS_PENDING 10
#define E1000_ERR_HOST_INTERFACE_COMMAND 11
#define E1000_BLK_PHY_RESET 12
#define E1000_ERR_SWFW_SYNC 13
#define E1000_NOT_IMPLEMENTED 14
#define E1000_ERR_INVALID_ARGUMENT 16
#define E1000_ERR_NO_SPACE 17
#define E1000_ERR_NVM_PBA_SECTION 18
/* Loop limit on how long we wait for auto-negotiation to complete */
#define FIBER_LINK_UP_LIMIT 50
#define COPPER_LINK_UP_LIMIT 10
#define PHY_AUTO_NEG_LIMIT 45
#define PHY_FORCE_LIMIT 20
/* Number of 100-microsecond intervals we wait for PCI Express master disable */
#define MASTER_DISABLE_TIMEOUT 800
/* Number of milliseconds we wait for PHY configuration done after MAC reset */
#define PHY_CFG_TIMEOUT 100
/* Number of 2-millisecond intervals we wait when acquiring MDIO ownership. */
#define MDIO_OWNERSHIP_TIMEOUT 10
/* Number of milliseconds for NVM auto read done after MAC reset. */
#define AUTO_READ_DONE_TIMEOUT 10
/* Flow Control */
#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
/* Transmit Configuration Word */
#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */
#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */
#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
/* Receive Configuration Word */
#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
#define E1000_RXCW_C 0x20000000 /* Receive config */
#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
/* PCI Express Control */
#define E1000_GCR_RXD_NO_SNOOP 0x00000001
#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
#define E1000_GCR_TXD_NO_SNOOP 0x00000008
#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
E1000_GCR_RXDSCW_NO_SNOOP | \
E1000_GCR_RXDSCR_NO_SNOOP | \
E1000_GCR_TXD_NO_SNOOP | \
E1000_GCR_TXDSCW_NO_SNOOP | \
E1000_GCR_TXDSCR_NO_SNOOP)
/* PHY Control Register */
#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
#define MII_CR_POWER_DOWN 0x0800 /* Power down */
#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
#define MII_CR_SPEED_1000 0x0040
#define MII_CR_SPEED_100 0x2000
#define MII_CR_SPEED_10 0x0000
/* PHY Status Register */
#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
/* Autoneg Advertisement Register */
#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
/* Link Partner Ability Register (Base Page) */
#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
/* Autoneg Expansion Register */
#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
/* 1000BASE-T Control Register */
#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
/* 0=DTE device */
#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
/* 0=Configure PHY as Slave */
#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
/* 0=Automatic Master/Slave config */
/* 1000BASE-T Status Register */
#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
/* PHY 1000 MII Register/Bit Definitions */
/* PHY Registers defined by IEEE */
#define PHY_CONTROL 0x00 /* Control Register */
#define PHY_STATUS 0x01 /* Status Register */
#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */
#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */
/* NVM Control */
#define E1000_EECD_SK 0x00000001 /* NVM Clock */
#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
#define E1000_EECD_DI 0x00000004 /* NVM Data In */
#define E1000_EECD_DO 0x00000008 /* NVM Data Out */
#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
#define E1000_EECD_PRES 0x00000100 /* NVM Present */
#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */
/* NVM Addressing bits based on type (0-small, 1-large) */
#define E1000_EECD_ADDR_BITS 0x00000400
#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
#define E1000_EECD_SIZE_EX_SHIFT 11
#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */
#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write registers */
#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
#define E1000_NVM_RW_REG_START 1 /* Start operation */
#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */
#define E1000_FLASH_UPDATES 2000
/* NVM Word Offsets */
#define NVM_COMPAT 0x0003
#define NVM_ID_LED_SETTINGS 0x0004
#define NVM_INIT_CONTROL2_REG 0x000F
#define NVM_INIT_CONTROL3_PORT_B 0x0014
#define NVM_INIT_3GIO_3 0x001A
#define NVM_INIT_CONTROL3_PORT_A 0x0024
#define NVM_CFG 0x0012
#define NVM_ALT_MAC_ADDR_PTR 0x0037
#define NVM_CHECKSUM_REG 0x003F
#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */
#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */
/* Mask bits for fields in Word 0x0f of the NVM */
#define NVM_WORD0F_PAUSE_MASK 0x3000
#define NVM_WORD0F_PAUSE 0x1000
#define NVM_WORD0F_ASM_DIR 0x2000
/* Mask bits for fields in Word 0x1a of the NVM */
#define NVM_WORD1A_ASPM_MASK 0x000C
/* Mask bits for fields in Word 0x03 of the EEPROM */
#define NVM_COMPAT_LOM 0x0800
/* length of string needed to store PBA number */
#define E1000_PBANUM_LENGTH 11
/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
#define NVM_SUM 0xBABA
/* PBA (printed board assembly) number words */
#define NVM_PBA_OFFSET_0 8
#define NVM_PBA_OFFSET_1 9
#define NVM_PBA_PTR_GUARD 0xFAFA
#define NVM_WORD_SIZE_BASE_SHIFT 6
/* NVM Commands - SPI */
#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */
#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */
#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */
#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */
/* SPI NVM Status Register */
#define NVM_STATUS_RDY_SPI 0x01
/* Word definitions for ID LED Settings */
#define ID_LED_RESERVED_0000 0x0000
#define ID_LED_RESERVED_FFFF 0xFFFF
#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
(ID_LED_OFF1_OFF2 << 8) | \
(ID_LED_DEF1_DEF2 << 4) | \
(ID_LED_DEF1_DEF2))
#define ID_LED_DEF1_DEF2 0x1
#define ID_LED_DEF1_ON2 0x2
#define ID_LED_DEF1_OFF2 0x3
#define ID_LED_ON1_DEF2 0x4
#define ID_LED_ON1_ON2 0x5
#define ID_LED_ON1_OFF2 0x6
#define ID_LED_OFF1_DEF2 0x7
#define ID_LED_OFF1_ON2 0x8
#define ID_LED_OFF1_OFF2 0x9
#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF
#define IGP_ACTIVITY_LED_ENABLE 0x0300
#define IGP_LED3_MODE 0x07000000
/* PCI/PCI-X/PCI-EX Config space */
#define PCI_HEADER_TYPE_REGISTER 0x0E
#define PCIE_LINK_STATUS 0x12
#define PCI_HEADER_TYPE_MULTIFUNC 0x80
#define PCIE_LINK_WIDTH_MASK 0x3F0
#define PCIE_LINK_WIDTH_SHIFT 4
#define PHY_REVISION_MASK 0xFFFFFFF0
#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
#define MAX_PHY_MULTI_PAGE_REG 0xF
/* Bit definitions for valid PHY IDs. */
/*
* I = Integrated
* E = External
*/
#define M88E1000_E_PHY_ID 0x01410C50
#define M88E1000_I_PHY_ID 0x01410C30
#define M88E1011_I_PHY_ID 0x01410C20
#define IGP01E1000_I_PHY_ID 0x02A80380
#define M88E1111_I_PHY_ID 0x01410CC0
#define GG82563_E_PHY_ID 0x01410CA0
#define IGP03E1000_E_PHY_ID 0x02A80390
#define IFE_E_PHY_ID 0x02A80330
#define IFE_PLUS_E_PHY_ID 0x02A80320
#define IFE_C_E_PHY_ID 0x02A80310
#define BME1000_E_PHY_ID 0x01410CB0
#define BME1000_E_PHY_ID_R2 0x01410CB1
#define I82577_E_PHY_ID 0x01540050
#define I82578_E_PHY_ID 0x004DD040
#define I82579_E_PHY_ID 0x01540090
/* M88E1000 Specific Registers */
#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */
#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */
#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */
#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */
/* M88E1000 PHY Specific Control Register */
#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
/* Manual MDI configuration */
#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
#define M88E1000_PSCR_AUTO_X_1000T 0x0040
/* Auto crossover enabled all speeds */
#define M88E1000_PSCR_AUTO_X_MODE 0x0060
/*
* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
* 0=Normal 10BASE-T Rx Threshold
*/
#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
/* M88E1000 PHY Specific Status Register */
#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
/* 0=<50M; 1=50-80M; 2=80-110M; 3=110-140M; 4=>140M */
#define M88E1000_PSSR_CABLE_LENGTH 0x0380
#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mb/s */
#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
/*
* Number of times we will attempt to autonegotiate before downshifting if we
* are the master
*/
#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
/*
* Number of times we will attempt to autonegotiate before downshifting if we
* are the slave
*/
#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
/* M88EC018 Rev 2 specific DownShift settings */
#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
#define I82578_EPSCR_DOWNSHIFT_ENABLE 0x0020
#define I82578_EPSCR_DOWNSHIFT_COUNTER_MASK 0x001C
/* BME1000 PHY Specific Control Register */
#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */
#define PHY_PAGE_SHIFT 5
#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
((reg) & MAX_PHY_REG_ADDRESS))
/*
* Bits...
* 15-5: page
* 4-0: register offset
*/
#define GG82563_PAGE_SHIFT 5
#define GG82563_REG(page, reg) \
(((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
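/*
 * Editor's note (illustrative arithmetic, not from the original header):
 * PHY_REG()/GG82563_REG() pack the PHY page into the bits above the 5-bit
 * register offset so a single value identifies both, e.g.
 *
 *   PHY_REG(0, 16)       = (0 << 5)   | 16 = 0x0010  (page 0,   register 16)
 *   GG82563_REG(193, 16) = (193 << 5) | 16 = 0x1830  (page 193, register 16)
 *
 * The PHY access routines mask with MAX_PHY_REG_ADDRESS to recover the
 * register offset and treat the remaining bits as the page number.
 */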
#define GG82563_MIN_ALT_REG 30
/* GG82563 Specific Registers */
#define GG82563_PHY_SPEC_CTRL \
GG82563_REG(0, 16) /* PHY Specific Control */
#define GG82563_PHY_PAGE_SELECT \
GG82563_REG(0, 22) /* Page Select */
#define GG82563_PHY_SPEC_CTRL_2 \
GG82563_REG(0, 26) /* PHY Specific Control 2 */
#define GG82563_PHY_PAGE_SELECT_ALT \
GG82563_REG(0, 29) /* Alternate Page Select */
#define GG82563_PHY_MAC_SPEC_CTRL \
GG82563_REG(2, 21) /* MAC Specific Control Register */
#define GG82563_PHY_DSP_DISTANCE \
GG82563_REG(5, 26) /* DSP Distance */
/* Page 193 - Port Control Registers */
#define GG82563_PHY_KMRN_MODE_CTRL \
GG82563_REG(193, 16) /* Kumeran Mode Control */
#define GG82563_PHY_PWR_MGMT_CTRL \
GG82563_REG(193, 20) /* Power Management Control */
/* Page 194 - KMRN Registers */
#define GG82563_PHY_INBAND_CTRL \
GG82563_REG(194, 18) /* Inband Control */
/* MDI Control */
#define E1000_MDIC_REG_SHIFT 16
#define E1000_MDIC_PHY_SHIFT 21
#define E1000_MDIC_OP_WRITE 0x04000000
#define E1000_MDIC_OP_READ 0x08000000
#define E1000_MDIC_READY 0x10000000
#define E1000_MDIC_ERROR 0x40000000
/* SerDes Control */
#define E1000_GEN_POLL_TIMEOUT 640
#endif /* _E1000_DEFINES_H_ */

View File

@ -0,0 +1,742 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
/* Linux PRO/1000 Ethernet Driver main header file */
#ifndef _E1000_H_
#define _E1000_H_
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include "hw.h"
struct e1000_info;
#define e_dbg(format, arg...) \
netdev_dbg(hw->adapter->netdev, format, ## arg)
#define e_err(format, arg...) \
netdev_err(adapter->netdev, format, ## arg)
#define e_info(format, arg...) \
netdev_info(adapter->netdev, format, ## arg)
#define e_warn(format, arg...) \
netdev_warn(adapter->netdev, format, ## arg)
#define e_notice(format, arg...) \
netdev_notice(adapter->netdev, format, ## arg)
/* Interrupt modes, as used by the IntMode parameter */
#define E1000E_INT_MODE_LEGACY 0
#define E1000E_INT_MODE_MSI 1
#define E1000E_INT_MODE_MSIX 2
/* Tx/Rx descriptor defines */
#define E1000_DEFAULT_TXD 256
#define E1000_MAX_TXD 4096
#define E1000_MIN_TXD 64
#define E1000_DEFAULT_RXD 256
#define E1000_MAX_RXD 4096
#define E1000_MIN_RXD 64
#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */
/* Early Receive defines */
#define E1000_ERT_2048 0x100
#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */
/* How many Tx Descriptors do we need to call netif_wake_queue ? */
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
#define AUTO_ALL_MODES 0
#define E1000_EEPROM_APME 0x0400
#define E1000_MNG_VLAN_NONE (-1)
/* Number of packet split data buffers (not including the header buffer) */
#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
#define DEFAULT_JUMBO 9234
/* BM/HV Specific Registers */
#define BM_PORT_CTRL_PAGE 769
#define PHY_UPPER_SHIFT 21
#define BM_PHY_REG(page, reg) \
(((reg) & MAX_PHY_REG_ADDRESS) |\
(((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
(((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
/* PHY Wakeup Registers and defines */
#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17)
#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0)
#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2)))
#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2)))
#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))
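/*
 * Editor's note (illustrative arithmetic, not from the original header):
 * BM_PHY_REG() extends the PHY_REG() encoding for wakeup-page registers
 * whose offsets exceed the 5-bit field, relocating the upper register bits
 * above PHY_UPPER_SHIFT (21), e.g.
 *
 *   BM_RAR_L(0) = BM_PHY_REG(800, 16)
 *               = (16 & 0x1f) | ((800 & 0xffff) << 5) | ((16 & ~0x1f) << 16)
 *               = 0x006410
 *   BM_MTA(0)   = BM_PHY_REG(800, 128)
 *               = (128 & 0x1f) | ((800 & 0xffff) << 5) | ((128 & ~0x1f) << 16)
 *               = 0x806400
 */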
#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */
#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */
#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */
#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */
#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */
#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */
#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */
#define HV_STATS_PAGE 778
#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision Count */
#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17)
#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. Count */
#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19)
#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Coll. Count */
#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21)
#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision Count */
#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24)
#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision Count */
#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26)
#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */
#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28)
#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Transmit with no CRS */
#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30)
#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */
/* BM PHY Copper Specific Status */
#define BM_CS_STATUS 17
#define BM_CS_STATUS_LINK_UP 0x0400
#define BM_CS_STATUS_RESOLVED 0x0800
#define BM_CS_STATUS_SPEED_MASK 0xC000
#define BM_CS_STATUS_SPEED_1000 0x8000
/* 82577 Mobile Phy Status Register */
#define HV_M_STATUS 26
#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000
#define HV_M_STATUS_SPEED_MASK 0x0300
#define HV_M_STATUS_SPEED_1000 0x0200
#define HV_M_STATUS_LINK_UP 0x0040
#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */
#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000
/* Time to wait before putting the device into D3 if there's no link (in ms). */
#define LINK_TIMEOUT 100
#define DEFAULT_RDTR 0
#define DEFAULT_RADV 8
#define BURST_RDTR 0x20
#define BURST_RADV 0x20
/*
* in the case of WTHRESH, it appears at least the 82571/2 hardware
* writes back 4 descriptors when WTHRESH=5, and 3 descriptors when
* WTHRESH=4, and since we want 64 bytes at a time written back, set
* it to 5
*/
#define E1000_TXDCTL_DMA_BURST_ENABLE \
(E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
E1000_TXDCTL_COUNT_DESC | \
(5 << 16) | /* wthresh must be +1 more than desired */\
(1 << 8) | /* hthresh */ \
0x1f) /* pthresh */
#define E1000_RXDCTL_DMA_BURST_ENABLE \
(0x01000000 | /* set descriptor granularity */ \
(4 << 16) | /* set writeback threshold */ \
(4 << 8) | /* set hthresh */ \
0x20) /* set pthresh */
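/*
 * Editor's sketch (not part of the original header): a driver that enables
 * DMA burst mode would typically program these values into the queue-0
 * descriptor control registers; the real driver gates this on a capability
 * flag (see FLAG2_DMA_BURST below).  ew32() and the TXDCTL()/RXDCTL()
 * register macros come from hw.h, included above.
 */
static inline void example_enable_dma_burst(struct e1000_hw *hw)
{
	ew32(TXDCTL(0), E1000_TXDCTL_DMA_BURST_ENABLE);
	ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
}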
#define E1000_TIDV_FPD (1 << 31)
#define E1000_RDTR_FPD (1 << 31)
enum e1000_boards {
board_82571,
board_82572,
board_82573,
board_82574,
board_82583,
board_80003es2lan,
board_ich8lan,
board_ich9lan,
board_ich10lan,
board_pchlan,
board_pch2lan,
};
struct e1000_ps_page {
struct page *page;
u64 dma; /* must be u64 - written to hw */
};
/*
* wrappers around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer
*/
struct e1000_buffer {
dma_addr_t dma;
struct sk_buff *skb;
union {
/* Tx */
struct {
unsigned long time_stamp;
u16 length;
u16 next_to_watch;
unsigned int segs;
unsigned int bytecount;
u16 mapped_as_page;
};
/* Rx */
struct {
/* arrays of page information for packet split */
struct e1000_ps_page *ps_pages;
struct page *page;
};
};
};
struct e1000_ring {
void *desc; /* pointer to ring memory */
dma_addr_t dma; /* phys address of ring */
unsigned int size; /* length of ring in bytes */
unsigned int count; /* number of desc. in ring */
u16 next_to_use;
u16 next_to_clean;
u16 head;
u16 tail;
/* array of buffer information structs */
struct e1000_buffer *buffer_info;
char name[IFNAMSIZ + 5];
u32 ims_val;
u32 itr_val;
u16 itr_register;
int set_itr;
struct sk_buff *rx_skb_top;
};
/* PHY register snapshot values */
struct e1000_phy_regs {
u16 bmcr; /* basic mode control register */
u16 bmsr; /* basic mode status register */
u16 advertise; /* auto-negotiation advertisement */
u16 lpa; /* link partner ability register */
u16 expansion; /* auto-negotiation expansion reg */
u16 ctrl1000; /* 1000BASE-T control register */
u16 stat1000; /* 1000BASE-T status register */
u16 estatus; /* extended status register */
};
/* board specific private data structure */
struct e1000_adapter {
struct timer_list watchdog_timer;
struct timer_list phy_info_timer;
struct timer_list blink_timer;
struct work_struct reset_task;
struct work_struct watchdog_task;
const struct e1000_info *ei;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u32 bd_number;
u32 rx_buffer_len;
u16 mng_vlan_id;
u16 link_speed;
u16 link_duplex;
u16 eeprom_vers;
/* track device up/down/testing state */
unsigned long state;
/* Interrupt Throttle Rate */
u32 itr;
u32 itr_setting;
u16 tx_itr;
u16 rx_itr;
/*
* Tx
*/
struct e1000_ring *tx_ring /* One per active queue */
____cacheline_aligned_in_smp;
struct napi_struct napi;
unsigned int restart_queue;
u32 txd_cmd;
bool detect_tx_hung;
u8 tx_timeout_factor;
u32 tx_int_delay;
u32 tx_abs_int_delay;
unsigned int total_tx_bytes;
unsigned int total_tx_packets;
unsigned int total_rx_bytes;
unsigned int total_rx_packets;
/* Tx stats */
u64 tpt_old;
u64 colc_old;
u32 gotc;
u64 gotc_old;
u32 tx_timeout_count;
u32 tx_fifo_head;
u32 tx_head_addr;
u32 tx_fifo_size;
u32 tx_dma_failed;
/*
* Rx
*/
bool (*clean_rx) (struct e1000_adapter *adapter,
int *work_done, int work_to_do)
____cacheline_aligned_in_smp;
void (*alloc_rx_buf) (struct e1000_adapter *adapter,
int cleaned_count, gfp_t gfp);
struct e1000_ring *rx_ring;
u32 rx_int_delay;
u32 rx_abs_int_delay;
/* Rx stats */
u64 hw_csum_err;
u64 hw_csum_good;
u64 rx_hdr_split;
u32 gorc;
u64 gorc_old;
u32 alloc_rx_buff_failed;
u32 rx_dma_failed;
unsigned int rx_ps_pages;
u16 rx_ps_bsize0;
u32 max_frame_size;
u32 min_frame_size;
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
/* structs defined in e1000_hw.h */
struct e1000_hw hw;
spinlock_t stats64_lock;
struct e1000_hw_stats stats;
struct e1000_phy_info phy_info;
struct e1000_phy_stats phy_stats;
/* Snapshot of PHY registers */
struct e1000_phy_regs phy_regs;
struct e1000_ring test_tx_ring;
struct e1000_ring test_rx_ring;
u32 test_icr;
u32 msg_enable;
unsigned int num_vectors;
struct msix_entry *msix_entries;
int int_mode;
u32 eiac_mask;
u32 eeprom_wol;
u32 wol;
u32 pba;
u32 max_hw_frame_size;
bool fc_autoneg;
unsigned int flags;
unsigned int flags2;
struct work_struct downshift_task;
struct work_struct update_phy_task;
struct work_struct print_hang_task;
bool idle_check;
int phy_hang_count;
};
struct e1000_info {
enum e1000_mac_type mac;
unsigned int flags;
unsigned int flags2;
u32 pba;
u32 max_hw_frame_size;
s32 (*get_variants)(struct e1000_adapter *);
const struct e1000_mac_operations *mac_ops;
const struct e1000_phy_operations *phy_ops;
const struct e1000_nvm_operations *nvm_ops;
};
/* hardware capability, feature, and workaround flags */
#define FLAG_HAS_AMT (1 << 0)
#define FLAG_HAS_FLASH (1 << 1)
#define FLAG_HAS_HW_VLAN_FILTER (1 << 2)
#define FLAG_HAS_WOL (1 << 3)
#define FLAG_HAS_ERT (1 << 4)
#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5)
#define FLAG_HAS_SWSM_ON_LOAD (1 << 6)
#define FLAG_HAS_JUMBO_FRAMES (1 << 7)
#define FLAG_READ_ONLY_NVM (1 << 8)
#define FLAG_IS_ICH (1 << 9)
#define FLAG_HAS_MSIX (1 << 10)
#define FLAG_HAS_SMART_POWER_DOWN (1 << 11)
#define FLAG_IS_QUAD_PORT_A (1 << 12)
#define FLAG_IS_QUAD_PORT (1 << 13)
#define FLAG_TIPG_MEDIUM_FOR_80003ESLAN (1 << 14)
#define FLAG_APME_IN_WUC (1 << 15)
#define FLAG_APME_IN_CTRL3 (1 << 16)
#define FLAG_APME_CHECK_PORT_B (1 << 17)
#define FLAG_DISABLE_FC_PAUSE_TIME (1 << 18)
#define FLAG_NO_WAKE_UCAST (1 << 19)
#define FLAG_MNG_PT_ENABLED (1 << 20)
#define FLAG_RESET_OVERWRITES_LAA (1 << 21)
#define FLAG_TARC_SPEED_MODE_BIT (1 << 22)
#define FLAG_TARC_SET_BIT_ZERO (1 << 23)
#define FLAG_RX_NEEDS_RESTART (1 << 24)
#define FLAG_LSC_GIG_SPEED_DROP (1 << 25)
#define FLAG_SMART_POWER_DOWN (1 << 26)
#define FLAG_MSI_ENABLED (1 << 27)
/* reserved (1 << 28) */
#define FLAG_TSO_FORCE (1 << 29)
#define FLAG_RX_RESTART_NOW (1 << 30)
#define FLAG_MSI_TEST_FAILED (1 << 31)
#define FLAG2_CRC_STRIPPING (1 << 0)
#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
#define FLAG2_IS_DISCARDING (1 << 2)
#define FLAG2_DISABLE_ASPM_L1 (1 << 3)
#define FLAG2_HAS_PHY_STATS (1 << 4)
#define FLAG2_HAS_EEE (1 << 5)
#define FLAG2_DMA_BURST (1 << 6)
#define FLAG2_DISABLE_ASPM_L0S (1 << 7)
#define FLAG2_DISABLE_AIM (1 << 8)
#define FLAG2_CHECK_PHY_HANG (1 << 9)
#define FLAG2_NO_DISABLE_RX (1 << 10)
#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
#define E1000_RX_DESC_EXT(R, i) \
(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc)
#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc)
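/*
 * Editor's sketch (not part of the original header): the accessors above
 * simply index the ring's descriptor array by type.  A Tx clean-up pass
 * might walk the ring like this, checking the Descriptor Done status bit;
 * the e1000_tx_desc layout is the one declared in hw.h.
 */
static inline unsigned int example_count_done_tx_descs(struct e1000_ring *tx_ring)
{
	unsigned int i, done = 0;

	for (i = 0; i < tx_ring->count; i++) {
		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);

		if (tx_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD))
			done++;	/* hardware has finished with this descriptor */
	}
	return done;
}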
enum e1000_state_t {
__E1000_TESTING,
__E1000_RESETTING,
__E1000_ACCESS_SHARED_RESOURCE,
__E1000_DOWN
};
enum latency_range {
lowest_latency = 0,
low_latency = 1,
bulk_latency = 2,
latency_invalid = 255
};
extern char e1000e_driver_name[];
extern const char e1000e_driver_version[];
extern void e1000e_check_options(struct e1000_adapter *adapter);
extern void e1000e_set_ethtool_ops(struct net_device *netdev);
extern int e1000e_up(struct e1000_adapter *adapter);
extern void e1000e_down(struct e1000_adapter *adapter);
extern void e1000e_reinit_locked(struct e1000_adapter *adapter);
extern void e1000e_reset(struct e1000_adapter *adapter);
extern void e1000e_power_up_phy(struct e1000_adapter *adapter);
extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter);
extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64
*stats);
extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
extern unsigned int copybreak;
extern char *e1000e_get_hw_dev_name(struct e1000_hw *hw);
extern const struct e1000_info e1000_82571_info;
extern const struct e1000_info e1000_82572_info;
extern const struct e1000_info e1000_82573_info;
extern const struct e1000_info e1000_82574_info;
extern const struct e1000_info e1000_82583_info;
extern const struct e1000_info e1000_ich8_info;
extern const struct e1000_info e1000_ich9_info;
extern const struct e1000_info e1000_ich10_info;
extern const struct e1000_info e1000_pch_info;
extern const struct e1000_info e1000_pch2_info;
extern const struct e1000_info e1000_es2_info;
extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
u32 pba_num_size);
extern s32 e1000e_commit_phy(struct e1000_hw *hw);
extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw);
extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw);
extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state);
extern void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
bool state);
extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
extern void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
extern void e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw);
extern s32 e1000e_setup_led_generic(struct e1000_hw *hw);
extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
extern s32 e1000e_led_on_generic(struct e1000_hw *hw);
extern s32 e1000e_led_off_generic(struct e1000_hw *hw);
extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
extern void e1000_set_lan_id_single_port(struct e1000_hw *hw);
extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex);
extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex);
extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
extern s32 e1000e_get_auto_rd_done(struct e1000_hw *hw);
extern s32 e1000e_id_led_init(struct e1000_hw *hw);
extern void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw);
extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw);
extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
extern s32 e1000e_setup_link(struct e1000_hw *hw);
extern void e1000_clear_vfta_generic(struct e1000_hw *hw);
extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
u8 *mc_addr_list,
u32 mc_addr_count);
extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
extern s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data);
extern void e1000e_config_collision_dist(struct e1000_hw *hw);
extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
extern s32 e1000e_blink_led_generic(struct e1000_hw *hw);
extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
extern void e1000e_reset_adaptive(struct e1000_hw *hw);
extern void e1000e_update_adaptive(struct e1000_hw *hw);
extern s32 e1000e_setup_copper_link(struct e1000_hw *hw);
extern s32 e1000e_get_phy_id(struct e1000_hw *hw);
extern void e1000e_put_hw_semaphore(struct e1000_hw *hw);
extern s32 e1000e_check_reset_block_generic(struct e1000_hw *hw);
extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
extern s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
u16 *data);
extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
u16 data);
extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
extern s32 e1000e_get_cfg_done(struct e1000_hw *hw);
extern s32 e1000e_get_cable_length_m88(struct e1000_hw *hw);
extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
extern s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw);
extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
extern s32 e1000e_determine_phy_address(struct e1000_hw *hw);
extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
extern s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
u16 *phy_reg);
extern s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
u16 *phy_reg);
extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
u16 data);
extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
u16 *data);
extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
u32 usec_interval, bool *success);
extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
extern void e1000_power_up_phy_copper(struct e1000_hw *hw);
extern void e1000_power_down_phy_copper(struct e1000_hw *hw);
extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
extern s32 e1000e_check_downshift(struct e1000_hw *hw);
extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
u16 *data);
extern s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
u16 *data);
extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
u16 data);
extern s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
u16 data);
extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
extern s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
extern s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
extern s32 e1000_check_polarity_m88(struct e1000_hw *hw);
extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
extern s32 e1000_check_polarity_ife(struct e1000_hw *hw);
extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
extern s32 e1000_check_polarity_igp(struct e1000_hw *hw);
extern bool e1000_check_phy_82574(struct e1000_hw *hw);
static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
{
return hw->phy.ops.reset(hw);
}
static inline s32 e1000_check_reset_block(struct e1000_hw *hw)
{
return hw->phy.ops.check_reset_block(hw);
}
static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data)
{
return hw->phy.ops.read_reg(hw, offset, data);
}
static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data)
{
return hw->phy.ops.write_reg(hw, offset, data);
}
static inline s32 e1000_get_cable_length(struct e1000_hw *hw)
{
return hw->phy.ops.get_cable_length(hw);
}
extern s32 e1000e_acquire_nvm(struct e1000_hw *hw);
extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
extern void e1000e_release_nvm(struct e1000_hw *hw);
extern void e1000e_reload_nvm(struct e1000_hw *hw);
extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
{
if (hw->mac.ops.read_mac_addr)
return hw->mac.ops.read_mac_addr(hw);
return e1000_read_mac_addr_generic(hw);
}
static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
{
return hw->nvm.ops.validate(hw);
}
static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw)
{
return hw->nvm.ops.update(hw);
}
static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
return hw->nvm.ops.read(hw, offset, words, data);
}
static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
return hw->nvm.ops.write(hw, offset, words, data);
}
static inline s32 e1000_get_phy_info(struct e1000_hw *hw)
{
return hw->phy.ops.get_info(hw);
}
static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw)
{
return hw->mac.ops.check_mng_mode(hw);
}
extern bool e1000e_check_mng_mode_generic(struct e1000_hw *hw);
extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw);
extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
{
return readl(hw->hw_addr + reg);
}
static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
{
writel(val, hw->hw_addr + reg);
}
#endif /* _E1000_H_ */

File diff suppressed because it is too large

View File

@ -0,0 +1,984 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _E1000_HW_H_
#define _E1000_HW_H_
#include <linux/types.h>
struct e1000_hw;
struct e1000_adapter;
#include "defines.h"
#define er32(reg) __er32(hw, E1000_##reg)
#define ew32(reg,val) __ew32(hw, E1000_##reg, (val))
#define e1e_flush() er32(STATUS)
#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \
(writel((value), ((a)->hw_addr + reg + ((offset) << 2))))
#define E1000_READ_REG_ARRAY(a, reg, offset) \
(readl((a)->hw_addr + reg + ((offset) << 2)))
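/*
 * Editor's note (illustrative, not from the original header): er32()/ew32()
 * token-paste the register name onto the E1000_ prefix and go through the
 * __er32()/__ew32() MMIO helpers from e1000.h, e.g.
 *
 *   status = er32(STATUS);   expands to  readl(hw->hw_addr + E1000_STATUS)
 *   ew32(IMC, 0xffffffff);   expands to  writel(0xffffffff, hw->hw_addr + E1000_IMC)
 *   e1e_flush();             is an er32(STATUS), flushing posted writes
 */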
enum e1e_registers {
E1000_CTRL = 0x00000, /* Device Control - RW */
E1000_STATUS = 0x00008, /* Device Status - RO */
E1000_EECD = 0x00010, /* EEPROM/Flash Control - RW */
E1000_EERD = 0x00014, /* EEPROM Read - RW */
E1000_CTRL_EXT = 0x00018, /* Extended Device Control - RW */
E1000_FLA = 0x0001C, /* Flash Access - RW */
E1000_MDIC = 0x00020, /* MDI Control - RW */
E1000_SCTL = 0x00024, /* SerDes Control - RW */
E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */
E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */
E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */
E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */
E1000_FCT = 0x00030, /* Flow Control Type - RW */
E1000_VET = 0x00038, /* VLAN Ether Type - RW */
E1000_ICR = 0x000C0, /* Interrupt Cause Read - R/clr */
E1000_ITR = 0x000C4, /* Interrupt Throttling Rate - RW */
E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */
E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */
E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */
E1000_EIAC_82574 = 0x000DC, /* Ext. Interrupt Auto Clear - RW */
E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */
E1000_IVAR = 0x000E4, /* Interrupt Vector Allocation - RW */
E1000_EITR_82574_BASE = 0x000E8, /* Interrupt Throttling - RW */
#define E1000_EITR_82574(_n) (E1000_EITR_82574_BASE + (_n << 2))
E1000_RCTL = 0x00100, /* Rx Control - RW */
E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */
E1000_TXCW = 0x00178, /* Tx Configuration Word - RW */
E1000_RXCW = 0x00180, /* Rx Configuration Word - RO */
E1000_TCTL = 0x00400, /* Tx Control - RW */
E1000_TCTL_EXT = 0x00404, /* Extended Tx Control - RW */
E1000_TIPG = 0x00410, /* Tx Inter-packet gap -RW */
E1000_AIT = 0x00458, /* Adaptive Interframe Spacing Throttle -RW */
E1000_LEDCTL = 0x00E00, /* LED Control - RW */
E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */
E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */
E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */
#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */
E1000_PBS = 0x01008, /* Packet Buffer Size */
E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */
E1000_FLOP = 0x0103C, /* FLASH Opcode Register */
E1000_PBA_ECC = 0x01100, /* PBA ECC Register */
E1000_ERT = 0x02008, /* Early Rx Threshold - RW */
E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */
E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */
E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */
E1000_RDBAL = 0x02800, /* Rx Descriptor Base Address Low - RW */
E1000_RDBAH = 0x02804, /* Rx Descriptor Base Address High - RW */
E1000_RDLEN = 0x02808, /* Rx Descriptor Length - RW */
E1000_RDH = 0x02810, /* Rx Descriptor Head - RW */
E1000_RDT = 0x02818, /* Rx Descriptor Tail - RW */
E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */
E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8))
E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
/* Convenience macros
*
* Note: "_n" is the queue number of the register to be written to.
*
* Example usage:
* E1000_RDBAL_REG(current_rx_queue)
*
*/
#define E1000_RDBAL_REG(_n) (E1000_RDBAL + (_n << 8))
E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */
E1000_TDBAL = 0x03800, /* Tx Descriptor Base Address Low - RW */
E1000_TDBAH = 0x03804, /* Tx Descriptor Base Address High - RW */
E1000_TDLEN = 0x03808, /* Tx Descriptor Length - RW */
E1000_TDH = 0x03810, /* Tx Descriptor Head - RW */
E1000_TDT = 0x03818, /* Tx Descriptor Tail - RW */
E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */
E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */
#define E1000_TXDCTL(_n) (E1000_TXDCTL_BASE + (_n << 8))
E1000_TADV = 0x0382C, /* Tx Interrupt Absolute Delay Val - RW */
E1000_TARC_BASE = 0x03840, /* Tx Arbitration Count (0) */
#define E1000_TARC(_n) (E1000_TARC_BASE + (_n << 8))
E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */
E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */
E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */
E1000_RXERRC = 0x0400C, /* Receive Error Count - R/clr */
E1000_MPC = 0x04010, /* Missed Packet Count - R/clr */
E1000_SCC = 0x04014, /* Single Collision Count - R/clr */
E1000_ECOL = 0x04018, /* Excessive Collision Count - R/clr */
E1000_MCC = 0x0401C, /* Multiple Collision Count - R/clr */
E1000_LATECOL = 0x04020, /* Late Collision Count - R/clr */
E1000_COLC = 0x04028, /* Collision Count - R/clr */
E1000_DC = 0x04030, /* Defer Count - R/clr */
E1000_TNCRS = 0x04034, /* Tx-No CRS - R/clr */
E1000_SEC = 0x04038, /* Sequence Error Count - R/clr */
E1000_CEXTERR = 0x0403C, /* Carrier Extension Error Count - R/clr */
E1000_RLEC = 0x04040, /* Receive Length Error Count - R/clr */
E1000_XONRXC = 0x04048, /* XON Rx Count - R/clr */
E1000_XONTXC = 0x0404C, /* XON Tx Count - R/clr */
E1000_XOFFRXC = 0x04050, /* XOFF Rx Count - R/clr */
E1000_XOFFTXC = 0x04054, /* XOFF Tx Count - R/clr */
E1000_FCRUC = 0x04058, /* Flow Control Rx Unsupported Count- R/clr */
E1000_PRC64 = 0x0405C, /* Packets Rx (64 bytes) - R/clr */
E1000_PRC127 = 0x04060, /* Packets Rx (65-127 bytes) - R/clr */
E1000_PRC255 = 0x04064, /* Packets Rx (128-255 bytes) - R/clr */
E1000_PRC511 = 0x04068, /* Packets Rx (255-511 bytes) - R/clr */
E1000_PRC1023 = 0x0406C, /* Packets Rx (512-1023 bytes) - R/clr */
E1000_PRC1522 = 0x04070, /* Packets Rx (1024-1522 bytes) - R/clr */
E1000_GPRC = 0x04074, /* Good Packets Rx Count - R/clr */
E1000_BPRC = 0x04078, /* Broadcast Packets Rx Count - R/clr */
E1000_MPRC = 0x0407C, /* Multicast Packets Rx Count - R/clr */
E1000_GPTC = 0x04080, /* Good Packets Tx Count - R/clr */
E1000_GORCL = 0x04088, /* Good Octets Rx Count Low - R/clr */
E1000_GORCH = 0x0408C, /* Good Octets Rx Count High - R/clr */
E1000_GOTCL = 0x04090, /* Good Octets Tx Count Low - R/clr */
E1000_GOTCH = 0x04094, /* Good Octets Tx Count High - R/clr */
E1000_RNBC = 0x040A0, /* Rx No Buffers Count - R/clr */
E1000_RUC = 0x040A4, /* Rx Undersize Count - R/clr */
E1000_RFC = 0x040A8, /* Rx Fragment Count - R/clr */
E1000_ROC = 0x040AC, /* Rx Oversize Count - R/clr */
E1000_RJC = 0x040B0, /* Rx Jabber Count - R/clr */
E1000_MGTPRC = 0x040B4, /* Management Packets Rx Count - R/clr */
E1000_MGTPDC = 0x040B8, /* Management Packets Dropped Count - R/clr */
E1000_MGTPTC = 0x040BC, /* Management Packets Tx Count - R/clr */
E1000_TORL = 0x040C0, /* Total Octets Rx Low - R/clr */
E1000_TORH = 0x040C4, /* Total Octets Rx High - R/clr */
E1000_TOTL = 0x040C8, /* Total Octets Tx Low - R/clr */
E1000_TOTH = 0x040CC, /* Total Octets Tx High - R/clr */
E1000_TPR = 0x040D0, /* Total Packets Rx - R/clr */
E1000_TPT = 0x040D4, /* Total Packets Tx - R/clr */
E1000_PTC64 = 0x040D8, /* Packets Tx (64 bytes) - R/clr */
E1000_PTC127 = 0x040DC, /* Packets Tx (65-127 bytes) - R/clr */
E1000_PTC255 = 0x040E0, /* Packets Tx (128-255 bytes) - R/clr */
E1000_PTC511 = 0x040E4, /* Packets Tx (256-511 bytes) - R/clr */
E1000_PTC1023 = 0x040E8, /* Packets Tx (512-1023 bytes) - R/clr */
E1000_PTC1522 = 0x040EC, /* Packets Tx (1024-1522 Bytes) - R/clr */
E1000_MPTC = 0x040F0, /* Multicast Packets Tx Count - R/clr */
E1000_BPTC = 0x040F4, /* Broadcast Packets Tx Count - R/clr */
E1000_TSCTC = 0x040F8, /* TCP Segmentation Context Tx - R/clr */
E1000_TSCTFC = 0x040FC, /* TCP Segmentation Context Tx Fail - R/clr */
E1000_IAC = 0x04100, /* Interrupt Assertion Count */
E1000_ICRXPTC = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */
E1000_ICRXATC = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */
E1000_ICTXPTC = 0x0410C, /* Irq Cause Tx Packet Timer Expire Count */
E1000_ICTXATC = 0x04110, /* Irq Cause Tx Abs Timer Expire Count */
E1000_ICTXQEC = 0x04118, /* Irq Cause Tx Queue Empty Count */
E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */
E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */
E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */
E1000_RXCSUM = 0x05000, /* Rx Checksum Control - RW */
E1000_RFCTL = 0x05008, /* Receive Filter Control */
E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */
E1000_RAL_BASE = 0x05400, /* Receive Address Low - RW */
#define E1000_RAL(_n) (E1000_RAL_BASE + ((_n) * 8))
#define E1000_RA (E1000_RAL(0))
E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */
#define E1000_RAH(_n) (E1000_RAH_BASE + ((_n) * 8))
E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */
E1000_WUC = 0x05800, /* Wakeup Control - RW */
E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */
E1000_WUS = 0x05810, /* Wakeup Status - RO */
E1000_MANC = 0x05820, /* Management Control - RW */
E1000_FFLT = 0x05F00, /* Flexible Filter Length Table - RW Array */
E1000_HOST_IF = 0x08800, /* Host Interface */
E1000_KMRNCTRLSTA = 0x00034, /* MAC-PHY interface - RW */
E1000_MANC2H = 0x05860, /* Management Control To Host - RW */
E1000_MDEF_BASE = 0x05890, /* Management Decision Filters */
#define E1000_MDEF(_n) (E1000_MDEF_BASE + ((_n) * 4))
E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */
E1000_GCR = 0x05B00, /* PCI-Ex Control */
E1000_GCR2 = 0x05B64, /* PCI-Ex Control #2 */
E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */
E1000_SWSM = 0x05B50, /* SW Semaphore */
E1000_FWSM = 0x05B54, /* FW Semaphore */
E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */
E1000_FFLT_DBG = 0x05F04, /* Debug Register */
E1000_PCH_RAICC_BASE = 0x05F50, /* Receive Address Initial CRC */
#define E1000_PCH_RAICC(_n) (E1000_PCH_RAICC_BASE + ((_n) * 4))
#define E1000_CRC_OFFSET E1000_PCH_RAICC_BASE
E1000_HICR = 0x08F00, /* Host Interface Control */
};
#define E1000_MAX_PHY_ADDR 4
/* IGP01E1000 Specific Registers */
#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
#define IGP_PAGE_SHIFT 5
#define PHY_REG_MASK 0x1F
#define BM_WUC_PAGE 800
#define BM_WUC_ADDRESS_OPCODE 0x11
#define BM_WUC_DATA_OPCODE 0x12
#define BM_WUC_ENABLE_PAGE 769
#define BM_WUC_ENABLE_REG 17
#define BM_WUC_ENABLE_BIT (1 << 2)
#define BM_WUC_HOST_WU_BIT (1 << 4)
#define BM_WUC_ME_WU_BIT (1 << 5)
#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
#define IGP01E1000_PHY_POLARITY_MASK 0x0078
#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
#define IGP01E1000_PSSR_MDIX 0x0800
#define IGP01E1000_PSSR_SPEED_MASK 0xC000
#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
#define IGP02E1000_PHY_CHANNEL_NUM 4
#define IGP02E1000_PHY_AGC_A 0x11B1
#define IGP02E1000_PHY_AGC_B 0x12B1
#define IGP02E1000_PHY_AGC_C 0x14B1
#define IGP02E1000_PHY_AGC_D 0x18B1
#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */
#define IGP02E1000_AGC_LENGTH_MASK 0x7F
#define IGP02E1000_AGC_RANGE 15
/* manage.c */
#define E1000_VFTA_ENTRY_SHIFT 5
#define E1000_VFTA_ENTRY_MASK 0x7F
#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
#define E1000_HICR_EN 0x01 /* Enable bit - RO */
/* Driver sets this bit when done to put command in RAM */
#define E1000_HICR_C 0x02
#define E1000_HICR_FW_RESET_ENABLE 0x40
#define E1000_HICR_FW_RESET 0x80
#define E1000_FWSM_MODE_MASK 0xE
#define E1000_FWSM_MODE_SHIFT 1
#define E1000_MNG_IAMT_MODE 0x3
#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10
#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0
#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10
#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
/* nvm.c */
#define E1000_STM_OPCODE 0xDB00
#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
#define E1000_KMRNCTRLSTA_REN 0x00200000
#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */
#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */
#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002
#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */
#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */
#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */
#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */
/* IFE PHY Extended Status Control */
#define IFE_PESC_POLARITY_REVERSED 0x0100
/* IFE PHY Special Control */
#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010
#define IFE_PSC_FORCE_POLARITY 0x0020
/* IFE PHY Special Control and LED Control */
#define IFE_PSCL_PROBE_MODE 0x0020
#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
/* IFE PHY MDIX Control */
#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */
#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
#define E1000_DEV_ID_82571EB_COPPER 0x105E
#define E1000_DEV_ID_82571EB_FIBER 0x105F
#define E1000_DEV_ID_82571EB_SERDES 0x1060
#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5
#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5
#define E1000_DEV_ID_82571EB_QUAD_COPPER_LP 0x10BC
#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9
#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA
#define E1000_DEV_ID_82572EI_COPPER 0x107D
#define E1000_DEV_ID_82572EI_FIBER 0x107E
#define E1000_DEV_ID_82572EI_SERDES 0x107F
#define E1000_DEV_ID_82572EI 0x10B9
#define E1000_DEV_ID_82573E 0x108B
#define E1000_DEV_ID_82573E_IAMT 0x108C
#define E1000_DEV_ID_82573L 0x109A
#define E1000_DEV_ID_82574L 0x10D3
#define E1000_DEV_ID_82574LA 0x10F6
#define E1000_DEV_ID_82583V 0x150C
#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
#define E1000_DEV_ID_ICH8_82567V_3 0x1501
#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
#define E1000_DEV_ID_ICH8_IGP_C 0x104B
#define E1000_DEV_ID_ICH8_IFE 0x104C
#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4
#define E1000_DEV_ID_ICH8_IFE_G 0x10C5
#define E1000_DEV_ID_ICH8_IGP_M 0x104D
#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD
#define E1000_DEV_ID_ICH9_BM 0x10E5
#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5
#define E1000_DEV_ID_ICH9_IGP_M 0x10BF
#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB
#define E1000_DEV_ID_ICH9_IGP_C 0x294C
#define E1000_DEV_ID_ICH9_IFE 0x10C0
#define E1000_DEV_ID_ICH9_IFE_GT 0x10C3
#define E1000_DEV_ID_ICH9_IFE_G 0x10C2
#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC
#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD
#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE
#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE
#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF
#define E1000_DEV_ID_ICH10_D_BM_V 0x1525
#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA
#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB
#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF
#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0
#define E1000_DEV_ID_PCH2_LV_LM 0x1502
#define E1000_DEV_ID_PCH2_LV_V 0x1503
#define E1000_REVISION_4 4
#define E1000_FUNC_1 1
#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
enum e1000_mac_type {
e1000_82571,
e1000_82572,
e1000_82573,
e1000_82574,
e1000_82583,
e1000_80003es2lan,
e1000_ich8lan,
e1000_ich9lan,
e1000_ich10lan,
e1000_pchlan,
e1000_pch2lan,
};
enum e1000_media_type {
e1000_media_type_unknown = 0,
e1000_media_type_copper = 1,
e1000_media_type_fiber = 2,
e1000_media_type_internal_serdes = 3,
e1000_num_media_types
};
enum e1000_nvm_type {
e1000_nvm_unknown = 0,
e1000_nvm_none,
e1000_nvm_eeprom_spi,
e1000_nvm_flash_hw,
e1000_nvm_flash_sw
};
enum e1000_nvm_override {
e1000_nvm_override_none = 0,
e1000_nvm_override_spi_small,
e1000_nvm_override_spi_large
};
enum e1000_phy_type {
e1000_phy_unknown = 0,
e1000_phy_none,
e1000_phy_m88,
e1000_phy_igp,
e1000_phy_igp_2,
e1000_phy_gg82563,
e1000_phy_igp_3,
e1000_phy_ife,
e1000_phy_bm,
e1000_phy_82578,
e1000_phy_82577,
e1000_phy_82579,
};
enum e1000_bus_width {
e1000_bus_width_unknown = 0,
e1000_bus_width_pcie_x1,
e1000_bus_width_pcie_x2,
e1000_bus_width_pcie_x4 = 4,
e1000_bus_width_32,
e1000_bus_width_64,
e1000_bus_width_reserved
};
enum e1000_1000t_rx_status {
e1000_1000t_rx_status_not_ok = 0,
e1000_1000t_rx_status_ok,
e1000_1000t_rx_status_undefined = 0xFF
};
enum e1000_rev_polarity {
e1000_rev_polarity_normal = 0,
e1000_rev_polarity_reversed,
e1000_rev_polarity_undefined = 0xFF
};
enum e1000_fc_mode {
e1000_fc_none = 0,
e1000_fc_rx_pause,
e1000_fc_tx_pause,
e1000_fc_full,
e1000_fc_default = 0xFF
};
enum e1000_ms_type {
e1000_ms_hw_default = 0,
e1000_ms_force_master,
e1000_ms_force_slave,
e1000_ms_auto
};
enum e1000_smart_speed {
e1000_smart_speed_default = 0,
e1000_smart_speed_on,
e1000_smart_speed_off
};
enum e1000_serdes_link_state {
e1000_serdes_link_down = 0,
e1000_serdes_link_autoneg_progress,
e1000_serdes_link_autoneg_complete,
e1000_serdes_link_forced_up
};
/* Receive Descriptor */
struct e1000_rx_desc {
__le64 buffer_addr; /* Address of the descriptor's data buffer */
__le16 length; /* Length of data DMAed into data buffer */
__le16 csum; /* Packet checksum */
u8 status; /* Descriptor status */
u8 errors; /* Descriptor Errors */
__le16 special;
};
/* Receive Descriptor - Extended */
union e1000_rx_desc_extended {
struct {
__le64 buffer_addr;
__le64 reserved;
} read;
struct {
struct {
__le32 mrq; /* Multiple Rx Queues */
union {
__le32 rss; /* RSS Hash */
struct {
__le16 ip_id; /* IP id */
__le16 csum; /* Packet Checksum */
} csum_ip;
} hi_dword;
} lower;
struct {
__le32 status_error; /* ext status/error */
__le16 length;
__le16 vlan; /* VLAN tag */
} upper;
} wb; /* writeback */
};
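/*
 * Editor's sketch (illustrative only, assuming the usual E1000_RXD_STAT_DD
 * definition from the defines header): the driver polls the writeback half
 * of the extended descriptor until hardware sets the Descriptor Done bit,
 * then reads the packet length and VLAN tag from the same dword group.
 */
#if 0	/* not compiled; usage sketch for the union above */
static inline bool example_rx_desc_done(union e1000_rx_desc_extended *rx_desc,
					u16 *len, u16 *vlan)
{
	u32 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	if (!(staterr & E1000_RXD_STAT_DD))
		return false;	/* hardware has not written this one back yet */

	*len = le16_to_cpu(rx_desc->wb.upper.length);
	*vlan = le16_to_cpu(rx_desc->wb.upper.vlan);
	return true;
}
#endif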
#define MAX_PS_BUFFERS 4
/* Receive Descriptor - Packet Split */
union e1000_rx_desc_packet_split {
struct {
/* one buffer for protocol header(s), three data buffers */
__le64 buffer_addr[MAX_PS_BUFFERS];
} read;
struct {
struct {
__le32 mrq; /* Multiple Rx Queues */
union {
__le32 rss; /* RSS Hash */
struct {
__le16 ip_id; /* IP id */
__le16 csum; /* Packet Checksum */
} csum_ip;
} hi_dword;
} lower;
struct {
__le32 status_error; /* ext status/error */
__le16 length0; /* length of buffer 0 */
__le16 vlan; /* VLAN tag */
} middle;
struct {
__le16 header_status;
__le16 length[3]; /* length of buffers 1-3 */
} upper;
__le64 reserved;
} wb; /* writeback */
};
/* Transmit Descriptor */
struct e1000_tx_desc {
__le64 buffer_addr; /* Address of the descriptor's data buffer */
union {
__le32 data;
struct {
__le16 length; /* Data buffer length */
u8 cso; /* Checksum offset */
u8 cmd; /* Descriptor control */
} flags;
} lower;
union {
__le32 data;
struct {
u8 status; /* Descriptor status */
u8 css; /* Checksum start */
__le16 special;
} fields;
} upper;
};
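/*
 * Editor's sketch (illustrative only): a legacy transmit descriptor carries
 * the DMA address, the buffer length in the low 16 bits of lower.data and
 * the E1000_TXD_CMD_* bits (defined in the defines header) in the upper
 * bits; `dma` and `len` are assumed to come from the caller's DMA mapping.
 */
#if 0	/* not compiled; usage sketch for the struct above */
static inline void example_fill_tx_desc(struct e1000_tx_desc *tx_desc,
					dma_addr_t dma, unsigned int len)
{
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->lower.data = cpu_to_le32(E1000_TXD_CMD_EOP |
					  E1000_TXD_CMD_IFCS |
					  E1000_TXD_CMD_RS | len);
	tx_desc->upper.data = 0;
}
#endif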
/* Offload Context Descriptor */
struct e1000_context_desc {
union {
__le32 ip_config;
struct {
u8 ipcss; /* IP checksum start */
u8 ipcso; /* IP checksum offset */
__le16 ipcse; /* IP checksum end */
} ip_fields;
} lower_setup;
union {
__le32 tcp_config;
struct {
u8 tucss; /* TCP checksum start */
u8 tucso; /* TCP checksum offset */
__le16 tucse; /* TCP checksum end */
} tcp_fields;
} upper_setup;
__le32 cmd_and_length;
union {
__le32 data;
struct {
u8 status; /* Descriptor status */
u8 hdr_len; /* Header length */
__le16 mss; /* Maximum segment size */
} fields;
} tcp_seg_setup;
};
/* Offload data descriptor */
struct e1000_data_desc {
__le64 buffer_addr; /* Address of the descriptor's buffer address */
union {
__le32 data;
struct {
__le16 length; /* Data buffer length */
u8 typ_len_ext;
u8 cmd;
} flags;
} lower;
union {
__le32 data;
struct {
u8 status; /* Descriptor status */
u8 popts; /* Packet Options */
__le16 special; /* */
} fields;
} upper;
};
/* Statistics counters collected by the MAC */
struct e1000_hw_stats {
u64 crcerrs;
u64 algnerrc;
u64 symerrs;
u64 rxerrc;
u64 mpc;
u64 scc;
u64 ecol;
u64 mcc;
u64 latecol;
u64 colc;
u64 dc;
u64 tncrs;
u64 sec;
u64 cexterr;
u64 rlec;
u64 xonrxc;
u64 xontxc;
u64 xoffrxc;
u64 xofftxc;
u64 fcruc;
u64 prc64;
u64 prc127;
u64 prc255;
u64 prc511;
u64 prc1023;
u64 prc1522;
u64 gprc;
u64 bprc;
u64 mprc;
u64 gptc;
u64 gorc;
u64 gotc;
u64 rnbc;
u64 ruc;
u64 rfc;
u64 roc;
u64 rjc;
u64 mgprc;
u64 mgpdc;
u64 mgptc;
u64 tor;
u64 tot;
u64 tpr;
u64 tpt;
u64 ptc64;
u64 ptc127;
u64 ptc255;
u64 ptc511;
u64 ptc1023;
u64 ptc1522;
u64 mptc;
u64 bptc;
u64 tsctc;
u64 tsctfc;
u64 iac;
u64 icrxptc;
u64 icrxatc;
u64 ictxptc;
u64 ictxatc;
u64 ictxqec;
u64 ictxqmtc;
u64 icrxdmtc;
u64 icrxoc;
};
struct e1000_phy_stats {
u32 idle_errors;
u32 receive_errors;
};
struct e1000_host_mng_dhcp_cookie {
u32 signature;
u8 status;
u8 reserved0;
u16 vlan_id;
u32 reserved1;
u16 reserved2;
u8 reserved3;
u8 checksum;
};
/* Host Interface "Rev 1" */
struct e1000_host_command_header {
u8 command_id;
u8 command_length;
u8 command_options;
u8 checksum;
};
#define E1000_HI_MAX_DATA_LENGTH 252
struct e1000_host_command_info {
struct e1000_host_command_header command_header;
u8 command_data[E1000_HI_MAX_DATA_LENGTH];
};
/* Host Interface "Rev 2" */
struct e1000_host_mng_command_header {
u8 command_id;
u8 checksum;
u16 reserved1;
u16 reserved2;
u16 command_length;
};
#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
struct e1000_host_mng_command_info {
struct e1000_host_mng_command_header command_header;
u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
};
/* Function pointers and static data for the MAC. */
struct e1000_mac_operations {
s32 (*id_led_init)(struct e1000_hw *);
s32 (*blink_led)(struct e1000_hw *);
bool (*check_mng_mode)(struct e1000_hw *);
s32 (*check_for_link)(struct e1000_hw *);
s32 (*cleanup_led)(struct e1000_hw *);
void (*clear_hw_cntrs)(struct e1000_hw *);
void (*clear_vfta)(struct e1000_hw *);
s32 (*get_bus_info)(struct e1000_hw *);
void (*set_lan_id)(struct e1000_hw *);
s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
s32 (*led_on)(struct e1000_hw *);
s32 (*led_off)(struct e1000_hw *);
void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
s32 (*reset_hw)(struct e1000_hw *);
s32 (*init_hw)(struct e1000_hw *);
s32 (*setup_link)(struct e1000_hw *);
s32 (*setup_physical_interface)(struct e1000_hw *);
s32 (*setup_led)(struct e1000_hw *);
void (*write_vfta)(struct e1000_hw *, u32, u32);
s32 (*read_mac_addr)(struct e1000_hw *);
};
/*
* When to use various PHY register access functions:
*
* Func Caller
* Function Does Does When to use
* ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* X_reg L,P,A n/a for simple PHY reg accesses
* X_reg_locked P,A L for multiple accesses of different regs
* on different pages
* X_reg_page A L,P for multiple accesses of different regs
* on the same page
*
* Where X=[read|write], L=locking, P=sets page, A=register access
*
*/
struct e1000_phy_operations {
s32 (*acquire)(struct e1000_hw *);
s32 (*cfg_on_link_up)(struct e1000_hw *);
s32 (*check_polarity)(struct e1000_hw *);
s32 (*check_reset_block)(struct e1000_hw *);
s32 (*commit)(struct e1000_hw *);
s32 (*force_speed_duplex)(struct e1000_hw *);
s32 (*get_cfg_done)(struct e1000_hw *hw);
s32 (*get_cable_length)(struct e1000_hw *);
s32 (*get_info)(struct e1000_hw *);
s32 (*set_page)(struct e1000_hw *, u16);
s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *);
void (*release)(struct e1000_hw *);
s32 (*reset)(struct e1000_hw *);
s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
s32 (*write_reg)(struct e1000_hw *, u32, u16);
s32 (*write_reg_locked)(struct e1000_hw *, u32, u16);
s32 (*write_reg_page)(struct e1000_hw *, u32, u16);
void (*power_up)(struct e1000_hw *);
void (*power_down)(struct e1000_hw *);
};
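/*
 * Editor's sketch of the access patterns described in the table above
 * (illustrative only; assumes the ops have been wired up by the
 * MAC-specific init code): a one-off access uses the plain accessor,
 * while a burst of accesses takes the lock once around the *_locked
 * variants.
 */
#if 0	/* not compiled; usage sketch for the ops above */
static inline s32 example_phy_read_burst(struct e1000_hw *hw, u32 reg1,
					 u32 reg2, u16 *val1, u16 *val2)
{
	s32 ret;

	/* simple access: locking, paging and register access in one call */
	ret = hw->phy.ops.read_reg(hw, reg1, val1);
	if (ret)
		return ret;

	/* multiple accesses: hold the lock across the whole sequence */
	ret = hw->phy.ops.acquire(hw);
	if (ret)
		return ret;
	ret = hw->phy.ops.read_reg_locked(hw, reg1, val1);
	if (!ret)
		ret = hw->phy.ops.read_reg_locked(hw, reg2, val2);
	hw->phy.ops.release(hw);

	return ret;
}
#endif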
/* Function pointers for the NVM. */
struct e1000_nvm_operations {
s32 (*acquire)(struct e1000_hw *);
s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
void (*release)(struct e1000_hw *);
s32 (*update)(struct e1000_hw *);
s32 (*valid_led_default)(struct e1000_hw *, u16 *);
s32 (*validate)(struct e1000_hw *);
s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
};
struct e1000_mac_info {
struct e1000_mac_operations ops;
u8 addr[ETH_ALEN];
u8 perm_addr[ETH_ALEN];
enum e1000_mac_type type;
u32 collision_delta;
u32 ledctl_default;
u32 ledctl_mode1;
u32 ledctl_mode2;
u32 mc_filter_type;
u32 tx_packet_delta;
u32 txcw;
u16 current_ifs_val;
u16 ifs_max_val;
u16 ifs_min_val;
u16 ifs_ratio;
u16 ifs_step_size;
u16 mta_reg_count;
/* Maximum size of the MTA register table in all supported adapters */
#define MAX_MTA_REG 128
u32 mta_shadow[MAX_MTA_REG];
u16 rar_entry_count;
u8 forced_speed_duplex;
bool adaptive_ifs;
bool has_fwsm;
bool arc_subsystem_valid;
bool autoneg;
bool autoneg_failed;
bool get_link_status;
bool in_ifs_mode;
bool serdes_has_link;
bool tx_pkt_filtering;
enum e1000_serdes_link_state serdes_link_state;
};
struct e1000_phy_info {
struct e1000_phy_operations ops;
enum e1000_phy_type type;
enum e1000_1000t_rx_status local_rx;
enum e1000_1000t_rx_status remote_rx;
enum e1000_ms_type ms_type;
enum e1000_ms_type original_ms_type;
enum e1000_rev_polarity cable_polarity;
enum e1000_smart_speed smart_speed;
u32 addr;
u32 id;
u32 reset_delay_us; /* in usec */
u32 revision;
enum e1000_media_type media_type;
u16 autoneg_advertised;
u16 autoneg_mask;
u16 cable_length;
u16 max_cable_length;
u16 min_cable_length;
u8 mdix;
bool disable_polarity_correction;
bool is_mdix;
bool polarity_correction;
bool speed_downgraded;
bool autoneg_wait_to_complete;
};
struct e1000_nvm_info {
struct e1000_nvm_operations ops;
enum e1000_nvm_type type;
enum e1000_nvm_override override;
u32 flash_bank_size;
u32 flash_base_addr;
u16 word_size;
u16 delay_usec;
u16 address_bits;
u16 opcode_bits;
u16 page_size;
};
struct e1000_bus_info {
enum e1000_bus_width width;
u16 func;
};
struct e1000_fc_info {
u32 high_water; /* Flow control high-water mark */
u32 low_water; /* Flow control low-water mark */
u16 pause_time; /* Flow control pause timer */
u16 refresh_time; /* Flow control refresh timer */
bool send_xon; /* Flow control send XON */
bool strict_ieee; /* Strict IEEE mode */
enum e1000_fc_mode current_mode; /* FC mode in effect */
enum e1000_fc_mode requested_mode; /* FC mode requested by caller */
};
struct e1000_dev_spec_82571 {
bool laa_is_present;
u32 smb_counter;
};
struct e1000_dev_spec_80003es2lan {
bool mdic_wa_enable;
};
struct e1000_shadow_ram {
u16 value;
bool modified;
};
#define E1000_ICH8_SHADOW_RAM_WORDS 2048
struct e1000_dev_spec_ich8lan {
bool kmrn_lock_loss_workaround_enabled;
struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
bool nvm_k1_enabled;
bool eee_disable;
};
struct e1000_hw {
struct e1000_adapter *adapter;
u8 __iomem *hw_addr;
u8 __iomem *flash_address;
struct e1000_mac_info mac;
struct e1000_fc_info fc;
struct e1000_phy_info phy;
struct e1000_nvm_info nvm;
struct e1000_bus_info bus;
struct e1000_host_mng_dhcp_cookie mng_cookie;
union {
struct e1000_dev_spec_82571 e82571;
struct e1000_dev_spec_80003es2lan e80003es2lan;
struct e1000_dev_spec_ich8lan ich8lan;
} dev_spec;
};
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,478 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include <linux/netdevice.h>
#include <linux/pci.h>
#include "e1000.h"
/*
* This is the only thing that needs to be changed to adjust the
* maximum number of ports that the driver can manage.
*/
#define E1000_MAX_NIC 32
#define OPTION_UNSET -1
#define OPTION_DISABLED 0
#define OPTION_ENABLED 1
#define COPYBREAK_DEFAULT 256
unsigned int copybreak = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
"Maximum size of packet that is copied to a new buffer on receive");
/*
* All parameters are treated the same, as an integer array of values.
* This macro just reduces the need to repeat the same declaration code
* over and over (plus this helps to avoid typo bugs).
*/
#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
#define E1000_PARAM(X, desc) \
static int __devinitdata X[E1000_MAX_NIC+1] \
= E1000_PARAM_INIT; \
static unsigned int num_##X; \
module_param_array_named(X, X, int, &num_##X, 0); \
MODULE_PARM_DESC(X, desc);
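/*
 * Editor's note: each option declared with E1000_PARAM() is a per-adapter
 * array handled by module_param_array_named(), so values are given
 * comma-separated in board order at load time, e.g. (illustrative values):
 *
 *   modprobe e1000e TxIntDelay=8,16 InterruptThrottleRate=3,3
 *
 * which sets TxIntDelay to 8 on the first adapter and 16 on the second.
 */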
/*
* Transmit Interrupt Delay in units of 1.024 microseconds
* Tx interrupt delay needs to typically be set to something non-zero
*
* Valid Range: 0-65535
*/
E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
#define DEFAULT_TIDV 8
#define MAX_TXDELAY 0xFFFF
#define MIN_TXDELAY 0
/*
* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
*
* Valid Range: 0-65535
*/
E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
#define DEFAULT_TADV 32
#define MAX_TXABSDELAY 0xFFFF
#define MIN_TXABSDELAY 0
/*
* Receive Interrupt Delay in units of 1.024 microseconds
* hardware will likely hang if you set this to anything but zero.
*
* Valid Range: 0-65535
*/
E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
#define MAX_RXDELAY 0xFFFF
#define MIN_RXDELAY 0
/*
* Receive Absolute Interrupt Delay in units of 1.024 microseconds
*
* Valid Range: 0-65535
*/
E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
#define MAX_RXABSDELAY 0xFFFF
#define MIN_RXABSDELAY 0
/*
* Interrupt Throttle Rate (interrupts/sec)
*
* Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
*/
E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
#define DEFAULT_ITR 3
#define MAX_ITR 100000
#define MIN_ITR 100
/* IntMode (Interrupt Mode)
*
* Valid Range: 0 - 2
*
* Default Value: 2 (MSI-X)
*/
E1000_PARAM(IntMode, "Interrupt Mode");
#define MAX_INTMODE 2
#define MIN_INTMODE 0
/*
* Enable Smart Power Down of the PHY
*
* Valid Range: 0, 1
*
* Default Value: 0 (disabled)
*/
E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
/*
* Enable Kumeran Lock Loss workaround
*
* Valid Range: 0, 1
*
* Default Value: 1 (enabled)
*/
E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
/*
* Write Protect NVM
*
* Valid Range: 0, 1
*
* Default Value: 1 (enabled)
*/
E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
/*
* Enable CRC Stripping
*
* Valid Range: 0, 1
*
* Default Value: 1 (enabled)
*/
E1000_PARAM(CrcStripping, "Enable CRC Stripping, disable if your BMC needs " \
"the CRC");
struct e1000_option {
enum { enable_option, range_option, list_option } type;
const char *name;
const char *err;
int def;
union {
struct { /* range_option info */
int min;
int max;
} r;
struct { /* list_option info */
int nr;
struct e1000_opt_list { int i; char *str; } *p;
} l;
} arg;
};
static int __devinit e1000_validate_option(unsigned int *value,
const struct e1000_option *opt,
struct e1000_adapter *adapter)
{
if (*value == OPTION_UNSET) {
*value = opt->def;
return 0;
}
switch (opt->type) {
case enable_option:
switch (*value) {
case OPTION_ENABLED:
e_info("%s Enabled\n", opt->name);
return 0;
case OPTION_DISABLED:
e_info("%s Disabled\n", opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
e_info("%s set to %i\n", opt->name, *value);
return 0;
}
break;
case list_option: {
int i;
struct e1000_opt_list *ent;
for (i = 0; i < opt->arg.l.nr; i++) {
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
e_info("%s\n", ent->str);
return 0;
}
}
}
break;
default:
BUG();
}
e_info("Invalid %s value specified (%i) %s\n", opt->name, *value,
opt->err);
*value = opt->def;
return -1;
}
/**
* e1000e_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
*
* This routine checks all command line parameters for valid user
* input. If an invalid value is given, or if no user specified
* value exists, a default value is used. The final value is stored
* in a variable in the adapter structure.
**/
void __devinit e1000e_check_options(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
int bd = adapter->bd_number;
if (bd >= E1000_MAX_NIC) {
e_notice("Warning: no configuration for board #%i\n", bd);
e_notice("Using defaults for all values\n");
}
{ /* Transmit Interrupt Delay */
static const struct e1000_option opt = {
.type = range_option,
.name = "Transmit Interrupt Delay",
.err = "using default of "
__MODULE_STRING(DEFAULT_TIDV),
.def = DEFAULT_TIDV,
.arg = { .r = { .min = MIN_TXDELAY,
.max = MAX_TXDELAY } }
};
if (num_TxIntDelay > bd) {
adapter->tx_int_delay = TxIntDelay[bd];
e1000_validate_option(&adapter->tx_int_delay, &opt,
adapter);
} else {
adapter->tx_int_delay = opt.def;
}
}
{ /* Transmit Absolute Interrupt Delay */
static const struct e1000_option opt = {
.type = range_option,
.name = "Transmit Absolute Interrupt Delay",
.err = "using default of "
__MODULE_STRING(DEFAULT_TADV),
.def = DEFAULT_TADV,
.arg = { .r = { .min = MIN_TXABSDELAY,
.max = MAX_TXABSDELAY } }
};
if (num_TxAbsIntDelay > bd) {
adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
adapter);
} else {
adapter->tx_abs_int_delay = opt.def;
}
}
{ /* Receive Interrupt Delay */
static struct e1000_option opt = {
.type = range_option,
.name = "Receive Interrupt Delay",
.err = "using default of "
__MODULE_STRING(DEFAULT_RDTR),
.def = DEFAULT_RDTR,
.arg = { .r = { .min = MIN_RXDELAY,
.max = MAX_RXDELAY } }
};
if (num_RxIntDelay > bd) {
adapter->rx_int_delay = RxIntDelay[bd];
e1000_validate_option(&adapter->rx_int_delay, &opt,
adapter);
} else {
adapter->rx_int_delay = opt.def;
}
}
{ /* Receive Absolute Interrupt Delay */
static const struct e1000_option opt = {
.type = range_option,
.name = "Receive Absolute Interrupt Delay",
.err = "using default of "
__MODULE_STRING(DEFAULT_RADV),
.def = DEFAULT_RADV,
.arg = { .r = { .min = MIN_RXABSDELAY,
.max = MAX_RXABSDELAY } }
};
if (num_RxAbsIntDelay > bd) {
adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
adapter);
} else {
adapter->rx_abs_int_delay = opt.def;
}
}
{ /* Interrupt Throttling Rate */
static const struct e1000_option opt = {
.type = range_option,
.name = "Interrupt Throttling Rate (ints/sec)",
.err = "using default of "
__MODULE_STRING(DEFAULT_ITR),
.def = DEFAULT_ITR,
.arg = { .r = { .min = MIN_ITR,
.max = MAX_ITR } }
};
if (num_InterruptThrottleRate > bd) {
adapter->itr = InterruptThrottleRate[bd];
switch (adapter->itr) {
case 0:
e_info("%s turned off\n", opt.name);
break;
case 1:
e_info("%s set to dynamic mode\n", opt.name);
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
break;
case 3:
e_info("%s set to dynamic conservative mode\n",
opt.name);
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
break;
case 4:
e_info("%s set to simplified (2000-8000 ints) "
"mode\n", opt.name);
adapter->itr_setting = 4;
break;
default:
/*
* Save the setting, because the dynamic bits
* change itr.
*/
if (e1000_validate_option(&adapter->itr, &opt,
adapter) &&
(adapter->itr == 3)) {
/*
* In case of invalid user value,
* default to conservative mode.
*/
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
} else {
/*
* Clear the lower two bits because
* they are used as control.
*/
adapter->itr_setting =
adapter->itr & ~3;
}
break;
}
} else {
adapter->itr_setting = opt.def;
adapter->itr = 20000;
}
}
{ /* Interrupt Mode */
static struct e1000_option opt = {
.type = range_option,
.name = "Interrupt Mode",
.err = "defaulting to 2 (MSI-X)",
.def = E1000E_INT_MODE_MSIX,
.arg = { .r = { .min = MIN_INTMODE,
.max = MAX_INTMODE } }
};
if (num_IntMode > bd) {
unsigned int int_mode = IntMode[bd];
e1000_validate_option(&int_mode, &opt, adapter);
adapter->int_mode = int_mode;
} else {
adapter->int_mode = opt.def;
}
}
{ /* Smart Power Down */
static const struct e1000_option opt = {
.type = enable_option,
.name = "PHY Smart Power Down",
.err = "defaulting to Disabled",
.def = OPTION_DISABLED
};
if (num_SmartPowerDownEnable > bd) {
unsigned int spd = SmartPowerDownEnable[bd];
e1000_validate_option(&spd, &opt, adapter);
if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN)
&& spd)
adapter->flags |= FLAG_SMART_POWER_DOWN;
}
}
{ /* CRC Stripping */
static const struct e1000_option opt = {
.type = enable_option,
.name = "CRC Stripping",
.err = "defaulting to Enabled",
.def = OPTION_ENABLED
};
if (num_CrcStripping > bd) {
unsigned int crc_stripping = CrcStripping[bd];
e1000_validate_option(&crc_stripping, &opt, adapter);
if (crc_stripping == OPTION_ENABLED)
adapter->flags2 |= FLAG2_CRC_STRIPPING;
} else {
adapter->flags2 |= FLAG2_CRC_STRIPPING;
}
}
{ /* Kumeran Lock Loss Workaround */
static const struct e1000_option opt = {
.type = enable_option,
.name = "Kumeran Lock Loss Workaround",
.err = "defaulting to Enabled",
.def = OPTION_ENABLED
};
if (num_KumeranLockLoss > bd) {
unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
if (hw->mac.type == e1000_ich8lan)
e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
kmrn_lock_loss);
} else {
if (hw->mac.type == e1000_ich8lan)
e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
opt.def);
}
}
{ /* Write-protect NVM */
static const struct e1000_option opt = {
.type = enable_option,
.name = "Write-protect NVM",
.err = "defaulting to Enabled",
.def = OPTION_ENABLED
};
if (adapter->flags & FLAG_IS_ICH) {
if (num_WriteProtectNVM > bd) {
unsigned int write_protect_nvm = WriteProtectNVM[bd];
e1000_validate_option(&write_protect_nvm, &opt,
adapter);
if (write_protect_nvm)
adapter->flags |= FLAG_READ_ONLY_NVM;
} else {
if (opt.def)
adapter->flags |= FLAG_READ_ONLY_NVM;
}
}
}
}

File diff suppressed because it is too large

View File

@ -0,0 +1,37 @@
################################################################################
#
# Intel 82575 PCI-Express Ethernet Linux driver
# Copyright(c) 1999 - 2011 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
#
# Contact Information:
# Linux NICS <linux.nics@intel.com>
# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
#
################################################################################
#
# Makefile for the Intel(R) 82575 PCI-Express ethernet driver
#
obj-$(CONFIG_IGB) += igb.o
igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o

File diff suppressed because it is too large

View File

@ -0,0 +1,260 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _E1000_82575_H_
#define _E1000_82575_H_
extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
extern void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
extern void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
(ID_LED_DEF1_DEF2 << 8) | \
(ID_LED_DEF1_DEF2 << 4) | \
(ID_LED_OFF1_ON2))
#define E1000_RAR_ENTRIES_82575 16
#define E1000_RAR_ENTRIES_82576 24
#define E1000_RAR_ENTRIES_82580 24
#define E1000_RAR_ENTRIES_I350 32
#define E1000_SW_SYNCH_MB 0x00000100
#define E1000_STAT_DEV_RST_SET 0x00100000
#define E1000_CTRL_DEV_RST 0x20000000
/* SRRCTL bit definitions */
#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
#define E1000_SRRCTL_DROP_EN 0x80000000
#define E1000_SRRCTL_TIMESTAMP 0x40000000
#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
#define E1000_MRQC_ENABLE_VMDQ 0x00000003
#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005
#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
#define E1000_EICR_TX_QUEUE ( \
E1000_EICR_TX_QUEUE0 | \
E1000_EICR_TX_QUEUE1 | \
E1000_EICR_TX_QUEUE2 | \
E1000_EICR_TX_QUEUE3)
#define E1000_EICR_RX_QUEUE ( \
E1000_EICR_RX_QUEUE0 | \
E1000_EICR_RX_QUEUE1 | \
E1000_EICR_RX_QUEUE2 | \
E1000_EICR_RX_QUEUE3)
/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */
/* Receive Descriptor - Advanced */
union e1000_adv_rx_desc {
struct {
__le64 pkt_addr; /* Packet buffer address */
__le64 hdr_addr; /* Header buffer address */
} read;
struct {
struct {
struct {
__le16 pkt_info; /* RSS type, Packet type */
__le16 hdr_info; /* Split Header,
* header buffer length */
} lo_dword;
union {
__le32 rss; /* RSS Hash */
struct {
__le16 ip_id; /* IP id */
__le16 csum; /* Packet Checksum */
} csum_ip;
} hi_dword;
} lower;
struct {
__le32 status_error; /* ext status/error */
__le16 length; /* Packet length */
__le16 vlan; /* VLAN tag */
} upper;
} wb; /* writeback */
};
#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
/* Transmit Descriptor - Advanced */
union e1000_adv_tx_desc {
struct {
__le64 buffer_addr; /* Address of descriptor's data buf */
__le32 cmd_type_len;
__le32 olinfo_status;
} read;
struct {
__le64 rsvd; /* Reserved */
__le32 nxtseq_seed;
__le32 status;
} wb;
};
/* Adv Transmit Descriptor Config Masks */
#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */
#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
/* Context descriptors */
struct e1000_adv_tx_context_desc {
__le32 vlan_macip_lens;
__le32 seqnum_seed;
__le32 type_tucmd_mlhl;
__le32 mss_l4len_idx;
};
#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */
/* IPSec Encrypt Enable for ESP */
#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
/* Adv ctxt IPSec SA IDX mask */
/* Adv ctxt IPSec ESP len mask */
/* Additional Transmit Descriptor Control definitions */
#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */
/* Tx Queue Arbitration Priority 0=low, 1=high */
/* Additional Receive Descriptor Control definitions */
#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */
/* Direct Cache Access (DCA) definitions */
#define E1000_DCA_CTRL_DCA_MODE_DISABLE 0x01 /* DCA Disable */
#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
/* Additional DCA related definitions, note change in position of CPUID */
#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */
#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */
/* ETQF register bit definitions */
#define E1000_ETQF_FILTER_ENABLE (1 << 26)
#define E1000_ETQF_1588 (1 << 30)
/* FTQF register bit definitions */
#define E1000_FTQF_VF_BP 0x00008000
#define E1000_FTQF_1588_TIME_STAMP 0x08000000
#define E1000_FTQF_MASK 0xF0000000
#define E1000_FTQF_MASK_PROTO_BP 0x10000000
#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
#define E1000_NVM_APME_82575 0x0400
#define MAX_NUM_VFS 8
#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */
#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */
#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
/* Easy defines for setting default pool, would normally be left a zero */
#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
/* Other useful VMD_CTL register defines */
#define E1000_VT_CTL_IGNORE_MAC (1 << 28)
#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29)
#define E1000_VT_CTL_VM_REPL_EN (1 << 30)
/* Per VM Offload register setup */
#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */
#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */
#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */
#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */
#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */
#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */
#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */
#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
#define E1000_VLVF_ARRAY_SIZE 32
#define E1000_VLVF_VLANID_MASK 0x00000FFF
#define E1000_VLVF_POOLSEL_SHIFT 12
#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT)
#define E1000_VLVF_LVLAN 0x00100000
#define E1000_VLVF_VLANID_ENABLE 0x80000000
#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
#define E1000_IOVCTL 0x05BBC
#define E1000_IOVCTL_REUSE_VFQ 0x00000001
#define E1000_RPLOLR_STRVLAN 0x40000000
#define E1000_RPLOLR_STRCRC 0x80000000
#define E1000_DTXCTL_8023LL 0x0004
#define E1000_DTXCTL_VLAN_ADDED 0x0008
#define E1000_DTXCTL_OOS_ENABLE 0x0010
#define E1000_DTXCTL_MDP_EN 0x0020
#define E1000_DTXCTL_SPOOF_INT 0x0040
#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14)
#define ALL_QUEUES 0xFFFF
/* RX packet buffer size defines */
#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F
void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int);
void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
u16 igb_rxpbs_adjust_82580(u32 data);
s32 igb_set_eee_i350(struct e1000_hw *);
#endif

View File

@ -0,0 +1,838 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _E1000_DEFINES_H_
#define _E1000_DEFINES_H_
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
#define REQ_TX_DESCRIPTOR_MULTIPLE 8
#define REQ_RX_DESCRIPTOR_MULTIPLE 8
/* Definitions for power management and wakeup registers */
/* Wake Up Control */
#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
/* Wake Up Filter Control */
#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
/* Extended Device Control */
#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
/* Physical Func Reset Done Indication */
#define E1000_CTRL_EXT_PFRSTD 0x00004000
#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
#define E1000_CTRL_EXT_EIAME 0x01000000
#define E1000_CTRL_EXT_IRCA 0x00000001
/* Interrupt delay cancellation */
/* Driver loaded bit for FW */
#define E1000_CTRL_EXT_DRV_LOAD 0x10000000
/* Interrupt acknowledge Auto-mask */
/* Clear Interrupt timers after IMS clear */
/* packet buffer parity error detection enabled */
/* descriptor FIFO parity error detection enable */
#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
#define E1000_I2CCMD_REG_ADDR_SHIFT 16
#define E1000_I2CCMD_PHY_ADDR_SHIFT 24
#define E1000_I2CCMD_OPCODE_READ 0x08000000
#define E1000_I2CCMD_OPCODE_WRITE 0x00000000
#define E1000_I2CCMD_READY 0x20000000
#define E1000_I2CCMD_ERROR 0x80000000
#define E1000_MAX_SGMII_PHY_REG_ADDR 255
#define E1000_I2CCMD_PHY_TIMEOUT 200
#define E1000_IVAR_VALID 0x80
#define E1000_GPIE_NSICR 0x00000001
#define E1000_GPIE_MSIX_MODE 0x00000010
#define E1000_GPIE_EIAME 0x40000000
#define E1000_GPIE_PBA 0x80000000
/* Receive Descriptor bit definitions */
#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
#define E1000_RXD_STAT_TS 0x10000 /* Pkt was time stamped */
#define E1000_RXDEXT_STATERR_LB 0x00040000
#define E1000_RXDEXT_STATERR_CE 0x01000000
#define E1000_RXDEXT_STATERR_SE 0x02000000
#define E1000_RXDEXT_STATERR_SEQ 0x04000000
#define E1000_RXDEXT_STATERR_CXE 0x10000000
#define E1000_RXDEXT_STATERR_TCPE 0x20000000
#define E1000_RXDEXT_STATERR_IPE 0x40000000
#define E1000_RXDEXT_STATERR_RXE 0x80000000
/* Same mask, but for extended and packet split descriptors */
#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
E1000_RXDEXT_STATERR_CE | \
E1000_RXDEXT_STATERR_SE | \
E1000_RXDEXT_STATERR_SEQ | \
E1000_RXDEXT_STATERR_CXE | \
E1000_RXDEXT_STATERR_RXE)
#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
/* Management Control */
#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
#define E1000_MANC_EN_BMC2OS 0x10000000 /* OSBMC is Enabled or not */
/* Enable Neighbor Discovery Filtering */
#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
/* Enable MAC address filtering */
#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
/* Receive Control */
#define E1000_RCTL_EN 0x00000002 /* enable */
#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enable */
#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
/*
* Use byte values for the following shift parameters
* Usage:
* psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
* E1000_PSRCTL_BSIZE0_MASK) |
* ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
* E1000_PSRCTL_BSIZE1_MASK) |
* ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
* E1000_PSRCTL_BSIZE2_MASK) |
* ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
* E1000_PSRCTL_BSIZE3_MASK))
* where value0 = [128..16256], default=256
* value1 = [1024..64512], default=4096
* value2 = [0..64512], default=4096
* value3 = [0..64512], default=0
*/
#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
/* SWFW_SYNC Definitions */
#define E1000_SWFW_EEP_SM 0x1
#define E1000_SWFW_PHY0_SM 0x2
#define E1000_SWFW_PHY1_SM 0x4
#define E1000_SWFW_PHY2_SM 0x20
#define E1000_SWFW_PHY3_SM 0x40
/* FACTPS Definitions */
/* Device Control */
#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */
#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
/* Defined polarity of Dock/Undock indication in SDP[0] */
/* Reset both PHY ports, through PHYRST_N pin */
/* enable link status from external LINK_0 and LINK_1 pins */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
#define E1000_CTRL_RST 0x04000000 /* Global reset */
#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
/* Initiate an interrupt to manageability engine */
#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */
/* Bit definitions for the Management Data IO (MDIO) and Management Data
* Clock (MDC) pins in the Device Control Register.
*/
#define E1000_CONNSW_ENRGSRC 0x4
#define E1000_PCS_CFG_PCS_EN 8
#define E1000_PCS_LCTL_FLV_LINK_UP 1
#define E1000_PCS_LCTL_FSV_100 2
#define E1000_PCS_LCTL_FSV_1000 4
#define E1000_PCS_LCTL_FDV_FULL 8
#define E1000_PCS_LCTL_FSD 0x10
#define E1000_PCS_LCTL_FORCE_LINK 0x20
#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
#define E1000_PCS_LCTL_AN_ENABLE 0x10000
#define E1000_PCS_LCTL_AN_RESTART 0x20000
#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
#define E1000_ENABLE_SERDES_LOOPBACK 0x0410
#define E1000_PCS_LSTS_LINK_OK 1
#define E1000_PCS_LSTS_SPEED_100 2
#define E1000_PCS_LSTS_SPEED_1000 4
#define E1000_PCS_LSTS_DUPLEX_FULL 8
#define E1000_PCS_LSTS_SYNK_OK 0x10
/* Device Status */
#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
#define E1000_STATUS_FUNC_SHIFT 2
#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
/* Change in Dock/Undock state. Clear on write '0'. */
/* Status of Master requests. */
#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000
/* BMC external code execution disabled */
/* Constants used to interpret the masked PCI-X bus speed. */
#define SPEED_10 10
#define SPEED_100 100
#define SPEED_1000 1000
#define HALF_DUPLEX 1
#define FULL_DUPLEX 2
#define ADVERTISE_10_HALF 0x0001
#define ADVERTISE_10_FULL 0x0002
#define ADVERTISE_100_HALF 0x0004
#define ADVERTISE_100_FULL 0x0008
#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
#define ADVERTISE_1000_FULL 0x0020
/* 1000/H is not supported, nor spec-compliant. */
#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
ADVERTISE_1000_FULL)
#define E1000_ALL_NOT_GIG (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
ADVERTISE_100_HALF | ADVERTISE_100_FULL)
#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
#define E1000_ALL_FULL_DUPLEX (ADVERTISE_10_FULL | ADVERTISE_100_FULL | \
ADVERTISE_1000_FULL)
#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
/* LED Control */
#define E1000_LEDCTL_LED0_MODE_SHIFT 0
#define E1000_LEDCTL_LED0_BLINK 0x00000080
#define E1000_LEDCTL_MODE_LED_ON 0xE
#define E1000_LEDCTL_MODE_LED_OFF 0xF
/* Transmit Descriptor bit definitions */
#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
/* Extended desc bits for Linksec and timesync */
/* Transmit Control */
#define E1000_TCTL_EN 0x00000002 /* enable tx */
#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
/* DMA Coalescing register fields */
#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing
* Watchdog Timer */
#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Receive
* Threshold */
#define E1000_DMACR_DMACTHR_SHIFT 16
#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe
* transactions */
#define E1000_DMACR_DMAC_LX_SHIFT 28
#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit
* Threshold */
#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Receive Traffic Rate
* Threshold */
#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rcv packet rate in
* current window */
#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rcv Traffic
* Current Cnt */
#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rcv Threshold
* High val */
#define E1000_FCRTC_RTH_COAL_SHIFT 4
#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */
/* SerDes Control */
#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
/* Receive Checksum Control */
#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
/* Header split receive */
#define E1000_RFCTL_LEF 0x00040000
/* Collision related configuration parameters */
#define E1000_COLLISION_THRESHOLD 15
#define E1000_CT_SHIFT 4
#define E1000_COLLISION_DISTANCE 63
#define E1000_COLD_SHIFT 12
/* Ethertype field values */
#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
#define MAX_JUMBO_FRAME_SIZE 0x3F00
/* PBA constants */
#define E1000_PBA_34K 0x0022
#define E1000_PBA_64K 0x0040 /* 64KB */
/* SW Semaphore Register */
#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
/* Interrupt Cause Read */
#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */
#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
#define E1000_ICR_VMMB 0x00000100 /* VM MB event */
#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */
/* If this bit asserted, the driver should claim the interrupt */
#define E1000_ICR_INT_ASSERTED 0x80000000
/* LAN connected device generates an interrupt */
#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
/* Extended Interrupt Cause Read */
#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */
#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */
#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */
#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */
#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */
#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */
#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */
#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
/* TCP Timer */
/*
* This defines the bits that are set in the Interrupt Mask
* Set/Read Register. Each bit is documented below:
* o RXT0 = Receiver Timer Interrupt (ring 0)
* o TXDW = Transmit Descriptor Written Back
* o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
* o RXSEQ = Receive Sequence Error
* o LSC = Link Status Change
*/
#define IMS_ENABLE_MASK ( \
E1000_IMS_RXT0 | \
E1000_IMS_TXDW | \
E1000_IMS_RXDMT0 | \
E1000_IMS_RXSEQ | \
E1000_IMS_LSC | \
E1000_IMS_DOUTSYNC)
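/*
 * Illustrative note (not part of the original file): this mask is roughly
 * what the driver writes to the Interrupt Mask Set register when enabling
 * interrupts, e.g.:
 *
 *	wr32(E1000_IMS, IMS_ENABLE_MASK);
 *
 * wr32() and the E1000_IMS register offset are provided by e1000_regs.h.
 */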
/* Interrupt Mask Set */
#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */
#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */
#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
/* Extended Interrupt Mask Set */
#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
/* Interrupt Cause Set */
#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */
/* Extended Interrupt Cause Set */
/* E1000_EITR_CNT_IGNR is only for 82576 and newer */
#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
/* Transmit Descriptor Control */
/* Enable the counting of descriptors still to be processed. */
/* Flow Control Constants */
#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
#define FLOW_CONTROL_TYPE 0x8808
/* 802.1q VLAN Packet Size */
#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
/* Receive Address */
/*
* Number of high/low register pairs in the RAR. The RAR (Receive Address
* Registers) holds the directed and multicast addresses that we monitor.
* Technically, we have 16 spots. However, we reserve one of these spots
* (RAR[15]) for our directed address used by controllers with
* manageability enabled, allowing us room for 15 multicast addresses.
*/
#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
#define E1000_RAL_MAC_ADDR_LEN 4
#define E1000_RAH_MAC_ADDR_LEN 2
#define E1000_RAH_POOL_MASK 0x03FC0000
#define E1000_RAH_POOL_1 0x00040000
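/*
 * Illustrative sketch (not part of the original file): a receive address is
 * programmed by splitting the 6-byte MAC address across a RAL/RAH register
 * pair and marking the entry valid, roughly:
 *
 *	rar_low  = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
 *	rar_high = addr[4] | (addr[5] << 8) | E1000_RAH_AV;
 *	wr32(E1000_RAL(index), rar_low);
 *	wr32(E1000_RAH(index), rar_high);
 *
 * The casts and flushes are omitted here; the driver's rar_set operation
 * performs the equivalent steps.
 */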
/* Error Codes */
#define E1000_SUCCESS 0
#define E1000_ERR_NVM 1
#define E1000_ERR_PHY 2
#define E1000_ERR_CONFIG 3
#define E1000_ERR_PARAM 4
#define E1000_ERR_MAC_INIT 5
#define E1000_ERR_RESET 9
#define E1000_ERR_MASTER_REQUESTS_PENDING 10
#define E1000_BLK_PHY_RESET 12
#define E1000_ERR_SWFW_SYNC 13
#define E1000_NOT_IMPLEMENTED 14
#define E1000_ERR_MBX 15
#define E1000_ERR_INVALID_ARGUMENT 16
#define E1000_ERR_NO_SPACE 17
#define E1000_ERR_NVM_PBA_SECTION 18
/* Loop limit on how long we wait for auto-negotiation to complete */
#define COPPER_LINK_UP_LIMIT 10
#define PHY_AUTO_NEG_LIMIT 45
#define PHY_FORCE_LIMIT 20
/* Number of 100 microseconds we wait for PCI Express master disable */
#define MASTER_DISABLE_TIMEOUT 800
/* Number of milliseconds we wait for PHY configuration done after MAC reset */
#define PHY_CFG_TIMEOUT 100
/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */
/* Number of milliseconds for NVM auto read done after MAC reset. */
#define AUTO_READ_DONE_TIMEOUT 10
/* Flow Control */
#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */
#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestamping */
#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */
#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */
#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestamping */
#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01
#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02
#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00
#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000
#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100
#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200
#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300
#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800
#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900
#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00
#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00
#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00
#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00
#define E1000_TIMINCA_16NS_SHIFT 24
#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
#define E1000_MDICNFG_PHY_MASK 0x03E00000
#define E1000_MDICNFG_PHY_SHIFT 21
/* PCI Express Control */
#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000
#define E1000_GCR_CAP_VER2 0x00040000
/* mPHY Address Control and Data Registers */
#define E1000_MPHY_ADDR_CTL 0x0024 /* mPHY Address Control Register */
#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000
#define E1000_MPHY_DATA 0x0E10 /* mPHY Data Register */
/* mPHY PCS CLK Register */
#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 /* mPHY PCS CLK AFE CSR Offset */
/* mPHY Near End Digital Loopback Override Bit */
#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10
/* PHY Control Register */
#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
#define MII_CR_POWER_DOWN 0x0800 /* Power down */
#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
#define MII_CR_SPEED_1000 0x0040
#define MII_CR_SPEED_100 0x2000
#define MII_CR_SPEED_10 0x0000
/* PHY Status Register */
#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
/* Autoneg Advertisement Register */
#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
/* Link Partner Ability Register (Base Page) */
#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
/* Autoneg Expansion Register */
/* 1000BASE-T Control Register */
#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
/* 0=Configure PHY as Slave */
#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
/* 0=Automatic Master/Slave config */
/* 1000BASE-T Status Register */
#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
/* PHY 1000 MII Register/Bit Definitions */
/* PHY Registers defined by IEEE */
#define PHY_CONTROL 0x00 /* Control Register */
#define PHY_STATUS 0x01 /* Status Register */
#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
/* NVM Control */
#define E1000_EECD_SK 0x00000001 /* NVM Clock */
#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
#define E1000_EECD_DI 0x00000004 /* NVM Data In */
#define E1000_EECD_DO 0x00000008 /* NVM Data Out */
#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
#define E1000_EECD_PRES 0x00000100 /* NVM Present */
/* NVM Addressing bits based on type 0=small, 1=large */
#define E1000_EECD_ADDR_BITS 0x00000400
#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
#define E1000_EECD_SIZE_EX_SHIFT 11
/* Offset to data in NVM read/write registers */
#define E1000_NVM_RW_REG_DATA 16
#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
#define E1000_NVM_RW_REG_START 1 /* Start operation */
#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */
/* NVM Word Offsets */
#define NVM_COMPAT 0x0003
#define NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */
#define NVM_INIT_CONTROL2_REG 0x000F
#define NVM_INIT_CONTROL3_PORT_B 0x0014
#define NVM_INIT_CONTROL3_PORT_A 0x0024
#define NVM_ALT_MAC_ADDR_PTR 0x0037
#define NVM_CHECKSUM_REG 0x003F
#define NVM_COMPATIBILITY_REG_3 0x0003
#define NVM_COMPATIBILITY_BIT_MASK 0x8000
#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */
#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */
#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0)
/* Mask bits for fields in Word 0x24 of the NVM */
#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */
#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed external */
/* Mask bits for fields in Word 0x0f of the NVM */
#define NVM_WORD0F_PAUSE_MASK 0x3000
#define NVM_WORD0F_ASM_DIR 0x2000
/* Mask bits for fields in Word 0x1a of the NVM */
/* length of string needed to store part num */
#define E1000_PBANUM_LENGTH 11
/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
#define NVM_SUM 0xBABA
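/*
 * Illustrative sketch (not part of the original file): checksum validation
 * reads every word up to and including NVM_CHECKSUM_REG and verifies that the
 * 16-bit sum equals NVM_SUM, roughly:
 *
 *	u16 checksum = 0, data, i;
 *	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
 *		hw->nvm.ops.read(hw, i, 1, &data);
 *		checksum += data;
 *	}
 *	if (checksum != (u16)NVM_SUM)
 *		return -E1000_ERR_NVM;
 *
 * This mirrors the driver's generic validate routine.
 */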
#define NVM_PBA_OFFSET_0 8
#define NVM_PBA_OFFSET_1 9
#define NVM_PBA_PTR_GUARD 0xFAFA
#define NVM_WORD_SIZE_BASE_SHIFT 6
/* NVM Commands - Microwire */
/* NVM Commands - SPI */
#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */
#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */
#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */
#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */
/* SPI NVM Status Register */
#define NVM_STATUS_RDY_SPI 0x01
/* Word definitions for ID LED Settings */
#define ID_LED_RESERVED_0000 0x0000
#define ID_LED_RESERVED_FFFF 0xFFFF
#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
(ID_LED_OFF1_OFF2 << 8) | \
(ID_LED_DEF1_DEF2 << 4) | \
(ID_LED_DEF1_DEF2))
#define ID_LED_DEF1_DEF2 0x1
#define ID_LED_DEF1_ON2 0x2
#define ID_LED_DEF1_OFF2 0x3
#define ID_LED_ON1_DEF2 0x4
#define ID_LED_ON1_ON2 0x5
#define ID_LED_ON1_OFF2 0x6
#define ID_LED_OFF1_DEF2 0x7
#define ID_LED_OFF1_ON2 0x8
#define ID_LED_OFF1_OFF2 0x9
#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF
#define IGP_ACTIVITY_LED_ENABLE 0x0300
#define IGP_LED3_MODE 0x07000000
/* PCI/PCI-X/PCI-EX Config space */
#define PCIE_DEVICE_CONTROL2 0x28
#define PCIE_DEVICE_CONTROL2_16ms 0x0005
#define PHY_REVISION_MASK 0xFFFFFFF0
#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
#define MAX_PHY_MULTI_PAGE_REG 0xF
/* Bit definitions for valid PHY IDs. */
/*
* I = Integrated
* E = External
*/
#define M88E1111_I_PHY_ID 0x01410CC0
#define M88E1112_E_PHY_ID 0x01410C90
#define I347AT4_E_PHY_ID 0x01410DC0
#define IGP03E1000_E_PHY_ID 0x02A80390
#define I82580_I_PHY_ID 0x015403A0
#define I350_I_PHY_ID 0x015403B0
#define M88_VENDOR 0x0141
/* M88E1000 Specific Registers */
#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */
#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */
#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */
#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */
/* M88E1000 PHY Specific Control Register */
#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
/* 1=CLK125 low, 0=CLK125 toggling */
#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
/* Manual MDI configuration */
#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
#define M88E1000_PSCR_AUTO_X_1000T 0x0040
/* Auto crossover enabled all speeds */
#define M88E1000_PSCR_AUTO_X_MODE 0x0060
/*
 * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold),
 * 0=Normal 10BASE-T Rx Threshold
*/
/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
/* M88E1000 PHY Specific Status Register */
#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
/*
* 0 = <50M
* 1 = 50-80M
* 2 = 80-110M
* 3 = 110-140M
* 4 = >140M
*/
#define M88E1000_PSSR_CABLE_LENGTH 0x0380
#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
/* M88E1000 Extended PHY Specific Control Register */
/*
* 1 = Lost lock detect enabled.
* Will assert lost lock and bring
* link down if idle not seen
* within 1ms in 1000BASE-T
*/
/*
* Number of times we will attempt to autonegotiate before downshifting if we
* are the master
*/
#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
/*
* Number of times we will attempt to autonegotiate before downshifting if we
* are the slave
*/
#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
/* Intel i347-AT4 Registers */
#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */
#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */
#define I347AT4_PAGE_SELECT 0x16
/* i347-AT4 Extended PHY Specific Control Register */
/*
* Number of times we will attempt to autonegotiate before downshifting if we
* are the master
*/
#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800
#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000
#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000
#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000
#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000
#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000
#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000
#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000
#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000
#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000
/* i347-AT4 PHY Cable Diagnostics Control */
#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */
/* Marvell 1112 only registers */
#define M88E1112_VCT_DSP_DISTANCE 0x001A
/* M88EC018 Rev 2 specific DownShift settings */
#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
/* MDI Control */
#define E1000_MDIC_DATA_MASK 0x0000FFFF
#define E1000_MDIC_REG_MASK 0x001F0000
#define E1000_MDIC_REG_SHIFT 16
#define E1000_MDIC_PHY_MASK 0x03E00000
#define E1000_MDIC_PHY_SHIFT 21
#define E1000_MDIC_OP_WRITE 0x04000000
#define E1000_MDIC_OP_READ 0x08000000
#define E1000_MDIC_READY 0x10000000
#define E1000_MDIC_INT_EN 0x20000000
#define E1000_MDIC_ERROR 0x40000000
#define E1000_MDIC_DEST 0x80000000
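/*
 * Illustrative sketch (not part of the original file): an MDIC read of a PHY
 * register is started by composing the register offset, PHY address and
 * opcode, then polling for completion, roughly (variable names here are
 * illustrative):
 *
 *	mdic = (offset << E1000_MDIC_REG_SHIFT) |
 *	       (phy_addr << E1000_MDIC_PHY_SHIFT) |
 *	       E1000_MDIC_OP_READ;
 *	wr32(E1000_MDIC, mdic);
 *	... poll until rd32(E1000_MDIC) has E1000_MDIC_READY set, check
 *	E1000_MDIC_ERROR, then take the data from the low 16 bits ...
 */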
/* Thermal Sensor */
#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */
#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Speed Throttle Event */
/* Energy Efficient Ethernet */
#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* EEE Enable 1G AN */
#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */
#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */
#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */
#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */
/* SerDes Control */
#define E1000_GEN_CTL_READY 0x80000000
#define E1000_GEN_CTL_ADDRESS_SHIFT 8
#define E1000_GEN_POLL_TIMEOUT 640
#define E1000_VFTA_ENTRY_SHIFT 5
#define E1000_VFTA_ENTRY_MASK 0x7F
#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
/* DMA Coalescing register fields */
#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based
on DMA coal */
/* Tx Rate-Scheduler Config fields */
#define E1000_RTTBCNRC_RS_ENA 0x80000000
#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF
#define E1000_RTTBCNRC_RF_INT_SHIFT 14
#define E1000_RTTBCNRC_RF_INT_MASK \
(E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
#endif


@ -0,0 +1,529 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _E1000_HW_H_
#define _E1000_HW_H_
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include "e1000_regs.h"
#include "e1000_defines.h"
struct e1000_hw;
#define E1000_DEV_ID_82576 0x10C9
#define E1000_DEV_ID_82576_FIBER 0x10E6
#define E1000_DEV_ID_82576_SERDES 0x10E7
#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526
#define E1000_DEV_ID_82576_NS 0x150A
#define E1000_DEV_ID_82576_NS_SERDES 0x1518
#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
#define E1000_DEV_ID_82575EB_COPPER 0x10A7
#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
#define E1000_DEV_ID_82580_COPPER 0x150E
#define E1000_DEV_ID_82580_FIBER 0x150F
#define E1000_DEV_ID_82580_SERDES 0x1510
#define E1000_DEV_ID_82580_SGMII 0x1511
#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527
#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
#define E1000_DEV_ID_DH89XXCC_SFP 0x0440
#define E1000_DEV_ID_I350_COPPER 0x1521
#define E1000_DEV_ID_I350_FIBER 0x1522
#define E1000_DEV_ID_I350_SERDES 0x1523
#define E1000_DEV_ID_I350_SGMII 0x1524
#define E1000_REVISION_2 2
#define E1000_REVISION_4 4
#define E1000_FUNC_0 0
#define E1000_FUNC_1 1
#define E1000_FUNC_2 2
#define E1000_FUNC_3 3
#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6
#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9
enum e1000_mac_type {
e1000_undefined = 0,
e1000_82575,
e1000_82576,
e1000_82580,
e1000_i350,
e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
};
enum e1000_media_type {
e1000_media_type_unknown = 0,
e1000_media_type_copper = 1,
e1000_media_type_internal_serdes = 2,
e1000_num_media_types
};
enum e1000_nvm_type {
e1000_nvm_unknown = 0,
e1000_nvm_none,
e1000_nvm_eeprom_spi,
e1000_nvm_flash_hw,
e1000_nvm_flash_sw
};
enum e1000_nvm_override {
e1000_nvm_override_none = 0,
e1000_nvm_override_spi_small,
e1000_nvm_override_spi_large,
};
enum e1000_phy_type {
e1000_phy_unknown = 0,
e1000_phy_none,
e1000_phy_m88,
e1000_phy_igp,
e1000_phy_igp_2,
e1000_phy_gg82563,
e1000_phy_igp_3,
e1000_phy_ife,
e1000_phy_82580,
};
enum e1000_bus_type {
e1000_bus_type_unknown = 0,
e1000_bus_type_pci,
e1000_bus_type_pcix,
e1000_bus_type_pci_express,
e1000_bus_type_reserved
};
enum e1000_bus_speed {
e1000_bus_speed_unknown = 0,
e1000_bus_speed_33,
e1000_bus_speed_66,
e1000_bus_speed_100,
e1000_bus_speed_120,
e1000_bus_speed_133,
e1000_bus_speed_2500,
e1000_bus_speed_5000,
e1000_bus_speed_reserved
};
enum e1000_bus_width {
e1000_bus_width_unknown = 0,
e1000_bus_width_pcie_x1,
e1000_bus_width_pcie_x2,
e1000_bus_width_pcie_x4 = 4,
e1000_bus_width_pcie_x8 = 8,
e1000_bus_width_32,
e1000_bus_width_64,
e1000_bus_width_reserved
};
enum e1000_1000t_rx_status {
e1000_1000t_rx_status_not_ok = 0,
e1000_1000t_rx_status_ok,
e1000_1000t_rx_status_undefined = 0xFF
};
enum e1000_rev_polarity {
e1000_rev_polarity_normal = 0,
e1000_rev_polarity_reversed,
e1000_rev_polarity_undefined = 0xFF
};
enum e1000_fc_mode {
e1000_fc_none = 0,
e1000_fc_rx_pause,
e1000_fc_tx_pause,
e1000_fc_full,
e1000_fc_default = 0xFF
};
/* Statistics counters collected by the MAC */
struct e1000_hw_stats {
u64 crcerrs;
u64 algnerrc;
u64 symerrs;
u64 rxerrc;
u64 mpc;
u64 scc;
u64 ecol;
u64 mcc;
u64 latecol;
u64 colc;
u64 dc;
u64 tncrs;
u64 sec;
u64 cexterr;
u64 rlec;
u64 xonrxc;
u64 xontxc;
u64 xoffrxc;
u64 xofftxc;
u64 fcruc;
u64 prc64;
u64 prc127;
u64 prc255;
u64 prc511;
u64 prc1023;
u64 prc1522;
u64 gprc;
u64 bprc;
u64 mprc;
u64 gptc;
u64 gorc;
u64 gotc;
u64 rnbc;
u64 ruc;
u64 rfc;
u64 roc;
u64 rjc;
u64 mgprc;
u64 mgpdc;
u64 mgptc;
u64 tor;
u64 tot;
u64 tpr;
u64 tpt;
u64 ptc64;
u64 ptc127;
u64 ptc255;
u64 ptc511;
u64 ptc1023;
u64 ptc1522;
u64 mptc;
u64 bptc;
u64 tsctc;
u64 tsctfc;
u64 iac;
u64 icrxptc;
u64 icrxatc;
u64 ictxptc;
u64 ictxatc;
u64 ictxqec;
u64 ictxqmtc;
u64 icrxdmtc;
u64 icrxoc;
u64 cbtmpc;
u64 htdpmc;
u64 cbrdpc;
u64 cbrmpc;
u64 rpthc;
u64 hgptc;
u64 htcbdpc;
u64 hgorc;
u64 hgotc;
u64 lenerrs;
u64 scvpc;
u64 hrmpc;
u64 doosync;
u64 o2bgptc;
u64 o2bspc;
u64 b2ospc;
u64 b2ogprc;
};
struct e1000_phy_stats {
u32 idle_errors;
u32 receive_errors;
};
struct e1000_host_mng_dhcp_cookie {
u32 signature;
u8 status;
u8 reserved0;
u16 vlan_id;
u32 reserved1;
u16 reserved2;
u8 reserved3;
u8 checksum;
};
/* Host Interface "Rev 1" */
struct e1000_host_command_header {
u8 command_id;
u8 command_length;
u8 command_options;
u8 checksum;
};
#define E1000_HI_MAX_DATA_LENGTH 252
struct e1000_host_command_info {
struct e1000_host_command_header command_header;
u8 command_data[E1000_HI_MAX_DATA_LENGTH];
};
/* Host Interface "Rev 2" */
struct e1000_host_mng_command_header {
u8 command_id;
u8 checksum;
u16 reserved1;
u16 reserved2;
u16 command_length;
};
#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
struct e1000_host_mng_command_info {
struct e1000_host_mng_command_header command_header;
u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
};
#include "e1000_mac.h"
#include "e1000_phy.h"
#include "e1000_nvm.h"
#include "e1000_mbx.h"
struct e1000_mac_operations {
s32 (*check_for_link)(struct e1000_hw *);
s32 (*reset_hw)(struct e1000_hw *);
s32 (*init_hw)(struct e1000_hw *);
bool (*check_mng_mode)(struct e1000_hw *);
s32 (*setup_physical_interface)(struct e1000_hw *);
void (*rar_set)(struct e1000_hw *, u8 *, u32);
s32 (*read_mac_addr)(struct e1000_hw *);
s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
};
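/*
 * Illustrative note (not part of the original file): these operation tables
 * are filled in by the MAC-specific setup code and invoked indirectly, e.g.
 * a caller checks link with roughly:
 *
 *	if (hw->mac.ops.check_for_link)
 *		ret_val = hw->mac.ops.check_for_link(hw);
 *
 * so the shared code stays independent of 82575/82576/82580/i350 differences.
 */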
struct e1000_phy_operations {
s32 (*acquire)(struct e1000_hw *);
s32 (*check_polarity)(struct e1000_hw *);
s32 (*check_reset_block)(struct e1000_hw *);
s32 (*force_speed_duplex)(struct e1000_hw *);
s32 (*get_cfg_done)(struct e1000_hw *hw);
s32 (*get_cable_length)(struct e1000_hw *);
s32 (*get_phy_info)(struct e1000_hw *);
s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
void (*release)(struct e1000_hw *);
s32 (*reset)(struct e1000_hw *);
s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
s32 (*write_reg)(struct e1000_hw *, u32, u16);
};
struct e1000_nvm_operations {
s32 (*acquire)(struct e1000_hw *);
s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
void (*release)(struct e1000_hw *);
s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
s32 (*update)(struct e1000_hw *);
s32 (*validate)(struct e1000_hw *);
};
struct e1000_info {
s32 (*get_invariants)(struct e1000_hw *);
struct e1000_mac_operations *mac_ops;
struct e1000_phy_operations *phy_ops;
struct e1000_nvm_operations *nvm_ops;
};
extern const struct e1000_info e1000_82575_info;
struct e1000_mac_info {
struct e1000_mac_operations ops;
u8 addr[6];
u8 perm_addr[6];
enum e1000_mac_type type;
u32 ledctl_default;
u32 ledctl_mode1;
u32 ledctl_mode2;
u32 mc_filter_type;
u32 txcw;
u16 mta_reg_count;
u16 uta_reg_count;
/* Maximum size of the MTA register table in all supported adapters */
#define MAX_MTA_REG 128
u32 mta_shadow[MAX_MTA_REG];
u16 rar_entry_count;
u8 forced_speed_duplex;
bool adaptive_ifs;
bool arc_subsystem_valid;
bool asf_firmware_present;
bool autoneg;
bool autoneg_failed;
bool disable_hw_init_bits;
bool get_link_status;
bool ifs_params_forced;
bool in_ifs_mode;
bool report_tx_early;
bool serdes_has_link;
bool tx_pkt_filtering;
};
struct e1000_phy_info {
struct e1000_phy_operations ops;
enum e1000_phy_type type;
enum e1000_1000t_rx_status local_rx;
enum e1000_1000t_rx_status remote_rx;
enum e1000_ms_type ms_type;
enum e1000_ms_type original_ms_type;
enum e1000_rev_polarity cable_polarity;
enum e1000_smart_speed smart_speed;
u32 addr;
u32 id;
u32 reset_delay_us; /* in usec */
u32 revision;
enum e1000_media_type media_type;
u16 autoneg_advertised;
u16 autoneg_mask;
u16 cable_length;
u16 max_cable_length;
u16 min_cable_length;
u8 mdix;
bool disable_polarity_correction;
bool is_mdix;
bool polarity_correction;
bool reset_disable;
bool speed_downgraded;
bool autoneg_wait_to_complete;
};
struct e1000_nvm_info {
struct e1000_nvm_operations ops;
enum e1000_nvm_type type;
enum e1000_nvm_override override;
u32 flash_bank_size;
u32 flash_base_addr;
u16 word_size;
u16 delay_usec;
u16 address_bits;
u16 opcode_bits;
u16 page_size;
};
struct e1000_bus_info {
enum e1000_bus_type type;
enum e1000_bus_speed speed;
enum e1000_bus_width width;
u32 snoop;
u16 func;
u16 pci_cmd_word;
};
struct e1000_fc_info {
u32 high_water; /* Flow control high-water mark */
u32 low_water; /* Flow control low-water mark */
u16 pause_time; /* Flow control pause timer */
bool send_xon; /* Flow control send XON */
bool strict_ieee; /* Strict IEEE mode */
enum e1000_fc_mode current_mode; /* Type of flow control */
enum e1000_fc_mode requested_mode;
};
struct e1000_mbx_operations {
s32 (*init_params)(struct e1000_hw *hw);
s32 (*read)(struct e1000_hw *, u32 *, u16, u16);
s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16);
s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);
s32 (*check_for_msg)(struct e1000_hw *, u16);
s32 (*check_for_ack)(struct e1000_hw *, u16);
s32 (*check_for_rst)(struct e1000_hw *, u16);
};
struct e1000_mbx_stats {
u32 msgs_tx;
u32 msgs_rx;
u32 acks;
u32 reqs;
u32 rsts;
};
struct e1000_mbx_info {
struct e1000_mbx_operations ops;
struct e1000_mbx_stats stats;
u32 timeout;
u32 usec_delay;
u16 size;
};
struct e1000_dev_spec_82575 {
bool sgmii_active;
bool global_device_reset;
bool eee_disable;
};
struct e1000_hw {
void *back;
u8 __iomem *hw_addr;
u8 __iomem *flash_address;
unsigned long io_base;
struct e1000_mac_info mac;
struct e1000_fc_info fc;
struct e1000_phy_info phy;
struct e1000_nvm_info nvm;
struct e1000_bus_info bus;
struct e1000_mbx_info mbx;
struct e1000_host_mng_dhcp_cookie mng_cookie;
union {
struct e1000_dev_spec_82575 _82575;
} dev_spec;
u16 device_id;
u16 subsystem_vendor_id;
u16 subsystem_device_id;
u16 vendor_id;
u8 revision_id;
};
extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
#define hw_dbg(format, arg...) \
netdev_dbg(igb_get_hw_dev(hw), format, ##arg)
/* These functions must be implemented by drivers */
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
#endif /* _E1000_HW_H_ */

File diff suppressed because it is too large


@ -0,0 +1,91 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _E1000_MAC_H_
#define _E1000_MAC_H_
#include "e1000_hw.h"
#include "e1000_phy.h"
#include "e1000_nvm.h"
#include "e1000_defines.h"
/*
* Functions that should not be called directly from drivers but can be used
* by other files in this 'shared code'
*/
s32 igb_blink_led(struct e1000_hw *hw);
s32 igb_check_for_copper_link(struct e1000_hw *hw);
s32 igb_cleanup_led(struct e1000_hw *hw);
s32 igb_config_fc_after_link_up(struct e1000_hw *hw);
s32 igb_disable_pcie_master(struct e1000_hw *hw);
s32 igb_force_mac_fc(struct e1000_hw *hw);
s32 igb_get_auto_rd_done(struct e1000_hw *hw);
s32 igb_get_bus_info_pcie(struct e1000_hw *hw);
s32 igb_get_hw_semaphore(struct e1000_hw *hw);
s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
u16 *duplex);
s32 igb_id_led_init(struct e1000_hw *hw);
s32 igb_led_off(struct e1000_hw *hw);
void igb_update_mc_addr_list(struct e1000_hw *hw,
u8 *mc_addr_list, u32 mc_addr_count);
s32 igb_setup_link(struct e1000_hw *hw);
s32 igb_validate_mdi_setting(struct e1000_hw *hw);
s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
u32 offset, u8 data);
void igb_clear_hw_cntrs_base(struct e1000_hw *hw);
void igb_clear_vfta(struct e1000_hw *hw);
void igb_clear_vfta_i350(struct e1000_hw *hw);
s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add);
void igb_config_collision_dist(struct e1000_hw *hw);
void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
void igb_mta_set(struct e1000_hw *hw, u32 hash_value);
void igb_put_hw_semaphore(struct e1000_hw *hw);
void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
s32 igb_check_alt_mac_addr(struct e1000_hw *hw);
bool igb_enable_mng_pass_thru(struct e1000_hw *hw);
enum e1000_mng_mode {
e1000_mng_mode_none = 0,
e1000_mng_mode_asf,
e1000_mng_mode_pt,
e1000_mng_mode_ipmi,
e1000_mng_mode_host_if_only
};
#define E1000_FACTPS_MNGCG 0x20000000
#define E1000_FWSM_MODE_MASK 0xE
#define E1000_FWSM_MODE_SHIFT 1
#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
#endif


@ -0,0 +1,446 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "e1000_mbx.h"
/**
* igb_read_mbx - Reads a message from the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @mbx_id: id of mailbox to read
*
* returns SUCCESS if it successfully read message from buffer
**/
s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct e1000_mbx_info *mbx = &hw->mbx;
s32 ret_val = -E1000_ERR_MBX;
/* limit read to size of mailbox */
if (size > mbx->size)
size = mbx->size;
if (mbx->ops.read)
ret_val = mbx->ops.read(hw, msg, size, mbx_id);
return ret_val;
}
/**
* igb_write_mbx - Write a message to the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @mbx_id: id of mailbox to write
*
* returns SUCCESS if it successfully copied message into the buffer
**/
s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct e1000_mbx_info *mbx = &hw->mbx;
s32 ret_val = 0;
if (size > mbx->size)
ret_val = -E1000_ERR_MBX;
else if (mbx->ops.write)
ret_val = mbx->ops.write(hw, msg, size, mbx_id);
return ret_val;
}
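/*
 * Illustrative note (not part of the original file): a typical PF-side use is
 * posting a single-word control message to a VF, roughly:
 *
 *	u32 msg = E1000_PF_CONTROL_MSG;
 *	igb_write_mbx(hw, &msg, 1, vf);
 *
 * The message word values themselves are defined in e1000_mbx.h.
 */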
/**
* igb_check_for_msg - checks to see if someone sent us mail
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to check
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
{
struct e1000_mbx_info *mbx = &hw->mbx;
s32 ret_val = -E1000_ERR_MBX;
if (mbx->ops.check_for_msg)
ret_val = mbx->ops.check_for_msg(hw, mbx_id);
return ret_val;
}
/**
* igb_check_for_ack - checks to see if someone sent us ACK
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to check
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id)
{
struct e1000_mbx_info *mbx = &hw->mbx;
s32 ret_val = -E1000_ERR_MBX;
if (mbx->ops.check_for_ack)
ret_val = mbx->ops.check_for_ack(hw, mbx_id);
return ret_val;
}
/**
* igb_check_for_rst - checks to see if other side has reset
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to check
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id)
{
struct e1000_mbx_info *mbx = &hw->mbx;
s32 ret_val = -E1000_ERR_MBX;
if (mbx->ops.check_for_rst)
ret_val = mbx->ops.check_for_rst(hw, mbx_id);
return ret_val;
}
/**
* igb_poll_for_msg - Wait for message notification
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to write
*
* returns SUCCESS if it successfully received a message notification
**/
static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
{
struct e1000_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
if (!countdown || !mbx->ops.check_for_msg)
goto out;
while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
countdown--;
if (!countdown)
break;
udelay(mbx->usec_delay);
}
/* if we failed, all future posted messages fail until reset */
if (!countdown)
mbx->timeout = 0;
out:
return countdown ? 0 : -E1000_ERR_MBX;
}
/**
* igb_poll_for_ack - Wait for message acknowledgement
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to write
*
* returns SUCCESS if it successfully received a message acknowledgement
**/
static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
{
struct e1000_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
if (!countdown || !mbx->ops.check_for_ack)
goto out;
while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
countdown--;
if (!countdown)
break;
udelay(mbx->usec_delay);
}
/* if we failed, all future posted messages fail until reset */
if (!countdown)
mbx->timeout = 0;
out:
return countdown ? 0 : -E1000_ERR_MBX;
}
/**
* igb_read_posted_mbx - Wait for message notification and receive message
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
 * @mbx_id: id of mailbox to read
*
* returns SUCCESS if it successfully received a message notification and
* copied it into the receive buffer.
**/
static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct e1000_mbx_info *mbx = &hw->mbx;
s32 ret_val = -E1000_ERR_MBX;
if (!mbx->ops.read)
goto out;
ret_val = igb_poll_for_msg(hw, mbx_id);
if (!ret_val)
ret_val = mbx->ops.read(hw, msg, size, mbx_id);
out:
return ret_val;
}
/**
* igb_write_posted_mbx - Write a message to the mailbox, wait for ack
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @mbx_id: id of mailbox to write
*
* returns SUCCESS if it successfully copied message into the buffer and
* received an ack to that message within delay * timeout period
**/
static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct e1000_mbx_info *mbx = &hw->mbx;
s32 ret_val = -E1000_ERR_MBX;
/* exit if either we can't write or there isn't a defined timeout */
if (!mbx->ops.write || !mbx->timeout)
goto out;
/* send msg */
ret_val = mbx->ops.write(hw, msg, size, mbx_id);
/* if msg sent wait until we receive an ack */
if (!ret_val)
ret_val = igb_poll_for_ack(hw, mbx_id);
out:
return ret_val;
}
static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
{
u32 mbvficr = rd32(E1000_MBVFICR);
s32 ret_val = -E1000_ERR_MBX;
if (mbvficr & mask) {
ret_val = 0;
wr32(E1000_MBVFICR, mask);
}
return ret_val;
}
/**
* igb_check_for_msg_pf - checks to see if the VF has sent mail
* @hw: pointer to the HW structure
* @vf_number: the VF index
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
{
s32 ret_val = -E1000_ERR_MBX;
if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) {
ret_val = 0;
hw->mbx.stats.reqs++;
}
return ret_val;
}
/**
* igb_check_for_ack_pf - checks to see if the VF has ACKed
* @hw: pointer to the HW structure
* @vf_number: the VF index
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
{
s32 ret_val = -E1000_ERR_MBX;
if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) {
ret_val = 0;
hw->mbx.stats.acks++;
}
return ret_val;
}
/**
* igb_check_for_rst_pf - checks to see if the VF has reset
* @hw: pointer to the HW structure
* @vf_number: the VF index
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
{
u32 vflre = rd32(E1000_VFLRE);
s32 ret_val = -E1000_ERR_MBX;
if (vflre & (1 << vf_number)) {
ret_val = 0;
wr32(E1000_VFLRE, (1 << vf_number));
hw->mbx.stats.rsts++;
}
return ret_val;
}
/**
* igb_obtain_mbx_lock_pf - obtain mailbox lock
* @hw: pointer to the HW structure
* @vf_number: the VF index
*
* return SUCCESS if we obtained the mailbox lock
**/
static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
{
s32 ret_val = -E1000_ERR_MBX;
u32 p2v_mailbox;
/* Take ownership of the buffer */
wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
/* reserve mailbox for vf use */
p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
ret_val = 0;
return ret_val;
}
/**
* igb_write_mbx_pf - Places a message in the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @vf_number: the VF index
*
* returns SUCCESS if it successfully copied message into the buffer
**/
static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
u16 vf_number)
{
s32 ret_val;
u16 i;
/* lock the mailbox to prevent pf/vf race condition */
ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
if (ret_val)
goto out_no_write;
/* flush msg and acks as we are overwriting the message buffer */
igb_check_for_msg_pf(hw, vf_number);
igb_check_for_ack_pf(hw, vf_number);
/* copy the caller specified message to the mailbox memory buffer */
for (i = 0; i < size; i++)
array_wr32(E1000_VMBMEM(vf_number), i, msg[i]);
/* Interrupt VF to tell it a message has been sent and release buffer*/
wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS);
/* update stats */
hw->mbx.stats.msgs_tx++;
out_no_write:
return ret_val;
}
/**
* igb_read_mbx_pf - Read a message from the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @vf_number: the VF index
*
* This function copies a message from the mailbox buffer to the caller's
* memory buffer. The presumption is that the caller knows that there was
* a message due to a VF request so no polling for message is needed.
**/
static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
u16 vf_number)
{
s32 ret_val;
u16 i;
/* lock the mailbox to prevent pf/vf race condition */
ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
if (ret_val)
goto out_no_read;
/* copy the message to the mailbox memory buffer */
for (i = 0; i < size; i++)
msg[i] = array_rd32(E1000_VMBMEM(vf_number), i);
/* Acknowledge the message and release buffer */
wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);
/* update stats */
hw->mbx.stats.msgs_rx++;
out_no_read:
return ret_val;
}
/**
 * igb_init_mbx_params_pf - set initial values for pf mailbox
* @hw: pointer to the HW structure
*
* Initializes the hw->mbx struct to correct values for pf mailbox
*/
s32 igb_init_mbx_params_pf(struct e1000_hw *hw)
{
struct e1000_mbx_info *mbx = &hw->mbx;
mbx->timeout = 0;
mbx->usec_delay = 0;
mbx->size = E1000_VFMAILBOX_SIZE;
mbx->ops.read = igb_read_mbx_pf;
mbx->ops.write = igb_write_mbx_pf;
mbx->ops.read_posted = igb_read_posted_mbx;
mbx->ops.write_posted = igb_write_posted_mbx;
mbx->ops.check_for_msg = igb_check_for_msg_pf;
mbx->ops.check_for_ack = igb_check_for_ack_pf;
mbx->ops.check_for_rst = igb_check_for_rst_pf;
mbx->stats.msgs_tx = 0;
mbx->stats.msgs_rx = 0;
mbx->stats.reqs = 0;
mbx->stats.acks = 0;
mbx->stats.rsts = 0;
return 0;
}


@ -0,0 +1,77 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _E1000_MBX_H_
#define _E1000_MBX_H_
#include "e1000_hw.h"
#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */
#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
/* If it's an E1000_VF_* msg then it originates in the VF and is sent to the
 * PF. The reverse is true if it is E1000_PF_*.
 * Message ACKs are the value or'd with 0xF0000000
*/
#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
* this are the ACK */
#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
* this are the NACK */
#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
clear to send requests */
#define E1000_VT_MSGINFO_SHIFT 16
/* bits 23:16 are used for extra info for certain messages */
#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
#define E1000_VF_RESET 0x01 /* VF requests reset */
#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */
#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */
#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */
#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */
#define E1000_VF_SET_PROMISC 0x06 /* VF requests to clear VMOLR.ROPE/MPME */
#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT)
#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
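/* Illustrative note (not part of the original file): a VF request is sent as
 * a message word built from one of the E1000_VF_* commands, and the PF's
 * reply carries the same command or'd with ACK/NACK, so the sender checks
 * roughly:
 *
 *	msgbuf[0] = E1000_VF_SET_MAC_ADDR;
 *	... write_posted / read_posted via the mailbox ops ...
 *	if (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_ACK))
 *		... request was accepted ...
 */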
s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16);
s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16);
s32 igb_check_for_msg(struct e1000_hw *, u16);
s32 igb_check_for_ack(struct e1000_hw *, u16);
s32 igb_check_for_rst(struct e1000_hw *, u16);
s32 igb_init_mbx_params_pf(struct e1000_hw *);
#endif /* _E1000_MBX_H_ */


@ -0,0 +1,713 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include <linux/if_ether.h>
#include <linux/delay.h>
#include "e1000_mac.h"
#include "e1000_nvm.h"
/**
* igb_raise_eec_clk - Raise EEPROM clock
* @hw: pointer to the HW structure
* @eecd: pointer to the EEPROM
*
* Enable/Raise the EEPROM clock bit.
**/
static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
{
*eecd = *eecd | E1000_EECD_SK;
wr32(E1000_EECD, *eecd);
wrfl();
udelay(hw->nvm.delay_usec);
}
/**
* igb_lower_eec_clk - Lower EEPROM clock
* @hw: pointer to the HW structure
* @eecd: pointer to the EEPROM
*
* Clear/Lower the EEPROM clock bit.
**/
static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
{
*eecd = *eecd & ~E1000_EECD_SK;
wr32(E1000_EECD, *eecd);
wrfl();
udelay(hw->nvm.delay_usec);
}
/**
 * igb_shift_out_eec_bits - Shift data bits out to the EEPROM
* @hw: pointer to the HW structure
* @data: data to send to the EEPROM
* @count: number of bits to shift out
*
* We need to shift 'count' bits out to the EEPROM. So, the value in the
* "data" parameter will be shifted out to the EEPROM one bit at a time.
* In order to do this, "data" must be broken down into bits.
**/
static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 eecd = rd32(E1000_EECD);
u32 mask;
mask = 0x01 << (count - 1);
if (nvm->type == e1000_nvm_eeprom_spi)
eecd |= E1000_EECD_DO;
do {
eecd &= ~E1000_EECD_DI;
if (data & mask)
eecd |= E1000_EECD_DI;
wr32(E1000_EECD, eecd);
wrfl();
udelay(nvm->delay_usec);
igb_raise_eec_clk(hw, &eecd);
igb_lower_eec_clk(hw, &eecd);
mask >>= 1;
} while (mask);
eecd &= ~E1000_EECD_DI;
wr32(E1000_EECD, eecd);
}
/**
* igb_shift_in_eec_bits - Shift data bits in from the EEPROM
* @hw: pointer to the HW structure
* @count: number of bits to shift in
*
* In order to read a register from the EEPROM, we need to shift 'count' bits
* in from the EEPROM. Bits are "shifted in" by raising the clock input to
* the EEPROM (setting the SK bit), and then reading the value of the data out
* "DO" bit. During this "shifting in" process the data in "DI" bit should
* always be clear.
**/
static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
{
u32 eecd;
u32 i;
u16 data;
eecd = rd32(E1000_EECD);
eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
data = 0;
for (i = 0; i < count; i++) {
data <<= 1;
igb_raise_eec_clk(hw, &eecd);
eecd = rd32(E1000_EECD);
eecd &= ~E1000_EECD_DI;
if (eecd & E1000_EECD_DO)
data |= 1;
igb_lower_eec_clk(hw, &eecd);
}
return data;
}
/**
* igb_poll_eerd_eewr_done - Poll for EEPROM read/write completion
* @hw: pointer to the HW structure
* @ee_reg: EEPROM flag for polling
*
* Polls the EEPROM status bit for either read or write completion based
* upon the value of 'ee_reg'.
**/
static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
{
u32 attempts = 100000;
u32 i, reg = 0;
s32 ret_val = -E1000_ERR_NVM;
for (i = 0; i < attempts; i++) {
if (ee_reg == E1000_NVM_POLL_READ)
reg = rd32(E1000_EERD);
else
reg = rd32(E1000_EEWR);
if (reg & E1000_NVM_RW_REG_DONE) {
ret_val = 0;
break;
}
udelay(5);
}
return ret_val;
}
/**
* igb_acquire_nvm - Generic request for access to EEPROM
* @hw: pointer to the HW structure
*
* Set the EEPROM access request bit and wait for EEPROM access grant bit.
* Return successful if access grant bit set, else clear the request for
* EEPROM access and return -E1000_ERR_NVM (-1).
**/
s32 igb_acquire_nvm(struct e1000_hw *hw)
{
u32 eecd = rd32(E1000_EECD);
s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
s32 ret_val = 0;
wr32(E1000_EECD, eecd | E1000_EECD_REQ);
eecd = rd32(E1000_EECD);
while (timeout) {
if (eecd & E1000_EECD_GNT)
break;
udelay(5);
eecd = rd32(E1000_EECD);
timeout--;
}
if (!timeout) {
eecd &= ~E1000_EECD_REQ;
wr32(E1000_EECD, eecd);
hw_dbg("Could not acquire NVM grant\n");
ret_val = -E1000_ERR_NVM;
}
return ret_val;
}
/**
* igb_standby_nvm - Return EEPROM to standby state
* @hw: pointer to the HW structure
*
* Return the EEPROM to a standby state.
**/
static void igb_standby_nvm(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 eecd = rd32(E1000_EECD);
if (nvm->type == e1000_nvm_eeprom_spi) {
/* Toggle CS to flush commands */
eecd |= E1000_EECD_CS;
wr32(E1000_EECD, eecd);
wrfl();
udelay(nvm->delay_usec);
eecd &= ~E1000_EECD_CS;
wr32(E1000_EECD, eecd);
wrfl();
udelay(nvm->delay_usec);
}
}
/**
* e1000_stop_nvm - Terminate EEPROM command
* @hw: pointer to the HW structure
*
* Terminates the current command by inverting the EEPROM's chip select pin.
**/
static void e1000_stop_nvm(struct e1000_hw *hw)
{
u32 eecd;
eecd = rd32(E1000_EECD);
if (hw->nvm.type == e1000_nvm_eeprom_spi) {
/* Pull CS high */
eecd |= E1000_EECD_CS;
igb_lower_eec_clk(hw, &eecd);
}
}
/**
* igb_release_nvm - Release exclusive access to EEPROM
* @hw: pointer to the HW structure
*
* Stop any current commands to the EEPROM and clear the EEPROM request bit.
**/
void igb_release_nvm(struct e1000_hw *hw)
{
u32 eecd;
e1000_stop_nvm(hw);
eecd = rd32(E1000_EECD);
eecd &= ~E1000_EECD_REQ;
wr32(E1000_EECD, eecd);
}
/**
* igb_ready_nvm_eeprom - Prepares EEPROM for read/write
* @hw: pointer to the HW structure
*
 * Sets up the EEPROM for reading and writing.
**/
static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 eecd = rd32(E1000_EECD);
s32 ret_val = 0;
u16 timeout = 0;
u8 spi_stat_reg;
if (nvm->type == e1000_nvm_eeprom_spi) {
/* Clear SK and CS */
eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
wr32(E1000_EECD, eecd);
wrfl();
udelay(1);
timeout = NVM_MAX_RETRY_SPI;
/*
* Read "Status Register" repeatedly until the LSB is cleared.
* The EEPROM will signal that the command has been completed
* by clearing bit 0 of the internal status register. If it's
* not cleared within 'timeout', then error out.
*/
while (timeout) {
igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
hw->nvm.opcode_bits);
spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8);
if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
break;
udelay(5);
igb_standby_nvm(hw);
timeout--;
}
if (!timeout) {
hw_dbg("SPI NVM Status error\n");
ret_val = -E1000_ERR_NVM;
goto out;
}
}
out:
return ret_val;
}
/**
 * igb_read_nvm_spi - Read EEPROM using SPI
* @hw: pointer to the HW structure
* @offset: offset of word in the EEPROM to read
* @words: number of words to read
* @data: word read from the EEPROM
*
* Reads a 16 bit word from the EEPROM.
**/
s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 i = 0;
s32 ret_val;
u16 word_in;
u8 read_opcode = NVM_READ_OPCODE_SPI;
/*
* A check for invalid values: offset too large, too many words,
* and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
(words == 0)) {
hw_dbg("nvm parameter(s) out of bounds\n");
ret_val = -E1000_ERR_NVM;
goto out;
}
ret_val = nvm->ops.acquire(hw);
if (ret_val)
goto out;
ret_val = igb_ready_nvm_eeprom(hw);
if (ret_val)
goto release;
igb_standby_nvm(hw);
if ((nvm->address_bits == 8) && (offset >= 128))
read_opcode |= NVM_A8_OPCODE_SPI;
/* Send the READ command (opcode + addr) */
igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
/*
* Read the data. SPI NVMs increment the address with each byte
* read and will roll over if reading beyond the end. This allows
* us to read the whole NVM from any offset.
*/
for (i = 0; i < words; i++) {
word_in = igb_shift_in_eec_bits(hw, 16);
data[i] = (word_in >> 8) | (word_in << 8);
}
release:
nvm->ops.release(hw);
out:
return ret_val;
}
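The byte order coming off the serial interface is the reverse of how the driver stores words, so each 16-bit value is swapped before being written to the caller's buffer, matching what the EERD path returns. Callers normally go through the ops table rather than calling this routine directly; a hedged usage sketch (the offset and buffer size here are arbitrary):

/* Illustrative sketch only: read four words starting at word offset 0x10. */
static s32 example_read_spi_words(struct e1000_hw *hw)
{
	u16 buf[4];

	/* nvm->ops.read points at igb_read_nvm_spi on SPI-attached parts */
	return hw->nvm.ops.read(hw, 0x10, ARRAY_SIZE(buf), buf);
}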
/**
* igb_read_nvm_eerd - Reads EEPROM using EERD register
* @hw: pointer to the HW structure
* @offset: offset of word in the EEPROM to read
* @words: number of words to read
* @data: buffer to hold the word(s) read from the EEPROM
*
* Reads one or more 16-bit words from the EEPROM using the EERD register.
**/
s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 i, eerd = 0;
s32 ret_val = 0;
/*
* A check for invalid values: offset too large, too many words,
* or zero words requested.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
(words == 0)) {
hw_dbg("nvm parameter(s) out of bounds\n");
ret_val = -E1000_ERR_NVM;
goto out;
}
for (i = 0; i < words; i++) {
eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
E1000_NVM_RW_REG_START;
wr32(E1000_EERD, eerd);
ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
if (ret_val)
break;
data[i] = (rd32(E1000_EERD) >>
E1000_NVM_RW_REG_DATA);
}
out:
return ret_val;
}
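Each loop iteration above packs the word address and a start bit into EERD, waits for the done bit via igb_poll_eerd_eewr_done(), and then pulls the result out of the upper data field. A single-word sketch of that cycle, using only the constants the function itself relies on (illustrative, not part of the driver):

/* Illustrative sketch only: one EERD read cycle for word 'offset'. */
static s32 example_eerd_read_one(struct e1000_hw *hw, u16 offset, u16 *data)
{
	s32 ret_val;

	/* the address lives above E1000_NVM_RW_ADDR_SHIFT; the START bit kicks off the read */
	wr32(E1000_EERD, (offset << E1000_NVM_RW_ADDR_SHIFT) |
			 E1000_NVM_RW_REG_START);

	ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
	if (ret_val)
		return ret_val;

	/* the word comes back in the bits above E1000_NVM_RW_REG_DATA */
	*data = (u16)(rd32(E1000_EERD) >> E1000_NVM_RW_REG_DATA);
	return 0;
}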
/**
* igb_write_nvm_spi - Write to EEPROM using SPI
* @hw: pointer to the HW structure
* @offset: offset within the EEPROM to be written to
* @words: number of words to write
* @data: 16 bit word(s) to be written to the EEPROM
*
* Writes data to EEPROM at offset using SPI interface.
*
* If igb_update_nvm_checksum is not called after this function, the
* EEPROM will most likely contain an invalid checksum.
**/
s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
struct e1000_nvm_info *nvm = &hw->nvm;
s32 ret_val;
u16 widx = 0;
/*
* A check for invalid values: offset too large, too many words,
* or zero words requested.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
(words == 0)) {
hw_dbg("nvm parameter(s) out of bounds\n");
ret_val = -E1000_ERR_NVM;
goto out;
}
ret_val = hw->nvm.ops.acquire(hw);
if (ret_val)
goto out;
msleep(10);
while (widx < words) {
u8 write_opcode = NVM_WRITE_OPCODE_SPI;
ret_val = igb_ready_nvm_eeprom(hw);
if (ret_val)
goto release;
igb_standby_nvm(hw);
/* Send the WRITE ENABLE command (8 bit opcode) */
igb_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
nvm->opcode_bits);
igb_standby_nvm(hw);
/*
* Some SPI eeproms use the 8th address bit embedded in the
* opcode
*/
if ((nvm->address_bits == 8) && (offset >= 128))
write_opcode |= NVM_A8_OPCODE_SPI;
/* Send the Write command (8-bit opcode + addr) */
igb_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
igb_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
nvm->address_bits);
/* Loop to allow for up to whole page write of eeprom */
while (widx < words) {
u16 word_out = data[widx];
word_out = (word_out >> 8) | (word_out << 8);
igb_shift_out_eec_bits(hw, word_out, 16);
widx++;
if ((((offset + widx) * 2) % nvm->page_size) == 0) {
igb_standby_nvm(hw);
break;
}
}
}
msleep(10);
release:
hw->nvm.ops.release(hw);
out:
return ret_val;
}
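The inner loop above ends a WRITE sequence whenever the next byte address lands on a page boundary, then re-arms with a fresh WREN/WRITE for the following page. A small sketch of the boundary test plus a worked value (illustrative only):

/* Illustrative sketch only: the page-break test used by the write loop. */
static bool example_crossed_page(u16 offset, u16 widx, u16 page_size)
{
	/* byte address of the next word to be written */
	return (((offset + widx) * 2) % page_size) == 0;
}

With a 32-byte page and offset 0, words 0 through 15 go out in one sequence; after the 16th word the byte address is 32, the test fires, and the driver raises CS via igb_standby_nvm() before starting the next page.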
/**
* igb_read_part_string - Read device part number
* @hw: pointer to the HW structure
* @part_num: pointer to device part number
* @part_num_size: size of part number buffer
*
* Reads the product board assembly (PBA) number from the EEPROM and stores
* the value in part_num.
**/
s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size)
{
s32 ret_val;
u16 nvm_data;
u16 pointer;
u16 offset;
u16 length;
if (part_num == NULL) {
hw_dbg("PBA string buffer was null\n");
ret_val = E1000_ERR_INVALID_ARGUMENT;
goto out;
}
ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error\n");
goto out;
}
ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer);
if (ret_val) {
hw_dbg("NVM Read Error\n");
goto out;
}
/*
* If nvm_data is not the pointer guard value, the PBA is stored in the
* legacy format: "pointer" is actually the second data word of the PBA
* number, and the two words can be decoded into an ASCII string.
*/
if (nvm_data != NVM_PBA_PTR_GUARD) {
hw_dbg("NVM PBA number is not stored as string\n");
/* we will need 11 characters to store the PBA */
if (part_num_size < 11) {
hw_dbg("PBA string buffer too small\n");
return E1000_ERR_NO_SPACE;
}
/* extract hex string from data and pointer */
part_num[0] = (nvm_data >> 12) & 0xF;
part_num[1] = (nvm_data >> 8) & 0xF;
part_num[2] = (nvm_data >> 4) & 0xF;
part_num[3] = nvm_data & 0xF;
part_num[4] = (pointer >> 12) & 0xF;
part_num[5] = (pointer >> 8) & 0xF;
part_num[6] = '-';
part_num[7] = 0;
part_num[8] = (pointer >> 4) & 0xF;
part_num[9] = pointer & 0xF;
/* put a null character on the end of our string */
part_num[10] = '\0';
/* switch all the data but the '-' to hex char */
for (offset = 0; offset < 10; offset++) {
if (part_num[offset] < 0xA)
part_num[offset] += '0';
else if (part_num[offset] < 0x10)
part_num[offset] += 'A' - 0xA;
}
goto out;
}
ret_val = hw->nvm.ops.read(hw, pointer, 1, &length);
if (ret_val) {
hw_dbg("NVM Read Error\n");
goto out;
}
if (length == 0xFFFF || length == 0) {
hw_dbg("NVM PBA number section invalid length\n");
ret_val = E1000_ERR_NVM_PBA_SECTION;
goto out;
}
/* check if part_num buffer is big enough */
if (part_num_size < (((u32)length * 2) - 1)) {
hw_dbg("PBA string buffer too small\n");
ret_val = E1000_ERR_NO_SPACE;
goto out;
}
/* trim pba length from start of string */
pointer++;
length--;
for (offset = 0; offset < length; offset++) {
ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error\n");
goto out;
}
part_num[offset * 2] = (u8)(nvm_data >> 8);
part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
}
part_num[offset * 2] = '\0';
out:
return ret_val;
}
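Callers must size the buffer for the worst case: 11 bytes covers the legacy hexadecimal form, and string-format PBAs need (length * 2) - 1 bytes, as checked above. A hedged usage sketch (the buffer size here is arbitrary, just comfortably larger than 11):

/* Illustrative sketch only: fetch and print the PBA. */
static void example_print_pba(struct e1000_hw *hw)
{
	u8 pba[24];	/* arbitrary size, must be at least 11 bytes */

	if (!igb_read_part_string(hw, pba, sizeof(pba)))
		hw_dbg("PBA: %s\n", pba);
}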
/**
* igb_read_mac_addr - Read device MAC address
* @hw: pointer to the HW structure
*
* Reads the device MAC address from receive address register 0 (which the
* hardware loads from the EEPROM at reset) and stores the value. Devices
* with two ports share one EEPROM; the second port's address differs in
* its last bit.
**/
s32 igb_read_mac_addr(struct e1000_hw *hw)
{
u32 rar_high;
u32 rar_low;
u16 i;
rar_high = rd32(E1000_RAH(0));
rar_low = rd32(E1000_RAL(0));
for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
for (i = 0; i < ETH_ALEN; i++)
hw->mac.addr[i] = hw->mac.perm_addr[i];
return 0;
}
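The two registers hold the address least-significant byte first: RAL(0) carries octets 0-3 and the low 16 bits of RAH(0) carry octets 4-5, which is exactly what the two loops above unpack. A worked example (the address value is invented for illustration):

/*
 * Illustrative only: for the address 00:1b:21:aa:bb:cc the registers
 * read back roughly as
 *	RAL(0) = 0xaa211b00            -> perm_addr[0..3] = 00 1b 21 aa
 *	RAH(0) low 16 bits = 0xccbb    -> perm_addr[4..5] = bb cc
 * (the upper RAH bits hold control flags such as Address Valid).
 */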
/**
* igb_validate_nvm_checksum - Validate EEPROM checksum
* @hw: pointer to the HW structure
*
* Calculates the EEPROM checksum by reading and adding the first
* NVM_CHECKSUM_REG + 1 words of the EEPROM, then verifies that the sum
* equals 0xBABA (NVM_SUM).
**/
s32 igb_validate_nvm_checksum(struct e1000_hw *hw)
{
s32 ret_val = 0;
u16 checksum = 0;
u16 i, nvm_data;
for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error\n");
goto out;
}
checksum += nvm_data;
}
if (checksum != (u16) NVM_SUM) {
hw_dbg("NVM Checksum Invalid\n");
ret_val = -E1000_ERR_NVM;
goto out;
}
out:
return ret_val;
}
/**
* igb_update_nvm_checksum - Update EEPROM checksum
* @hw: pointer to the HW structure
*
* Updates the EEPROM checksum by reading and adding each word of the EEPROM
* up to (but not including) the checksum word, then writes NVM_SUM minus
* that total to the checksum word.
**/
s32 igb_update_nvm_checksum(struct e1000_hw *hw)
{
s32 ret_val;
u16 checksum = 0;
u16 i, nvm_data;
for (i = 0; i < NVM_CHECKSUM_REG; i++) {
ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error while updating checksum.\n");
goto out;
}
checksum += nvm_data;
}
checksum = (u16) NVM_SUM - checksum;
ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
if (ret_val)
hw_dbg("NVM Write Error while updating checksum.\n");
out:
return ret_val;
}
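The two checksum routines share one invariant: the first NVM_CHECKSUM_REG + 1 words must sum to NVM_SUM (0xBABA), so the writer stores NVM_SUM minus the running sum and the validator simply re-adds everything. A sketch of that relation (illustrative only):

/* Illustrative sketch only: the checksum word the update routine stores. */
static u16 example_nvm_checksum_word(const u16 *words)
{
	u16 sum = 0;
	u16 i;

	for (i = 0; i < NVM_CHECKSUM_REG; i++)	/* every word before the checksum */
		sum += words[i];

	return (u16)NVM_SUM - sum;	/* stored at word NVM_CHECKSUM_REG */
}

For instance, if those words sum to 0x1234 the stored checksum is 0xBABA - 0x1234 = 0xA886, and validation then re-adds to exactly 0xBABA.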


@@ -0,0 +1,43 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _E1000_NVM_H_
#define _E1000_NVM_H_
s32 igb_acquire_nvm(struct e1000_hw *hw);
void igb_release_nvm(struct e1000_hw *hw);
s32 igb_read_mac_addr(struct e1000_hw *hw);
s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
u32 part_num_size);
s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_validate_nvm_checksum(struct e1000_hw *hw);
s32 igb_update_nvm_checksum(struct e1000_hw *hw);
#endif

File diff suppressed because it is too large


@@ -0,0 +1,136 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _E1000_PHY_H_
#define _E1000_PHY_H_
enum e1000_ms_type {
e1000_ms_hw_default = 0,
e1000_ms_force_master,
e1000_ms_force_slave,
e1000_ms_auto
};
enum e1000_smart_speed {
e1000_smart_speed_default = 0,
e1000_smart_speed_on,
e1000_smart_speed_off
};
s32 igb_check_downshift(struct e1000_hw *hw);
s32 igb_check_reset_block(struct e1000_hw *hw);
s32 igb_copper_link_setup_igp(struct e1000_hw *hw);
s32 igb_copper_link_setup_m88(struct e1000_hw *hw);
s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw);
s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw);
s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw);
s32 igb_get_cable_length_m88(struct e1000_hw *hw);
s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw);
s32 igb_get_cable_length_igp_2(struct e1000_hw *hw);
s32 igb_get_phy_id(struct e1000_hw *hw);
s32 igb_get_phy_info_igp(struct e1000_hw *hw);
s32 igb_get_phy_info_m88(struct e1000_hw *hw);
s32 igb_phy_sw_reset(struct e1000_hw *hw);
s32 igb_phy_hw_reset(struct e1000_hw *hw);
s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active);
s32 igb_setup_copper_link(struct e1000_hw *hw);
s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
u32 usec_interval, bool *success);
void igb_power_up_phy_copper(struct e1000_hw *hw);
void igb_power_down_phy_copper(struct e1000_hw *hw);
s32 igb_phy_init_script_igp3(struct e1000_hw *hw);
s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
s32 igb_copper_link_setup_82580(struct e1000_hw *hw);
s32 igb_get_phy_info_82580(struct e1000_hw *hw);
s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw);
s32 igb_get_cable_length_82580(struct e1000_hw *hw);
/* IGP01E1000 Specific Registers */
#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
#define IGP01E1000_PHY_POLARITY_MASK 0x0078
#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
#define I82580_ADDR_REG 16
#define I82580_CFG_REG 22
#define I82580_CFG_ASSERT_CRS_ON_TX (1 << 15)
#define I82580_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */
#define I82580_CTRL_REG 23
#define I82580_CTRL_DOWNSHIFT_MASK (7 << 10)
/* 82580 specific PHY registers */
#define I82580_PHY_CTRL_2 18
#define I82580_PHY_LBK_CTRL 19
#define I82580_PHY_STATUS_2 26
#define I82580_PHY_DIAG_STATUS 31
/* I82580 PHY Status 2 */
#define I82580_PHY_STATUS2_REV_POLARITY 0x0400
#define I82580_PHY_STATUS2_MDIX 0x0800
#define I82580_PHY_STATUS2_SPEED_MASK 0x0300
#define I82580_PHY_STATUS2_SPEED_1000MBPS 0x0200
#define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100
/* I82580 PHY Control 2 */
#define I82580_PHY_CTRL2_AUTO_MDIX 0x0400
#define I82580_PHY_CTRL2_FORCE_MDI_MDIX 0x0200
/* I82580 PHY Diagnostics Status */
#define I82580_DSTATUS_CABLE_LENGTH 0x03FC
#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2
/* Enable flexible speed on link-up */
#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
#define IGP01E1000_PSSR_MDIX 0x0800
#define IGP01E1000_PSSR_SPEED_MASK 0xC000
#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
#define IGP02E1000_PHY_CHANNEL_NUM 4
#define IGP02E1000_PHY_AGC_A 0x11B1
#define IGP02E1000_PHY_AGC_B 0x12B1
#define IGP02E1000_PHY_AGC_C 0x14B1
#define IGP02E1000_PHY_AGC_D 0x18B1
#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */
#define IGP02E1000_AGC_LENGTH_MASK 0x7F
#define IGP02E1000_AGC_RANGE 15
#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
#endif


@@ -0,0 +1,355 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _E1000_REGS_H_
#define _E1000_REGS_H_
#define E1000_CTRL 0x00000 /* Device Control - RW */
#define E1000_STATUS 0x00008 /* Device Status - RO */
#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
#define E1000_EERD 0x00014 /* EEPROM Read - RW */
#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
#define E1000_MDIC 0x00020 /* MDI Control - RW */
#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */
#define E1000_SCTL 0x00024 /* SerDes Control - RW */
#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
#define E1000_RCTL 0x00100 /* RX Control - RW */
#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */
#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */
#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */
#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */
#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
#define E1000_TCTL 0x00400 /* TX Control - RW */
#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */
#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */
#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
#define E1000_PBS 0x01008 /* Packet Buffer Size */
#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */
#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */
#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
/* IEEE 1588 TIMESYNCH */
#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */
#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */
#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
/* Filtering Registers */
#define E1000_SAQF(_n) (0x5980 + 4 * (_n))
#define E1000_DAQF(_n) (0x59A0 + 4 * (_n))
#define E1000_SPQF(_n) (0x59C0 + 4 * (_n))
#define E1000_FTQF(_n) (0x59E0 + 4 * (_n))
#define E1000_SAQF0 E1000_SAQF(0)
#define E1000_DAQF0 E1000_DAQF(0)
#define E1000_SPQF0 E1000_SPQF(0)
#define E1000_FTQF0 E1000_FTQF(0)
#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */
#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
/* DMA Coalescing registers */
#define E1000_DMACR 0x02508 /* Control Register */
#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */
#define E1000_DMCTLX 0x02514 /* Time to Lx Request */
#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */
#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */
#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */
#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
/* TX Rate Limit Registers */
#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */
#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */
/* Split and Replication RX Control - RW */
#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
/*
* Convenience macros
*
* Note: "_n" is the queue number of the register to be written to.
*
* Example usage:
* E1000_RDBAL(current_rx_queue)
*/
#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) \
: (0x0C000 + ((_n) * 0x40)))
#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) \
: (0x0C004 + ((_n) * 0x40)))
#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) \
: (0x0C008 + ((_n) * 0x40)))
#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) \
: (0x0C00C + ((_n) * 0x40)))
#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) \
: (0x0C010 + ((_n) * 0x40)))
#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) \
: (0x0C018 + ((_n) * 0x40)))
#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) \
: (0x0C028 + ((_n) * 0x40)))
#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) \
: (0x0E000 + ((_n) * 0x40)))
#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) \
: (0x0E004 + ((_n) * 0x40)))
#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) \
: (0x0E008 + ((_n) * 0x40)))
#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) \
: (0x0E010 + ((_n) * 0x40)))
#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) \
: (0x0E018 + ((_n) * 0x40)))
#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \
: (0x0E028 + ((_n) * 0x40)))
#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8))
#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8))
#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \
: (0x0E038 + ((_n) * 0x40)))
#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \
: (0x0E03C + ((_n) * 0x40)))
#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */
#define E1000_DTXCTL 0x03590 /* DMA TX Control - RW */
#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
#define E1000_COLC 0x04028 /* Collision Count - R/clr */
#define E1000_DC 0x04030 /* Defer Count - R/clr */
#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */
#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */
#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */
#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */
#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */
#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */
#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */
#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */
#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */
#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */
#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */
#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */
#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */
#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */
#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */
#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */
#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */
#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */
#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */
#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */
#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */
#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */
#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */
#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */
#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */
#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */
#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */
#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */
#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */
#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */
#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */
#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */
#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */
#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */
#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */
#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */
#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */
#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */
#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */
#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */
#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */
#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */
#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */
#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
/* Interrupt Cause Rx Packet Timer Expire Count */
#define E1000_ICRXPTC 0x04104
/* Interrupt Cause Rx Absolute Timer Expire Count */
#define E1000_ICRXATC 0x04108
/* Interrupt Cause Tx Packet Timer Expire Count */
#define E1000_ICTXPTC 0x0410C
/* Interrupt Cause Tx Absolute Timer Expire Count */
#define E1000_ICTXATC 0x04110
/* Interrupt Cause Tx Queue Empty Count */
#define E1000_ICTXQEC 0x04118
/* Interrupt Cause Tx Queue Minimum Threshold Count */
#define E1000_ICTXQMTC 0x0411C
/* Interrupt Cause Rx Descriptor Minimum Threshold Count */
#define E1000_ICRXDMTC 0x04120
#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */
#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */
#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */
#define E1000_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */
#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */
#define E1000_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */
#define E1000_RPTHC 0x04104 /* Rx Packets To Host */
#define E1000_HGPTC 0x04118 /* Host Good Packets TX Count */
#define E1000_HTCBDPC 0x04124 /* Host TX Circuit Breaker Dropped Count */
#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */
#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */
#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */
#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */
#define E1000_LENERRS 0x04138 /* Length Errors Count */
#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */
#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */
#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */
#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */
#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Page - RW */
#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */
#define E1000_RLPML 0x05004 /* RX Long Packet Max Length */
#define E1000_RFCTL 0x05008 /* Receive Filter Control*/
#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
#define E1000_RA 0x05400 /* Receive Address - RW Array */
#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */
#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
(0x054E0 + ((_i - 16) * 8)))
#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
(0x054E4 + ((_i - 16) * 8)))
#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8))
#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8))
#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */
#define E1000_WUC 0x05800 /* Wakeup Control - RW */
#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
#define E1000_WUS 0x05810 /* Wakeup Status - RO */
#define E1000_MANC 0x05820 /* Management Control - RW */
#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
#define E1000_CCMCTL 0x05B48 /* CCM Control Register */
#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */
#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */
#define E1000_GCR 0x05B00 /* PCI-Ex Control */
#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
#define E1000_SWSM 0x05B50 /* SW Semaphore */
#define E1000_FWSM 0x05B54 /* FW Semaphore */
#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */
/* RSS registers */
#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */
#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate Interrupt Ext*/
#define E1000_IMIRVP 0x05AC0 /* Immediate Interrupt RX VLAN Priority - RW */
/* MSI-X Allocation Register (_i) - RW */
#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4))
/* Redirection Table - RW Array */
#define E1000_RETA(_i) (0x05C00 + ((_i) * 4))
#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */
/* VT Registers */
#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */
#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */
#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */
#define E1000_VFRE 0x00C8C /* VF Receive Enables */
#define E1000_VFTE 0x00C90 /* VF Transmit Enables */
#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */
#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */
#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */
#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */
#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */
#define E1000_IOVTCL 0x05BBC /* IOV Control Register */
#define E1000_TXSWC 0x05ACC /* Tx Switch Control */
/* These act per VF so an array friendly macro is used */
#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
* Filter - RW */
#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
#define wr32(reg, value) (writel(value, hw->hw_addr + reg))
#define rd32(reg) (readl(hw->hw_addr + reg))
#define wrfl() ((void)rd32(E1000_STATUS))
#define array_wr32(reg, offset, value) \
(writel(value, hw->hw_addr + reg + ((offset) << 2)))
#define array_rd32(reg, offset) \
(readl(hw->hw_addr + reg + ((offset) << 2)))
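These accessors expand to readl()/writel() against hw->hw_addr and therefore assume a local struct e1000_hw pointer named hw, which is how the driver .c files use them; the array forms simply add a dword-indexed offset. A hedged sketch of typical use (the 128-entry multicast table size is an assumption, not defined in this header):

/* Illustrative sketch only: clear the multicast table array. */
static void example_clear_mta(struct e1000_hw *hw)
{
	u32 i;

	for (i = 0; i < 128; i++)		/* assumed table size */
		array_wr32(E1000_MTA, i, 0);	/* writel(0, hw_addr + MTA + i * 4) */
	wrfl();					/* flush posted writes via a STATUS read */
}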
/* DMA Coalescing registers */
#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
/* Energy Efficient Ethernet "EEE" register */
#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */
#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */
/* Thermal Sensor Register */
#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
/* OS2BMC Registers */
#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */
#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */
#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */
#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */
#endif


@@ -0,0 +1,450 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
/* Linux PRO/1000 Ethernet Driver main header file */
#ifndef _IGB_H_
#define _IGB_H_
#include "e1000_mac.h"
#include "e1000_82575.h"
#include <linux/clocksource.h>
#include <linux/timecompare.h>
#include <linux/net_tstamp.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
struct igb_adapter;
/* Interrupt defines */
#define IGB_START_ITR 648 /* ~6000 ints/sec */
#define IGB_4K_ITR 980
#define IGB_20K_ITR 196
#define IGB_70K_ITR 56
/* TX/RX descriptor defines */
#define IGB_DEFAULT_TXD 256
#define IGB_DEFAULT_TX_WORK 128
#define IGB_MIN_TXD 80
#define IGB_MAX_TXD 4096
#define IGB_DEFAULT_RXD 256
#define IGB_MIN_RXD 80
#define IGB_MAX_RXD 4096
#define IGB_DEFAULT_ITR 3 /* dynamic */
#define IGB_MAX_ITR_USECS 10000
#define IGB_MIN_ITR_USECS 10
#define NON_Q_VECTORS 1
#define MAX_Q_VECTORS 8
/* Transmit and receive queues */
#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? 2 : \
(hw->mac.type > e1000_82575 ? 8 : 4))
#define IGB_MAX_TX_QUEUES 16
#define IGB_MAX_VF_MC_ENTRIES 30
#define IGB_MAX_VF_FUNCTIONS 8
#define IGB_MAX_VFTA_ENTRIES 128
#define IGB_82576_VF_DEV_ID 0x10CA
#define IGB_I350_VF_DEV_ID 0x1520
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
u16 num_vf_mc_hashes;
u16 vlans_enabled;
u32 flags;
unsigned long last_nack;
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
u16 pf_qos;
u16 tx_rate;
struct pci_dev *vfdev;
};
#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */
#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */
#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */
/* RX descriptor control thresholds.
* PTHRESH - MAC will consider prefetch if it has fewer than this number of
* descriptors available in its onboard memory.
* Setting this to 0 disables RX descriptor prefetch.
* HTHRESH - MAC will only prefetch if there are at least this many descriptors
* available in host memory.
* If PTHRESH is 0, this should also be 0.
* WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
* descriptors until either it has this many to write back, or the
* ITR timer expires.
*/
#define IGB_RX_PTHRESH 8
#define IGB_RX_HTHRESH 8
#define IGB_TX_PTHRESH 8
#define IGB_TX_HTHRESH 1
#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \
adapter->msix_entries) ? 1 : 4)
#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
adapter->msix_entries) ? 1 : 16)
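These thresholds are not registers of their own; the ring-setup code in igb_main.c packs them into the per-queue RXDCTL/TXDCTL values. The sketch below is shown only for illustration (it would not live in this header), and the 8- and 16-bit shift positions are an assumption rather than something defined here:

/* Illustrative sketch only: shift positions are assumed, not defined here. */
static u32 example_pack_rxdctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rxdctl = 0;

	rxdctl |= IGB_RX_PTHRESH;		/* prefetch threshold (assumed bits 4:0) */
	rxdctl |= IGB_RX_HTHRESH << 8;		/* host threshold (assumed bits 12:8) */
	rxdctl |= IGB_RX_WTHRESH << 16;		/* writeback threshold (assumed bits 20:16) */
	return rxdctl;
}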
/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* Supported Rx Buffer Sizes */
#define IGB_RXBUFFER_512 512
#define IGB_RXBUFFER_16384 16384
#define IGB_RX_HDR_LEN IGB_RXBUFFER_512
/* How many Tx Descriptors do we need to call netif_wake_queue? */
#define IGB_TX_QUEUE_WAKE 16
/* How many Rx Buffers do we bundle into one write to the hardware? */
#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
#define AUTO_ALL_MODES 0
#define IGB_EEPROM_APME 0x0400
#ifndef IGB_MASTER_SLAVE
/* Switch to override PHY master/slave setting */
#define IGB_MASTER_SLAVE e1000_ms_hw_default
#endif
#define IGB_MNG_VLAN_NONE -1
#define IGB_TX_FLAGS_CSUM 0x00000001
#define IGB_TX_FLAGS_VLAN 0x00000002
#define IGB_TX_FLAGS_TSO 0x00000004
#define IGB_TX_FLAGS_IPV4 0x00000008
#define IGB_TX_FLAGS_TSTAMP 0x00000010
#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT 16
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct igb_tx_buffer {
union e1000_adv_tx_desc *next_to_watch;
unsigned long time_stamp;
struct sk_buff *skb;
unsigned int bytecount;
u16 gso_segs;
__be16 protocol;
dma_addr_t dma;
u32 length;
u32 tx_flags;
};
struct igb_rx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
struct page *page;
dma_addr_t page_dma;
u32 page_offset;
};
struct igb_tx_queue_stats {
u64 packets;
u64 bytes;
u64 restart_queue;
u64 restart_queue2;
};
struct igb_rx_queue_stats {
u64 packets;
u64 bytes;
u64 drops;
u64 csum_err;
u64 alloc_failed;
};
struct igb_ring_container {
struct igb_ring *ring; /* pointer to linked list of rings */
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_packets; /* total packets processed this int */
u16 work_limit; /* total work allowed per interrupt */
u8 count; /* total number of rings in vector */
u8 itr; /* current ITR setting for ring */
};
struct igb_q_vector {
struct igb_adapter *adapter; /* backlink */
int cpu; /* CPU for DCA */
u32 eims_value; /* EIMS mask value */
struct igb_ring_container rx, tx;
struct napi_struct napi;
int numa_node;
u16 itr_val;
u8 set_itr;
void __iomem *itr_register;
char name[IFNAMSIZ + 9];
};
struct igb_ring {
struct igb_q_vector *q_vector; /* backlink to q_vector */
struct net_device *netdev; /* back pointer to net_device */
struct device *dev; /* device pointer for dma mapping */
union { /* array of buffer info structs */
struct igb_tx_buffer *tx_buffer_info;
struct igb_rx_buffer *rx_buffer_info;
};
void *desc; /* descriptor ring memory */
unsigned long flags; /* ring specific flags */
void __iomem *tail; /* pointer to ring tail register */
u16 count; /* number of desc. in the ring */
u8 queue_index; /* logical index of the ring*/
u8 reg_idx; /* physical index of the ring */
u32 size; /* length of desc. ring in bytes */
/* everything past this point are written often */
u16 next_to_clean ____cacheline_aligned_in_smp;
u16 next_to_use;
union {
/* TX */
struct {
struct igb_tx_queue_stats tx_stats;
struct u64_stats_sync tx_syncp;
struct u64_stats_sync tx_syncp2;
};
/* RX */
struct {
struct igb_rx_queue_stats rx_stats;
struct u64_stats_sync rx_syncp;
};
};
/* Items past this point are only used during ring alloc / free */
dma_addr_t dma; /* phys address of the ring */
int numa_node; /* node to alloc ring memory on */
};
enum e1000_ring_flags_t {
IGB_RING_FLAG_RX_SCTP_CSUM,
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
IGB_RING_FLAG_TX_CTX_IDX,
IGB_RING_FLAG_TX_DETECT_HANG
};
#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
#define IGB_RX_DESC(R, i) \
(&(((union e1000_adv_rx_desc *)((R)->desc))[i]))
#define IGB_TX_DESC(R, i) \
(&(((union e1000_adv_tx_desc *)((R)->desc))[i]))
#define IGB_TX_CTXTDESC(R, i) \
(&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))
/* igb_test_staterr - tests bits within Rx descriptor status and error fields */
static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc,
const u32 stat_err_bits)
{
return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}
/* igb_desc_unused - calculate if we have unused descriptors */
static inline int igb_desc_unused(struct igb_ring *ring)
{
if (ring->next_to_clean > ring->next_to_use)
return ring->next_to_clean - ring->next_to_use - 1;
return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
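Both helpers are receive/transmit hot-path conveniences: igb_test_staterr() masks status/error bits without byte-swapping the whole field, and igb_desc_unused() reports how many descriptors can still be handed to hardware. A hedged usage sketch plus a worked value:

/* Illustrative sketch only: check whether hardware has finished a descriptor. */
static bool example_desc_done(union e1000_adv_rx_desc *rx_desc)
{
	/* E1000_RXD_STAT_DD is the descriptor-done status bit */
	return !!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD);
}

For igb_desc_unused(), a 256-entry ring with next_to_clean = 5 and next_to_use = 10 reports 256 + 5 - 10 - 1 = 250 free descriptors.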
/* board specific private data structure */
struct igb_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct net_device *netdev;
unsigned long state;
unsigned int flags;
unsigned int num_q_vectors;
struct msix_entry *msix_entries;
/* Interrupt Throttle Rate */
u32 rx_itr_setting;
u32 tx_itr_setting;
u16 tx_itr;
u16 rx_itr;
/* TX */
u16 tx_work_limit;
u32 tx_timeout_count;
int num_tx_queues;
struct igb_ring *tx_ring[16];
/* RX */
int num_rx_queues;
struct igb_ring *rx_ring[16];
u32 max_frame_size;
u32 min_frame_size;
struct timer_list watchdog_timer;
struct timer_list phy_info_timer;
u16 mng_vlan_id;
u32 bd_number;
u32 wol;
u32 en_mng_pt;
u16 link_speed;
u16 link_duplex;
struct work_struct reset_task;
struct work_struct watchdog_task;
bool fc_autoneg;
u8 tx_timeout_factor;
struct timer_list blink_timer;
unsigned long led_status;
/* OS defined structs */
struct pci_dev *pdev;
struct cyclecounter cycles;
struct timecounter clock;
struct timecompare compare;
struct hwtstamp_config hwtstamp_config;
spinlock_t stats64_lock;
struct rtnl_link_stats64 stats64;
/* structs defined in e1000_hw.h */
struct e1000_hw hw;
struct e1000_hw_stats stats;
struct e1000_phy_info phy_info;
struct e1000_phy_stats phy_stats;
u32 test_icr;
struct igb_ring test_tx_ring;
struct igb_ring test_rx_ring;
int msg_enable;
struct igb_q_vector *q_vector[MAX_Q_VECTORS];
u32 eims_enable_mask;
u32 eims_other;
/* to not mess up cache alignment, always add to the bottom */
u32 eeprom_wol;
u16 tx_ring_count;
u16 rx_ring_count;
unsigned int vfs_allocated_count;
struct vf_data_storage *vf_data;
int vf_rate_link_speed;
u32 rss_queues;
u32 wvbr;
int node;
u32 *shadow_vfta;
};
#define IGB_FLAG_HAS_MSI (1 << 0)
#define IGB_FLAG_DCA_ENABLED (1 << 1)
#define IGB_FLAG_QUAD_PORT_A (1 << 2)
#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
#define IGB_FLAG_DMAC (1 << 4)
/* DMA Coalescing defines */
#define IGB_MIN_TXPBSIZE 20408
#define IGB_TX_BUF_4096 4096
#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
#define IGB_82576_TSYNC_SHIFT 19
#define IGB_82580_TSYNC_SHIFT 24
#define IGB_TS_HDR_LEN 16
enum e1000_state_t {
__IGB_TESTING,
__IGB_RESETTING,
__IGB_DOWN
};
enum igb_boards {
board_82575,
};
extern char igb_driver_name[];
extern char igb_driver_version[];
extern int igb_up(struct igb_adapter *);
extern void igb_down(struct igb_adapter *);
extern void igb_reinit_locked(struct igb_adapter *);
extern void igb_reset(struct igb_adapter *);
extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
extern int igb_setup_tx_resources(struct igb_ring *);
extern int igb_setup_rx_resources(struct igb_ring *);
extern void igb_free_tx_resources(struct igb_ring *);
extern void igb_free_rx_resources(struct igb_ring *);
extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
extern void igb_setup_tctl(struct igb_adapter *);
extern void igb_setup_rctl(struct igb_adapter *);
extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
struct igb_tx_buffer *);
extern void igb_alloc_rx_buffers(struct igb_ring *, u16);
extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
extern bool igb_has_link(struct igb_adapter *adapter);
extern void igb_set_ethtool_ops(struct net_device *);
extern void igb_power_up_link(struct igb_adapter *);
static inline s32 igb_reset_phy(struct e1000_hw *hw)
{
if (hw->phy.ops.reset)
return hw->phy.ops.reset(hw);
return 0;
}
static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
{
if (hw->phy.ops.read_reg)
return hw->phy.ops.read_reg(hw, offset, data);
return 0;
}
static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
{
if (hw->phy.ops.write_reg)
return hw->phy.ops.write_reg(hw, offset, data);
return 0;
}
static inline s32 igb_get_phy_info(struct e1000_hw *hw)
{
if (hw->phy.ops.get_phy_info)
return hw->phy.ops.get_phy_info(hw);
return 0;
}
#endif /* _IGB_H_ */

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,38 @@
################################################################################
#
# Intel(R) 82576 Virtual Function Linux driver
# Copyright(c) 2009 - 2010 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
#
# Contact Information:
# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
#
################################################################################
#
# Makefile for the Intel(R) 82576 VF ethernet driver
#
obj-$(CONFIG_IGBVF) += igbvf.o
igbvf-objs := vf.o \
mbx.o \
ethtool.o \
netdev.o


@@ -0,0 +1,125 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _E1000_DEFINES_H_
#define _E1000_DEFINES_H_
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
#define REQ_TX_DESCRIPTOR_MULTIPLE 8
#define REQ_RX_DESCRIPTOR_MULTIPLE 8
/* IVAR valid bit */
#define E1000_IVAR_VALID 0x80
/* Receive Descriptor bit definitions */
#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
#define E1000_RXDEXT_STATERR_CE 0x01000000
#define E1000_RXDEXT_STATERR_SE 0x02000000
#define E1000_RXDEXT_STATERR_SEQ 0x04000000
#define E1000_RXDEXT_STATERR_CXE 0x10000000
#define E1000_RXDEXT_STATERR_TCPE 0x20000000
#define E1000_RXDEXT_STATERR_IPE 0x40000000
#define E1000_RXDEXT_STATERR_RXE 0x80000000
/* Same mask, but for extended and packet split descriptors */
#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
E1000_RXDEXT_STATERR_CE | \
E1000_RXDEXT_STATERR_SE | \
E1000_RXDEXT_STATERR_SEQ | \
E1000_RXDEXT_STATERR_CXE | \
E1000_RXDEXT_STATERR_RXE)
/* Device Control */
#define E1000_CTRL_RST 0x04000000 /* Global reset */
/* Device Status */
#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
#define SPEED_10 10
#define SPEED_100 100
#define SPEED_1000 1000
#define HALF_DUPLEX 1
#define FULL_DUPLEX 2
/* Transmit Descriptor bit definitions */
#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
#define MAX_JUMBO_FRAME_SIZE 0x3F00
/* 802.1q VLAN Packet Size */
#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
/* Error Codes */
#define E1000_SUCCESS 0
#define E1000_ERR_CONFIG 3
#define E1000_ERR_MAC_INIT 5
#define E1000_ERR_MBX 15
#ifndef ETH_ADDR_LEN
#define ETH_ADDR_LEN 6
#endif
/* SRRCTL bit definitions */
#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00
#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000
#define E1000_SRRCTL_DROP_EN 0x80000000
#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F
#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00
/* Additional Descriptor Control definitions */
#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */
#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */
/* Direct Cache Access (DCA) definitions */
#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
#endif /* _E1000_DEFINES_H_ */


@@ -0,0 +1,473 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
/* ethtool support for igbvf */
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include "igbvf.h"
#include <linux/if_vlan.h>
struct igbvf_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
int stat_offset;
int base_stat_offset;
};
#define IGBVF_STAT(current, base) \
sizeof(((struct igbvf_adapter *)0)->current), \
offsetof(struct igbvf_adapter, current), \
offsetof(struct igbvf_adapter, base)
static const struct igbvf_stats igbvf_gstrings_stats[] = {
{ "rx_packets", IGBVF_STAT(stats.gprc, stats.base_gprc) },
{ "tx_packets", IGBVF_STAT(stats.gptc, stats.base_gptc) },
{ "rx_bytes", IGBVF_STAT(stats.gorc, stats.base_gorc) },
{ "tx_bytes", IGBVF_STAT(stats.gotc, stats.base_gotc) },
{ "multicast", IGBVF_STAT(stats.mprc, stats.base_mprc) },
{ "lbrx_bytes", IGBVF_STAT(stats.gorlbc, stats.base_gorlbc) },
{ "lbrx_packets", IGBVF_STAT(stats.gprlbc, stats.base_gprlbc) },
{ "tx_restart_queue", IGBVF_STAT(restart_queue, zero_base) },
{ "rx_long_byte_count", IGBVF_STAT(stats.gorc, stats.base_gorc) },
{ "rx_csum_offload_good", IGBVF_STAT(hw_csum_good, zero_base) },
{ "rx_csum_offload_errors", IGBVF_STAT(hw_csum_err, zero_base) },
{ "rx_header_split", IGBVF_STAT(rx_hdr_split, zero_base) },
{ "alloc_rx_buff_failed", IGBVF_STAT(alloc_rx_buff_failed, zero_base) },
};
#define IGBVF_GLOBAL_STATS_LEN ARRAY_SIZE(igbvf_gstrings_stats)
static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = {
"Link test (on/offline)"
};
#define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test)
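Each table entry records the size and byte offset of a live counter together with the offset of its base value, so the ethtool statistics callback can walk the table generically instead of naming every field. A hedged sketch of that offset-based lookup (how the real callback combines the current and base values is not shown here):

/* Illustrative sketch only: fetch the 'current' counter for table entry i. */
static u64 example_read_stat(struct igbvf_adapter *adapter, int i)
{
	char *p = (char *)adapter + igbvf_gstrings_stats[i].stat_offset;

	return (igbvf_gstrings_stats[i].sizeof_stat == sizeof(u64)) ?
			*(u64 *)p : *(u32 *)p;
}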
static int igbvf_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 status;
ecmd->supported = SUPPORTED_1000baseT_Full;
ecmd->advertising = ADVERTISED_1000baseT_Full;
ecmd->port = -1;
ecmd->transceiver = XCVR_DUMMY1;
status = er32(STATUS);
if (status & E1000_STATUS_LU) {
if (status & E1000_STATUS_SPEED_1000)
ethtool_cmd_speed_set(ecmd, SPEED_1000);
else if (status & E1000_STATUS_SPEED_100)
ethtool_cmd_speed_set(ecmd, SPEED_100);
else
ethtool_cmd_speed_set(ecmd, SPEED_10);
if (status & E1000_STATUS_FD)
ecmd->duplex = DUPLEX_FULL;
else
ecmd->duplex = DUPLEX_HALF;
} else {
ethtool_cmd_speed_set(ecmd, -1);
ecmd->duplex = -1;
}
ecmd->autoneg = AUTONEG_DISABLE;
return 0;
}
static int igbvf_set_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
return -EOPNOTSUPP;
}
static void igbvf_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
return;
}
static int igbvf_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
return -EOPNOTSUPP;
}
static u32 igbvf_get_msglevel(struct net_device *netdev)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
return adapter->msg_enable;
}
static void igbvf_set_msglevel(struct net_device *netdev, u32 data)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
adapter->msg_enable = data;
}
static int igbvf_get_regs_len(struct net_device *netdev)
{
#define IGBVF_REGS_LEN 8
return IGBVF_REGS_LEN * sizeof(u32);
}
static void igbvf_get_regs(struct net_device *netdev,
struct ethtool_regs *regs, void *p)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 *regs_buff = p;
memset(p, 0, IGBVF_REGS_LEN * sizeof(u32));
regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
adapter->pdev->device;
regs_buff[0] = er32(CTRL);
regs_buff[1] = er32(STATUS);
regs_buff[2] = er32(RDLEN(0));
regs_buff[3] = er32(RDH(0));
regs_buff[4] = er32(RDT(0));
regs_buff[5] = er32(TDLEN(0));
regs_buff[6] = er32(TDH(0));
regs_buff[7] = er32(TDT(0));
}
static int igbvf_get_eeprom_len(struct net_device *netdev)
{
return 0;
}
static int igbvf_get_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
return -EOPNOTSUPP;
}
static int igbvf_set_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
return -EOPNOTSUPP;
}
static void igbvf_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
char firmware_version[32] = "N/A";
strncpy(drvinfo->driver, igbvf_driver_name, 32);
strncpy(drvinfo->version, igbvf_driver_version, 32);
strncpy(drvinfo->fw_version, firmware_version, 32);
strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
drvinfo->regdump_len = igbvf_get_regs_len(netdev);
drvinfo->eedump_len = igbvf_get_eeprom_len(netdev);
}
static void igbvf_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct igbvf_ring *tx_ring = adapter->tx_ring;
struct igbvf_ring *rx_ring = adapter->rx_ring;
ring->rx_max_pending = IGBVF_MAX_RXD;
ring->tx_max_pending = IGBVF_MAX_TXD;
ring->rx_pending = rx_ring->count;
ring->tx_pending = tx_ring->count;
}
static int igbvf_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct igbvf_ring *temp_ring;
int err = 0;
u32 new_rx_count, new_tx_count;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
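/* Clamp the requested descriptor counts to the supported range and
 * round up to the descriptor alignment the hardware requires.
 */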
new_rx_count = max(ring->rx_pending, (u32)IGBVF_MIN_RXD);
new_rx_count = min(new_rx_count, (u32)IGBVF_MAX_RXD);
new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
new_tx_count = max(ring->tx_pending, (u32)IGBVF_MIN_TXD);
new_tx_count = min(new_tx_count, (u32)IGBVF_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
if ((new_tx_count == adapter->tx_ring->count) &&
(new_rx_count == adapter->rx_ring->count)) {
/* nothing to do */
return 0;
}
while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
msleep(1);
if (!netif_running(adapter->netdev)) {
adapter->tx_ring->count = new_tx_count;
adapter->rx_ring->count = new_rx_count;
goto clear_reset;
}
temp_ring = vmalloc(sizeof(struct igbvf_ring));
if (!temp_ring) {
err = -ENOMEM;
goto clear_reset;
}
igbvf_down(adapter);
/*
* We can't just free everything and then setup again,
* because the ISRs in MSI-X mode get passed pointers
* to the tx and rx ring structs.
*/
if (new_tx_count != adapter->tx_ring->count) {
memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring));
temp_ring->count = new_tx_count;
err = igbvf_setup_tx_resources(adapter, temp_ring);
if (err)
goto err_setup;
igbvf_free_tx_resources(adapter->tx_ring);
memcpy(adapter->tx_ring, temp_ring, sizeof(struct igbvf_ring));
}
if (new_rx_count != adapter->rx_ring->count) {
memcpy(temp_ring, adapter->rx_ring, sizeof(struct igbvf_ring));
temp_ring->count = new_rx_count;
err = igbvf_setup_rx_resources(adapter, temp_ring);
if (err)
goto err_setup;
igbvf_free_rx_resources(adapter->rx_ring);
memcpy(adapter->rx_ring, temp_ring, sizeof(struct igbvf_ring));
}
err_setup:
igbvf_up(adapter);
vfree(temp_ring);
clear_reset:
clear_bit(__IGBVF_RESETTING, &adapter->state);
return err;
}
static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data)
{
struct e1000_hw *hw = &adapter->hw;
*data = 0;
hw->mac.ops.check_for_link(hw);
if (!(er32(STATUS) & E1000_STATUS_LU))
*data = 1;
return *data;
}
static void igbvf_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, u64 *data)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
set_bit(__IGBVF_TESTING, &adapter->state);
/*
* Link test performed before hardware reset so autoneg doesn't
* interfere with test result
*/
if (igbvf_link_test(adapter, &data[0]))
eth_test->flags |= ETH_TEST_FL_FAILED;
clear_bit(__IGBVF_TESTING, &adapter->state);
msleep_interruptible(4 * 1000);
}
static void igbvf_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
wol->supported = 0;
wol->wolopts = 0;
}
static int igbvf_set_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
return -EOPNOTSUPP;
}
static int igbvf_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
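/* itr_setting values of 3 or less encode a preset moderation mode and
 * are reported as-is; larger values store four times the microsecond
 * count (the value written to the ITR register), hence the shift below.
 */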
if (adapter->itr_setting <= 3)
ec->rx_coalesce_usecs = adapter->itr_setting;
else
ec->rx_coalesce_usecs = adapter->itr_setting >> 2;
return 0;
}
static int igbvf_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) ||
((ec->rx_coalesce_usecs > 3) &&
(ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) ||
(ec->rx_coalesce_usecs == 2))
return -EINVAL;
/* convert to rate of irqs per second */
if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
adapter->itr = IGBVF_START_ITR;
adapter->itr_setting = ec->rx_coalesce_usecs;
} else {
adapter->itr = ec->rx_coalesce_usecs << 2;
adapter->itr_setting = adapter->itr;
}
writel(adapter->itr,
hw->hw_addr + adapter->rx_ring[0].itr_register);
return 0;
}
static int igbvf_nway_reset(struct net_device *netdev)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
if (netif_running(netdev))
igbvf_reinit_locked(adapter);
return 0;
}
static void igbvf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats,
u64 *data)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
int i;
igbvf_update_stats(adapter);
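/* VF statistics registers do not clear on read, so report each counter
 * relative to the base value captured when the driver was loaded.
 */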
for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) {
char *p = (char *)adapter +
igbvf_gstrings_stats[i].stat_offset;
char *b = (char *)adapter +
igbvf_gstrings_stats[i].base_stat_offset;
data[i] = ((igbvf_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) :
(*(u32 *)p - *(u32 *)b));
}
}
static int igbvf_get_sset_count(struct net_device *dev, int stringset)
{
switch (stringset) {
case ETH_SS_TEST:
return IGBVF_TEST_LEN;
case ETH_SS_STATS:
return IGBVF_GLOBAL_STATS_LEN;
default:
return -EINVAL;
}
}
static void igbvf_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
u8 *p = data;
int i;
switch (stringset) {
case ETH_SS_TEST:
memcpy(data, *igbvf_gstrings_test, sizeof(igbvf_gstrings_test));
break;
case ETH_SS_STATS:
for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) {
memcpy(p, igbvf_gstrings_stats[i].stat_string,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
break;
}
}
static const struct ethtool_ops igbvf_ethtool_ops = {
.get_settings = igbvf_get_settings,
.set_settings = igbvf_set_settings,
.get_drvinfo = igbvf_get_drvinfo,
.get_regs_len = igbvf_get_regs_len,
.get_regs = igbvf_get_regs,
.get_wol = igbvf_get_wol,
.set_wol = igbvf_set_wol,
.get_msglevel = igbvf_get_msglevel,
.set_msglevel = igbvf_set_msglevel,
.nway_reset = igbvf_nway_reset,
.get_link = ethtool_op_get_link,
.get_eeprom_len = igbvf_get_eeprom_len,
.get_eeprom = igbvf_get_eeprom,
.set_eeprom = igbvf_set_eeprom,
.get_ringparam = igbvf_get_ringparam,
.set_ringparam = igbvf_set_ringparam,
.get_pauseparam = igbvf_get_pauseparam,
.set_pauseparam = igbvf_set_pauseparam,
.self_test = igbvf_diag_test,
.get_sset_count = igbvf_get_sset_count,
.get_strings = igbvf_get_strings,
.get_ethtool_stats = igbvf_get_ethtool_stats,
.get_coalesce = igbvf_get_coalesce,
.set_coalesce = igbvf_set_coalesce,
};
void igbvf_set_ethtool_ops(struct net_device *netdev)
{
/* have to "undeclare" const on this struct to remove warnings */
SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igbvf_ethtool_ops);
}

View File

@ -0,0 +1,326 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
/* Linux PRO/1000 Ethernet Driver main header file */
#ifndef _IGBVF_H_
#define _IGBVF_H_
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include "vf.h"
/* Forward declarations */
struct igbvf_info;
struct igbvf_adapter;
/* Interrupt defines */
#define IGBVF_START_ITR 648 /* ~6000 ints/sec */
/* Interrupt modes, as used by the IntMode parameter */
#define IGBVF_INT_MODE_LEGACY 0
#define IGBVF_INT_MODE_MSI 1
#define IGBVF_INT_MODE_MSIX 2
/* Tx/Rx descriptor defines */
#define IGBVF_DEFAULT_TXD 256
#define IGBVF_MAX_TXD 4096
#define IGBVF_MIN_TXD 80
#define IGBVF_DEFAULT_RXD 256
#define IGBVF_MAX_RXD 4096
#define IGBVF_MIN_RXD 80
#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */
#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */
/* RX descriptor control thresholds.
* PTHRESH - MAC will consider prefetch if it has fewer than this number of
* descriptors available in its onboard memory.
* Setting this to 0 disables RX descriptor prefetch.
* HTHRESH - MAC will only prefetch if there are at least this many descriptors
* available in host memory.
* If PTHRESH is 0, this should also be 0.
* WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
* descriptors until either it has this many to write back, or the
* ITR timer expires.
*/
#define IGBVF_RX_PTHRESH 16
#define IGBVF_RX_HTHRESH 8
#define IGBVF_RX_WTHRESH 1
/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
#define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */
/* How many Tx Descriptors do we need to call netif_wake_queue ? */
#define IGBVF_TX_QUEUE_WAKE 32
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
#define AUTO_ALL_MODES 0
#define IGBVF_EEPROM_APME 0x0400
#define IGBVF_MNG_VLAN_NONE (-1)
/* Number of packet split data buffers (not including the header buffer) */
#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
enum igbvf_boards {
board_vf,
board_i350_vf,
};
struct igbvf_queue_stats {
u64 packets;
u64 bytes;
};
/*
* wrappers around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer
*/
struct igbvf_buffer {
dma_addr_t dma;
struct sk_buff *skb;
union {
/* Tx */
struct {
unsigned long time_stamp;
u16 length;
u16 next_to_watch;
u16 mapped_as_page;
};
/* Rx */
struct {
struct page *page;
u64 page_dma;
unsigned int page_offset;
};
};
};
union igbvf_desc {
union e1000_adv_rx_desc rx_desc;
union e1000_adv_tx_desc tx_desc;
struct e1000_adv_tx_context_desc tx_context_desc;
};
struct igbvf_ring {
struct igbvf_adapter *adapter; /* backlink */
union igbvf_desc *desc; /* pointer to ring memory */
dma_addr_t dma; /* phys address of ring */
unsigned int size; /* length of ring in bytes */
unsigned int count; /* number of desc. in ring */
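/* next_to_use/next_to_clean track the producer and consumer positions */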
u16 next_to_use;
u16 next_to_clean;
u16 head;
u16 tail;
/* array of buffer information structs */
struct igbvf_buffer *buffer_info;
struct napi_struct napi;
char name[IFNAMSIZ + 5];
u32 eims_value;
u32 itr_val;
u16 itr_register;
int set_itr;
struct sk_buff *rx_skb_top;
struct igbvf_queue_stats stats;
};
/* board specific private data structure */
struct igbvf_adapter {
struct timer_list watchdog_timer;
struct timer_list blink_timer;
struct work_struct reset_task;
struct work_struct watchdog_task;
const struct igbvf_info *ei;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u32 bd_number;
u32 rx_buffer_len;
u32 polling_interval;
u16 mng_vlan_id;
u16 link_speed;
u16 link_duplex;
spinlock_t tx_queue_lock; /* prevent concurrent tail updates */
/* track device up/down/testing state */
unsigned long state;
/* Interrupt Throttle Rate */
u32 itr;
u32 itr_setting;
u16 tx_itr;
u16 rx_itr;
/*
* Tx
*/
struct igbvf_ring *tx_ring /* One per active queue */
____cacheline_aligned_in_smp;
unsigned int restart_queue;
u32 txd_cmd;
u32 tx_int_delay;
u32 tx_abs_int_delay;
unsigned int total_tx_bytes;
unsigned int total_tx_packets;
unsigned int total_rx_bytes;
unsigned int total_rx_packets;
/* Tx stats */
u32 tx_timeout_count;
u32 tx_fifo_head;
u32 tx_head_addr;
u32 tx_fifo_size;
u32 tx_dma_failed;
/*
* Rx
*/
struct igbvf_ring *rx_ring;
u32 rx_int_delay;
u32 rx_abs_int_delay;
/* Rx stats */
u64 hw_csum_err;
u64 hw_csum_good;
u64 rx_hdr_split;
u32 alloc_rx_buff_failed;
u32 rx_dma_failed;
unsigned int rx_ps_hdr_size;
u32 max_frame_size;
u32 min_frame_size;
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
struct net_device_stats net_stats;
spinlock_t stats_lock; /* prevent concurrent stats updates */
/* structs defined in e1000_hw.h */
struct e1000_hw hw;
/* The VF counters don't clear on read so we have to get a base
* count on driver start up and always subtract that base
* on the first update, thus the flag.
*/
struct e1000_vf_stats stats;
u64 zero_base;
struct igbvf_ring test_tx_ring;
struct igbvf_ring test_rx_ring;
u32 test_icr;
u32 msg_enable;
struct msix_entry *msix_entries;
int int_mode;
u32 eims_enable_mask;
u32 eims_other;
u32 int_counter0;
u32 int_counter1;
u32 eeprom_wol;
u32 wol;
u32 pba;
bool fc_autoneg;
unsigned long led_status;
unsigned int flags;
unsigned long last_reset;
};
struct igbvf_info {
enum e1000_mac_type mac;
unsigned int flags;
u32 pba;
void (*init_ops)(struct e1000_hw *);
s32 (*get_variants)(struct igbvf_adapter *);
};
/* hardware capability, feature, and workaround flags */
#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0)
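/* Helpers that view ring entry "i" of ring "R" as the advanced Rx, Tx
 * or Tx context descriptor layout held in union igbvf_desc.
 */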
#define IGBVF_RX_DESC_ADV(R, i) \
(&((((R).desc))[i].rx_desc))
#define IGBVF_TX_DESC_ADV(R, i) \
(&((((R).desc))[i].tx_desc))
#define IGBVF_TX_CTXTDESC_ADV(R, i) \
(&((((R).desc))[i].tx_context_desc))
enum igbvf_state_t {
__IGBVF_TESTING,
__IGBVF_RESETTING,
__IGBVF_DOWN
};
enum latency_range {
lowest_latency = 0,
low_latency = 1,
bulk_latency = 2,
latency_invalid = 255
};
extern char igbvf_driver_name[];
extern const char igbvf_driver_version[];
extern void igbvf_check_options(struct igbvf_adapter *);
extern void igbvf_set_ethtool_ops(struct net_device *);
extern int igbvf_up(struct igbvf_adapter *);
extern void igbvf_down(struct igbvf_adapter *);
extern void igbvf_reinit_locked(struct igbvf_adapter *);
extern int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *);
extern int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *);
extern void igbvf_free_rx_resources(struct igbvf_ring *);
extern void igbvf_free_tx_resources(struct igbvf_ring *);
extern void igbvf_update_stats(struct igbvf_adapter *);
extern unsigned int copybreak;
#endif /* _IGBVF_H_ */

View File

@ -0,0 +1,350 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "mbx.h"
/**
* e1000_poll_for_msg - Wait for message notification
* @hw: pointer to the HW structure
*
* returns SUCCESS if it successfully received a message notification
**/
static s32 e1000_poll_for_msg(struct e1000_hw *hw)
{
struct e1000_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
if (!mbx->ops.check_for_msg)
goto out;
while (countdown && mbx->ops.check_for_msg(hw)) {
countdown--;
udelay(mbx->usec_delay);
}
/* if we failed, all future posted messages fail until reset */
if (!countdown)
mbx->timeout = 0;
out:
return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
}
/**
* e1000_poll_for_ack - Wait for message acknowledgement
* @hw: pointer to the HW structure
*
* returns SUCCESS if it successfully received a message acknowledgement
**/
static s32 e1000_poll_for_ack(struct e1000_hw *hw)
{
struct e1000_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
if (!mbx->ops.check_for_ack)
goto out;
while (countdown && mbx->ops.check_for_ack(hw)) {
countdown--;
udelay(mbx->usec_delay);
}
/* if we failed, all future posted messages fail until reset */
if (!countdown)
mbx->timeout = 0;
out:
return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
}
/**
* e1000_read_posted_mbx - Wait for message notification and receive message
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
*
* returns SUCCESS if it successfully received a message notification and
* copied it into the receive buffer.
**/
static s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size)
{
struct e1000_mbx_info *mbx = &hw->mbx;
s32 ret_val = -E1000_ERR_MBX;
if (!mbx->ops.read)
goto out;
ret_val = e1000_poll_for_msg(hw);
/* if ack received read message, otherwise we timed out */
if (!ret_val)
ret_val = mbx->ops.read(hw, msg, size);
out:
return ret_val;
}
/**
* e1000_write_posted_mbx - Write a message to the mailbox, wait for ack
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
*
* returns SUCCESS if it successfully copied message into the buffer and
* received an ack to that message within delay * timeout period
**/
static s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size)
{
struct e1000_mbx_info *mbx = &hw->mbx;
s32 ret_val = -E1000_ERR_MBX;
/* exit if we either can't write or there isn't a defined timeout */
if (!mbx->ops.write || !mbx->timeout)
goto out;
/* send msg*/
ret_val = mbx->ops.write(hw, msg, size);
/* if msg sent wait until we receive an ack */
if (!ret_val)
ret_val = e1000_poll_for_ack(hw);
out:
return ret_val;
}
/**
* e1000_read_v2p_mailbox - read v2p mailbox
* @hw: pointer to the HW structure
*
* This function is used to read the v2p mailbox without losing the read to
* clear status bits.
**/
static u32 e1000_read_v2p_mailbox(struct e1000_hw *hw)
{
u32 v2p_mailbox = er32(V2PMAILBOX(0));
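/* fold in, and then cache, the read-to-clear status bits so that a
 * later check still sees bits cleared by this read
 */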
v2p_mailbox |= hw->dev_spec.vf.v2p_mailbox;
hw->dev_spec.vf.v2p_mailbox |= v2p_mailbox & E1000_V2PMAILBOX_R2C_BITS;
return v2p_mailbox;
}
/**
* e1000_check_for_bit_vf - Determine if a status bit was set
* @hw: pointer to the HW structure
* @mask: bitmask for bits to be tested and cleared
*
* This function is used to check for the read to clear bits within
* the V2P mailbox.
**/
static s32 e1000_check_for_bit_vf(struct e1000_hw *hw, u32 mask)
{
u32 v2p_mailbox = e1000_read_v2p_mailbox(hw);
s32 ret_val = -E1000_ERR_MBX;
if (v2p_mailbox & mask)
ret_val = E1000_SUCCESS;
hw->dev_spec.vf.v2p_mailbox &= ~mask;
return ret_val;
}
/**
* e1000_check_for_msg_vf - checks to see if the PF has sent mail
* @hw: pointer to the HW structure
*
* returns SUCCESS if the PF has set the Status bit or else ERR_MBX
**/
static s32 e1000_check_for_msg_vf(struct e1000_hw *hw)
{
s32 ret_val = -E1000_ERR_MBX;
if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFSTS)) {
ret_val = E1000_SUCCESS;
hw->mbx.stats.reqs++;
}
return ret_val;
}
/**
* e1000_check_for_ack_vf - checks to see if the PF has ACK'd
* @hw: pointer to the HW structure
*
* returns SUCCESS if the PF has set the ACK bit or else ERR_MBX
**/
static s32 e1000_check_for_ack_vf(struct e1000_hw *hw)
{
s32 ret_val = -E1000_ERR_MBX;
if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFACK)) {
ret_val = E1000_SUCCESS;
hw->mbx.stats.acks++;
}
return ret_val;
}
/**
* e1000_check_for_rst_vf - checks to see if the PF has reset
* @hw: pointer to the HW structure
*
* returns true if the PF has set the reset done bit or else false
**/
static s32 e1000_check_for_rst_vf(struct e1000_hw *hw)
{
s32 ret_val = -E1000_ERR_MBX;
if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD |
E1000_V2PMAILBOX_RSTI))) {
ret_val = E1000_SUCCESS;
hw->mbx.stats.rsts++;
}
return ret_val;
}
/**
* e1000_obtain_mbx_lock_vf - obtain mailbox lock
* @hw: pointer to the HW structure
*
* return SUCCESS if we obtained the mailbox lock
**/
static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw)
{
s32 ret_val = -E1000_ERR_MBX;
/* Take ownership of the buffer */
ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU);
/* reserve mailbox for vf use */
if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU)
ret_val = E1000_SUCCESS;
return ret_val;
}
/**
* e1000_write_mbx_vf - Write a message to the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
*
* returns SUCCESS if it successfully copied message into the buffer
**/
static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
{
s32 err;
u16 i;
/* lock the mailbox to prevent pf/vf race condition */
err = e1000_obtain_mbx_lock_vf(hw);
if (err)
goto out_no_write;
/* flush any ack or msg as we are going to overwrite mailbox */
e1000_check_for_ack_vf(hw);
e1000_check_for_msg_vf(hw);
/* copy the caller specified message to the mailbox memory buffer */
for (i = 0; i < size; i++)
array_ew32(VMBMEM(0), i, msg[i]);
/* update stats */
hw->mbx.stats.msgs_tx++;
/* Drop VFU and interrupt the PF to tell it a message has been sent */
ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_REQ);
out_no_write:
return err;
}
/**
* e1000_read_mbx_vf - Reads a message from the inbox intended for vf
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
*
* returns SUCCESS if it successfully read message from buffer
**/
static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
{
s32 err;
u16 i;
/* lock the mailbox to prevent pf/vf race condition */
err = e1000_obtain_mbx_lock_vf(hw);
if (err)
goto out_no_read;
/* copy the message from the mailbox memory buffer */
for (i = 0; i < size; i++)
msg[i] = array_er32(VMBMEM(0), i);
/* Acknowledge receipt and release mailbox, then we're done */
ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_ACK);
/* update stats */
hw->mbx.stats.msgs_rx++;
out_no_read:
return err;
}
/**
* e1000_init_mbx_params_vf - set initial values for vf mailbox
* @hw: pointer to the HW structure
*
* Initializes the hw->mbx struct to correct values for vf mailbox
*/
s32 e1000_init_mbx_params_vf(struct e1000_hw *hw)
{
struct e1000_mbx_info *mbx = &hw->mbx;
/* start mailbox as timed out and let the reset_hw call set the timeout
* value to begin communications */
mbx->timeout = 0;
mbx->usec_delay = E1000_VF_MBX_INIT_DELAY;
mbx->size = E1000_VFMAILBOX_SIZE;
mbx->ops.read = e1000_read_mbx_vf;
mbx->ops.write = e1000_write_mbx_vf;
mbx->ops.read_posted = e1000_read_posted_mbx;
mbx->ops.write_posted = e1000_write_posted_mbx;
mbx->ops.check_for_msg = e1000_check_for_msg_vf;
mbx->ops.check_for_ack = e1000_check_for_ack_vf;
mbx->ops.check_for_rst = e1000_check_for_rst_vf;
mbx->stats.msgs_tx = 0;
mbx->stats.msgs_rx = 0;
mbx->stats.reqs = 0;
mbx->stats.acks = 0;
mbx->stats.rsts = 0;
return E1000_SUCCESS;
}

View File

@ -0,0 +1,75 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _E1000_MBX_H_
#define _E1000_MBX_H_
#include "vf.h"
#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */
#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */
#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
#define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the
* PF. The reverse is true if it is E1000_PF_*.
* Message ACK's are the value or'd with 0xF0000000
*/
#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
* this are the ACK */
#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
* this are the NACK */
#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
clear to send requests */
/* We have a total wait time of 1s for vf mailbox posted messages */
#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mailbox timeout */
#define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */
#define E1000_VT_MSGINFO_SHIFT 16
/* bits 23:16 are used for extra info for certain messages */
#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
#define E1000_VF_RESET 0x01 /* VF requests reset */
#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
s32 e1000_init_mbx_params_vf(struct e1000_hw *);
#endif /* _E1000_MBX_H_ */

File diff suppressed because it is too large

View File

@ -0,0 +1,108 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _E1000_REGS_H_
#define _E1000_REGS_H_
#define E1000_CTRL 0x00000 /* Device Control - RW */
#define E1000_STATUS 0x00008 /* Device Status - RO */
#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */
#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */
#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
/*
* Convenience macros
*
* Note: "_n" is the queue number of the register to be written to.
*
* Example usage:
* E1000_RDBAL(current_rx_queue)
*/
#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
(0x0C000 + ((_n) * 0x40)))
#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
(0x0C004 + ((_n) * 0x40)))
#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
(0x0C008 + ((_n) * 0x40)))
#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
(0x0C00C + ((_n) * 0x40)))
#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
(0x0C010 + ((_n) * 0x40)))
#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
(0x0C018 + ((_n) * 0x40)))
#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
(0x0C028 + ((_n) * 0x40)))
#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
(0x0E000 + ((_n) * 0x40)))
#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
(0x0E004 + ((_n) * 0x40)))
#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
(0x0E008 + ((_n) * 0x40)))
#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
(0x0E010 + ((_n) * 0x40)))
#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
(0x0E018 + ((_n) * 0x40)))
#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
(0x0E028 + ((_n) * 0x40)))
#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8))
#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8))
#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
(0x054E0 + ((_i - 16) * 8)))
#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
(0x054E4 + ((_i - 16) * 8)))
/* Statistics registers */
#define E1000_VFGPRC 0x00F10
#define E1000_VFGORC 0x00F18
#define E1000_VFMPRC 0x00F3C
#define E1000_VFGPTC 0x00F14
#define E1000_VFGOTC 0x00F34
#define E1000_VFGOTLBC 0x00F50
#define E1000_VFGPTLBC 0x00F44
#define E1000_VFGORLBC 0x00F48
#define E1000_VFGPRLBC 0x00F40
/* These act per VF so an array friendly macro is used */
#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n)))
#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
/* Define macros for handling registers */
#define er32(reg) readl(hw->hw_addr + E1000_##reg)
#define ew32(reg, val) writel((val), hw->hw_addr + E1000_##reg)
#define array_er32(reg, offset) \
readl(hw->hw_addr + E1000_##reg + (offset << 2))
#define array_ew32(reg, offset, val) \
writel((val), hw->hw_addr + E1000_##reg + (offset << 2))
#define e1e_flush() er32(STATUS)
#endif

View File

@ -0,0 +1,402 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "vf.h"
static s32 e1000_check_for_link_vf(struct e1000_hw *hw);
static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
u16 *duplex);
static s32 e1000_init_hw_vf(struct e1000_hw *hw);
static s32 e1000_reset_hw_vf(struct e1000_hw *hw);
static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *,
u32, u32, u32);
static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32);
static s32 e1000_read_mac_addr_vf(struct e1000_hw *);
static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool);
/**
* e1000_init_mac_params_vf - Inits MAC params
* @hw: pointer to the HW structure
**/
static s32 e1000_init_mac_params_vf(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
/* VFs have no MTA registers - PF feature only */
mac->mta_reg_count = 128;
/* VFs have no access to RAR entries */
mac->rar_entry_count = 1;
/* Function pointers */
/* reset */
mac->ops.reset_hw = e1000_reset_hw_vf;
/* hw initialization */
mac->ops.init_hw = e1000_init_hw_vf;
/* check for link */
mac->ops.check_for_link = e1000_check_for_link_vf;
/* link info */
mac->ops.get_link_up_info = e1000_get_link_up_info_vf;
/* multicast address update */
mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf;
/* set mac address */
mac->ops.rar_set = e1000_rar_set_vf;
/* read mac address */
mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
/* set vlan filter table array */
mac->ops.set_vfta = e1000_set_vfta_vf;
return E1000_SUCCESS;
}
/**
* e1000_init_function_pointers_vf - Inits function pointers
* @hw: pointer to the HW structure
**/
void e1000_init_function_pointers_vf(struct e1000_hw *hw)
{
hw->mac.ops.init_params = e1000_init_mac_params_vf;
hw->mbx.ops.init_params = e1000_init_mbx_params_vf;
}
/**
* e1000_get_link_up_info_vf - Gets link info.
* @hw: pointer to the HW structure
* @speed: pointer to 16 bit value to store link speed.
* @duplex: pointer to 16 bit value to store duplex.
*
* Since we cannot read the PHY and get accurate link info, we must rely upon
* the status register's data which is often stale and inaccurate.
**/
static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
u16 *duplex)
{
s32 status;
status = er32(STATUS);
if (status & E1000_STATUS_SPEED_1000)
*speed = SPEED_1000;
else if (status & E1000_STATUS_SPEED_100)
*speed = SPEED_100;
else
*speed = SPEED_10;
if (status & E1000_STATUS_FD)
*duplex = FULL_DUPLEX;
else
*duplex = HALF_DUPLEX;
return E1000_SUCCESS;
}
/**
* e1000_reset_hw_vf - Resets the HW
* @hw: pointer to the HW structure
*
* VFs provide a function-level reset. This is done using bit 26 of ctrl_reg.
* This is all the reset we can perform on a VF.
**/
static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
{
struct e1000_mbx_info *mbx = &hw->mbx;
u32 timeout = E1000_VF_INIT_TIMEOUT;
u32 ret_val = -E1000_ERR_MAC_INIT;
u32 msgbuf[3];
u8 *addr = (u8 *)(&msgbuf[1]);
u32 ctrl;
/* assert vf queue/interrupt reset */
ctrl = er32(CTRL);
ew32(CTRL, ctrl | E1000_CTRL_RST);
/* we cannot initialize while the RSTI / RSTD bits are asserted */
while (!mbx->ops.check_for_rst(hw) && timeout) {
timeout--;
udelay(5);
}
if (timeout) {
/* mailbox timeout can now become active */
mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT;
/* notify pf of vf reset completion */
msgbuf[0] = E1000_VF_RESET;
mbx->ops.write_posted(hw, msgbuf, 1);
msleep(10);
/* set our "perm_addr" based on info provided by PF */
ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
if (!ret_val) {
if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK))
memcpy(hw->mac.perm_addr, addr, 6);
else
ret_val = -E1000_ERR_MAC_INIT;
}
}
return ret_val;
}
/**
* e1000_init_hw_vf - Inits the HW
* @hw: pointer to the HW structure
*
* Not much to do here except clear the PF Reset indication if there is one.
**/
static s32 e1000_init_hw_vf(struct e1000_hw *hw)
{
/* attempt to set and restore our mac address */
e1000_rar_set_vf(hw, hw->mac.addr, 0);
return E1000_SUCCESS;
}
/**
* e1000_hash_mc_addr_vf - Generate a multicast hash value
* @hw: pointer to the HW structure
* @mc_addr: pointer to a multicast address
*
* Generates a multicast address hash value which is used to determine
* the multicast filter table array address and new table value. See
* e1000_mta_set_generic()
**/
static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
{
u32 hash_value, hash_mask;
u8 bit_shift = 0;
/* Register count multiplied by bits per register */
hash_mask = (hw->mac.mta_reg_count * 32) - 1;
/*
* The bit_shift is the number of left-shifts
* where 0xFF would still fall within the hash mask.
*/
while (hash_mask >> bit_shift != 0xFF)
bit_shift++;
hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
(((u16) mc_addr[5]) << bit_shift)));
return hash_value;
}
/**
* e1000_update_mc_addr_list_vf - Update Multicast addresses
* @hw: pointer to the HW structure
* @mc_addr_list: array of multicast addresses to program
* @mc_addr_count: number of multicast addresses to program
* @rar_used_count: the first RAR register free to program
* @rar_count: total number of supported Receive Address Registers
*
* Updates the Receive Address Registers and Multicast Table Array.
* The caller must have a packed mc_addr_list of multicast addresses.
* The parameter rar_count will usually be hw->mac.rar_entry_count
* unless there are workarounds that change this.
**/
static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
u8 *mc_addr_list, u32 mc_addr_count,
u32 rar_used_count, u32 rar_count)
{
struct e1000_mbx_info *mbx = &hw->mbx;
u32 msgbuf[E1000_VFMAILBOX_SIZE];
u16 *hash_list = (u16 *)&msgbuf[1];
u32 hash_value;
u32 cnt, i;
/* Each entry in the list uses 1 16 bit word. We have 30
* 16 bit words available in our HW msg buffer (minus 1 for the
* msg type). That's 30 hash values if we pack 'em right. If
* there are more than 30 MC addresses to add then punt the
* extras for now and then add code to handle more than 30 later.
* It would be unusual for a server to request that many multi-cast
* addresses except for in large enterprise network environments.
*/
cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
msgbuf[0] = E1000_VF_SET_MULTICAST;
msgbuf[0] |= cnt << E1000_VT_MSGINFO_SHIFT;
for (i = 0; i < cnt; i++) {
hash_value = e1000_hash_mc_addr_vf(hw, mc_addr_list);
hash_list[i] = hash_value & 0x0FFFF;
mc_addr_list += ETH_ADDR_LEN;
}
mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE);
}
/**
* e1000_set_vfta_vf - Set/Unset vlan filter table address
* @hw: pointer to the HW structure
* @vid: determines the vfta register and bit to set/unset
* @set: if true then set bit, else clear bit
**/
static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set)
{
struct e1000_mbx_info *mbx = &hw->mbx;
u32 msgbuf[2];
s32 err;
msgbuf[0] = E1000_VF_SET_VLAN;
msgbuf[1] = vid;
/* Setting the 8 bit field MSG INFO to true indicates "add" */
if (set)
msgbuf[0] |= 1 << E1000_VT_MSGINFO_SHIFT;
mbx->ops.write_posted(hw, msgbuf, 2);
err = mbx->ops.read_posted(hw, msgbuf, 2);
msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
/* if nacked the vlan was rejected */
if (!err && (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK)))
err = -E1000_ERR_MAC_INIT;
return err;
}
/**
 * e1000_rlpml_set_vf - Set the maximum receive packet length
* @hw: pointer to the HW structure
* @max_size: value to assign to max frame size
**/
void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size)
{
struct e1000_mbx_info *mbx = &hw->mbx;
u32 msgbuf[2];
msgbuf[0] = E1000_VF_SET_LPE;
msgbuf[1] = max_size;
mbx->ops.write_posted(hw, msgbuf, 2);
}
/**
* e1000_rar_set_vf - set device MAC address
* @hw: pointer to the HW structure
* @addr: pointer to the receive address
* @index: receive address array register
**/
static void e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr, u32 index)
{
struct e1000_mbx_info *mbx = &hw->mbx;
u32 msgbuf[3];
u8 *msg_addr = (u8 *)(&msgbuf[1]);
s32 ret_val;
memset(msgbuf, 0, 12);
msgbuf[0] = E1000_VF_SET_MAC_ADDR;
memcpy(msg_addr, addr, 6);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
if (!ret_val)
ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
/* if nacked the address was rejected, use "perm_addr" */
if (!ret_val &&
(msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK)))
e1000_read_mac_addr_vf(hw);
}
/**
* e1000_read_mac_addr_vf - Read device MAC address
* @hw: pointer to the HW structure
**/
static s32 e1000_read_mac_addr_vf(struct e1000_hw *hw)
{
int i;
for (i = 0; i < ETH_ADDR_LEN; i++)
hw->mac.addr[i] = hw->mac.perm_addr[i];
return E1000_SUCCESS;
}
/**
* e1000_check_for_link_vf - Check for link for a virtual interface
* @hw: pointer to the HW structure
*
* Checks to see if the underlying PF is still talking to the VF and
* if it is then it reports the link state to the hardware, otherwise
* it reports link down and returns an error.
**/
static s32 e1000_check_for_link_vf(struct e1000_hw *hw)
{
struct e1000_mbx_info *mbx = &hw->mbx;
struct e1000_mac_info *mac = &hw->mac;
s32 ret_val = E1000_SUCCESS;
u32 in_msg = 0;
/*
* We only want to run this if there has been a rst asserted.
* in this case that could mean a link change, device reset,
* or a virtual function reset
*/
/* If we were hit with a reset or timeout drop the link */
if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
mac->get_link_status = true;
if (!mac->get_link_status)
goto out;
/* if link status is down no point in checking to see if pf is up */
if (!(er32(STATUS) & E1000_STATUS_LU))
goto out;
/* if the read failed it could just be a mailbox collision, best wait
* until we are called again and don't report an error */
if (mbx->ops.read(hw, &in_msg, 1))
goto out;
/* if incoming message isn't clear to send we are waiting on response */
if (!(in_msg & E1000_VT_MSGTYPE_CTS)) {
/* if the message is not CTS and is a NACK we must have lost CTS status */
if (in_msg & E1000_VT_MSGTYPE_NACK)
ret_val = -E1000_ERR_MAC_INIT;
goto out;
}
/* the pf is talking, if we timed out in the past we reinit */
if (!mbx->timeout) {
ret_val = -E1000_ERR_MAC_INIT;
goto out;
}
/* if we passed all the tests above then the link is up and we no
* longer need to check for link */
mac->get_link_status = false;
out:
return ret_val;
}

View File

@ -0,0 +1,266 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _E1000_VF_H_
#define _E1000_VF_H_
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include "regs.h"
#include "defines.h"
struct e1000_hw;
#define E1000_DEV_ID_82576_VF 0x10CA
#define E1000_DEV_ID_I350_VF 0x1520
#define E1000_REVISION_0 0
#define E1000_REVISION_1 1
#define E1000_REVISION_2 2
#define E1000_REVISION_3 3
#define E1000_REVISION_4 4
#define E1000_FUNC_0 0
#define E1000_FUNC_1 1
/*
* Receive Address Register Count
* Number of high/low register pairs in the RAR. The RAR (Receive Address
* Registers) holds the directed and multicast addresses that we monitor.
* These entries are also used for MAC-based filtering.
*/
#define E1000_RAR_ENTRIES_VF 1
/* Receive Descriptor - Advanced */
union e1000_adv_rx_desc {
struct {
u64 pkt_addr; /* Packet buffer address */
u64 hdr_addr; /* Header buffer address */
} read;
struct {
struct {
union {
u32 data;
struct {
u16 pkt_info; /* RSS/Packet type */
u16 hdr_info; /* Split Header,
* hdr buffer length */
} hs_rss;
} lo_dword;
union {
u32 rss; /* RSS Hash */
struct {
u16 ip_id; /* IP id */
u16 csum; /* Packet Checksum */
} csum_ip;
} hi_dword;
} lower;
struct {
u32 status_error; /* ext status/error */
u16 length; /* Packet length */
u16 vlan; /* VLAN tag */
} upper;
} wb; /* writeback */
};
#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
/* Transmit Descriptor - Advanced */
union e1000_adv_tx_desc {
struct {
u64 buffer_addr; /* Address of descriptor's data buf */
u32 cmd_type_len;
u32 olinfo_status;
} read;
struct {
u64 rsvd; /* Reserved */
u32 nxtseq_seed;
u32 status;
} wb;
};
/* Adv Transmit Descriptor Config Masks */
#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
/* Context descriptors */
struct e1000_adv_tx_context_desc {
u32 vlan_macip_lens;
u32 seqnum_seed;
u32 type_tucmd_mlhl;
u32 mss_l4len_idx;
};
#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
enum e1000_mac_type {
e1000_undefined = 0,
e1000_vfadapt,
e1000_vfadapt_i350,
e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
};
struct e1000_vf_stats {
u64 base_gprc;
u64 base_gptc;
u64 base_gorc;
u64 base_gotc;
u64 base_mprc;
u64 base_gotlbc;
u64 base_gptlbc;
u64 base_gorlbc;
u64 base_gprlbc;
u32 last_gprc;
u32 last_gptc;
u32 last_gorc;
u32 last_gotc;
u32 last_mprc;
u32 last_gotlbc;
u32 last_gptlbc;
u32 last_gorlbc;
u32 last_gprlbc;
u64 gprc;
u64 gptc;
u64 gorc;
u64 gotc;
u64 mprc;
u64 gotlbc;
u64 gptlbc;
u64 gorlbc;
u64 gprlbc;
};
#include "mbx.h"
struct e1000_mac_operations {
/* Function pointers for the MAC. */
s32 (*init_params)(struct e1000_hw *);
s32 (*check_for_link)(struct e1000_hw *);
void (*clear_vfta)(struct e1000_hw *);
s32 (*get_bus_info)(struct e1000_hw *);
s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32);
s32 (*reset_hw)(struct e1000_hw *);
s32 (*init_hw)(struct e1000_hw *);
s32 (*setup_link)(struct e1000_hw *);
void (*write_vfta)(struct e1000_hw *, u32, u32);
void (*mta_set)(struct e1000_hw *, u32);
void (*rar_set)(struct e1000_hw *, u8*, u32);
s32 (*read_mac_addr)(struct e1000_hw *);
s32 (*set_vfta)(struct e1000_hw *, u16, bool);
};
struct e1000_mac_info {
struct e1000_mac_operations ops;
u8 addr[6];
u8 perm_addr[6];
enum e1000_mac_type type;
u16 mta_reg_count;
u16 rar_entry_count;
bool get_link_status;
};
struct e1000_mbx_operations {
s32 (*init_params)(struct e1000_hw *hw);
s32 (*read)(struct e1000_hw *, u32 *, u16);
s32 (*write)(struct e1000_hw *, u32 *, u16);
s32 (*read_posted)(struct e1000_hw *, u32 *, u16);
s32 (*write_posted)(struct e1000_hw *, u32 *, u16);
s32 (*check_for_msg)(struct e1000_hw *);
s32 (*check_for_ack)(struct e1000_hw *);
s32 (*check_for_rst)(struct e1000_hw *);
};
struct e1000_mbx_stats {
u32 msgs_tx;
u32 msgs_rx;
u32 acks;
u32 reqs;
u32 rsts;
};
struct e1000_mbx_info {
struct e1000_mbx_operations ops;
struct e1000_mbx_stats stats;
u32 timeout;
u32 usec_delay;
u16 size;
};
struct e1000_dev_spec_vf {
u32 vf_number;
u32 v2p_mailbox;
};
struct e1000_hw {
void *back;
u8 __iomem *hw_addr;
u8 __iomem *flash_address;
unsigned long io_base;
struct e1000_mac_info mac;
struct e1000_mbx_info mbx;
union {
struct e1000_dev_spec_vf vf;
} dev_spec;
u16 device_id;
u16 subsystem_vendor_id;
u16 subsystem_device_id;
u16 vendor_id;
u8 revision_id;
};
/* These functions must be implemented by drivers */
void e1000_rlpml_set_vf(struct e1000_hw *, u16);
void e1000_init_function_pointers_vf(struct e1000_hw *hw);
#endif /* _E1000_VF_H_ */

View File

@ -0,0 +1,35 @@
################################################################################
#
# Intel PRO/10GbE Linux driver
# Copyright(c) 1999 - 2008 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
#
# Contact Information:
# Linux NICS <linux.nics@intel.com>
# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
#
################################################################################
#
# Makefile for the Intel(R) PRO/10GbE ethernet driver
#
obj-$(CONFIG_IXGB) += ixgb.o
ixgb-objs := ixgb_main.o ixgb_hw.o ixgb_ee.o ixgb_ethtool.o ixgb_param.o

View File

@ -0,0 +1,219 @@
/*******************************************************************************
Intel PRO/10GbE Linux driver
Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _IXGB_H_
#define _IXGB_H_
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/capability.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/pkt_sched.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <net/checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#define BAR_0 0
#define BAR_1 1
#define BAR_5 5
struct ixgb_adapter;
#include "ixgb_hw.h"
#include "ixgb_ee.h"
#include "ixgb_ids.h"
#define PFX "ixgb: "
#ifdef _DEBUG_DRIVER_
#define IXGB_DBG(fmt, args...) printk(KERN_DEBUG PFX fmt, ##args)
#else
#define IXGB_DBG(fmt, args...) \
do { \
if (0) \
printk(KERN_DEBUG PFX fmt, ##args); \
} while (0)
#endif
/* TX/RX descriptor defines */
#define DEFAULT_TXD 256
#define MAX_TXD 4096
#define MIN_TXD 64
/* the hardware cannot reliably support more than 512 descriptors owned
 * by the hardware descriptor cache, otherwise an unreliable ring under
 * heavy receive load may result */
#define DEFAULT_RXD 512
#define MAX_RXD 512
#define MIN_RXD 64
/* Supported Rx Buffer Sizes */
#define IXGB_RXBUFFER_2048 2048
#define IXGB_RXBUFFER_4096 4096
#define IXGB_RXBUFFER_8192 8192
#define IXGB_RXBUFFER_16384 16384
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGB_RX_BUFFER_WRITE 8 /* Must be power of 2 */
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct ixgb_buffer {
struct sk_buff *skb;
dma_addr_t dma;
unsigned long time_stamp;
u16 length;
u16 next_to_watch;
u16 mapped_as_page;
};
struct ixgb_desc_ring {
/* pointer to the descriptor ring memory */
void *desc;
/* physical address of the descriptor ring */
dma_addr_t dma;
/* length of descriptor ring in bytes */
unsigned int size;
/* number of descriptors in the ring */
unsigned int count;
/* next descriptor to associate a buffer with */
unsigned int next_to_use;
/* next descriptor to check for DD status bit */
unsigned int next_to_clean;
/* array of buffer information structs */
struct ixgb_buffer *buffer_info;
};
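/* Free descriptors in the ring, allowing for wraparound; one slot is
 * kept unused so a full ring can be told apart from an empty one.
 */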
#define IXGB_DESC_UNUSED(R) \
((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
#define IXGB_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
#define IXGB_RX_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_rx_desc)
#define IXGB_TX_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_tx_desc)
#define IXGB_CONTEXT_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_context_desc)
/* board specific private data structure */
struct ixgb_adapter {
struct timer_list watchdog_timer;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u32 bd_number;
u32 rx_buffer_len;
u32 part_num;
u16 link_speed;
u16 link_duplex;
struct work_struct tx_timeout_task;
/* TX */
struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp;
unsigned int restart_queue;
unsigned long timeo_start;
u32 tx_cmd_type;
u64 hw_csum_tx_good;
u64 hw_csum_tx_error;
u32 tx_int_delay;
u32 tx_timeout_count;
bool tx_int_delay_enable;
bool detect_tx_hung;
/* RX */
struct ixgb_desc_ring rx_ring;
u64 hw_csum_rx_error;
u64 hw_csum_rx_good;
u32 rx_int_delay;
bool rx_csum;
/* OS defined structs */
struct napi_struct napi;
struct net_device *netdev;
struct pci_dev *pdev;
/* structs defined in ixgb_hw.h */
struct ixgb_hw hw;
u16 msg_enable;
struct ixgb_hw_stats stats;
u32 alloc_rx_buff_failed;
bool have_msi;
unsigned long flags;
};
enum ixgb_state_t {
/* TBD
__IXGB_TESTING,
__IXGB_RESETTING,
*/
__IXGB_DOWN
};
/* Exported from other modules */
extern void ixgb_check_options(struct ixgb_adapter *adapter);
extern void ixgb_set_ethtool_ops(struct net_device *netdev);
extern char ixgb_driver_name[];
extern const char ixgb_driver_version[];
extern void ixgb_set_speed_duplex(struct net_device *netdev);
extern int ixgb_up(struct ixgb_adapter *adapter);
extern void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
extern void ixgb_reset(struct ixgb_adapter *adapter);
extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
extern void ixgb_update_stats(struct ixgb_adapter *adapter);
#endif /* _IXGB_H_ */

View File

@ -0,0 +1,607 @@
/*******************************************************************************
Intel PRO/10GbE Linux driver
Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ixgb_hw.h"
#include "ixgb_ee.h"
/* Local prototypes */
static u16 ixgb_shift_in_bits(struct ixgb_hw *hw);
static void ixgb_shift_out_bits(struct ixgb_hw *hw,
u16 data,
u16 count);
static void ixgb_standby_eeprom(struct ixgb_hw *hw);
static bool ixgb_wait_eeprom_command(struct ixgb_hw *hw);
static void ixgb_cleanup_eeprom(struct ixgb_hw *hw);
/******************************************************************************
* Raises the EEPROM's clock input.
*
* hw - Struct containing variables accessed by shared code
* eecd_reg - EECD's current value
*****************************************************************************/
static void
ixgb_raise_clock(struct ixgb_hw *hw,
u32 *eecd_reg)
{
/* Raise the clock input to the EEPROM (by setting the SK bit), and then
* wait 50 microseconds.
*/
*eecd_reg = *eecd_reg | IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, *eecd_reg);
IXGB_WRITE_FLUSH(hw);
udelay(50);
}
/******************************************************************************
* Lowers the EEPROM's clock input.
*
* hw - Struct containing variables accessed by shared code
* eecd_reg - EECD's current value
*****************************************************************************/
static void
ixgb_lower_clock(struct ixgb_hw *hw,
u32 *eecd_reg)
{
/* Lower the clock input to the EEPROM (by clearing the SK bit), and then
* wait 50 microseconds.
*/
*eecd_reg = *eecd_reg & ~IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, *eecd_reg);
IXGB_WRITE_FLUSH(hw);
udelay(50);
}
/******************************************************************************
* Shift data bits out to the EEPROM.
*
* hw - Struct containing variables accessed by shared code
* data - data to send to the EEPROM
* count - number of bits to shift out
*****************************************************************************/
static void
ixgb_shift_out_bits(struct ixgb_hw *hw,
u16 data,
u16 count)
{
u32 eecd_reg;
u32 mask;
/* We need to shift "count" bits out to the EEPROM. So, value in the
* "data" parameter will be shifted out to the EEPROM one bit at a time.
* In order to do this, "data" must be broken down into bits.
*/
mask = 0x01 << (count - 1);
eecd_reg = IXGB_READ_REG(hw, EECD);
eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI);
do {
/* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
* and then raising and then lowering the clock (the SK bit controls
* the clock input to the EEPROM). A "0" is shifted out to the EEPROM
* by setting "DI" to "0" and then raising and then lowering the clock.
*/
eecd_reg &= ~IXGB_EECD_DI;
if (data & mask)
eecd_reg |= IXGB_EECD_DI;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
IXGB_WRITE_FLUSH(hw);
udelay(50);
ixgb_raise_clock(hw, &eecd_reg);
ixgb_lower_clock(hw, &eecd_reg);
mask = mask >> 1;
} while (mask);
/* We leave the "DI" bit set to "0" when we leave this routine. */
eecd_reg &= ~IXGB_EECD_DI;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
}
/******************************************************************************
* Shift data bits in from the EEPROM
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
static u16
ixgb_shift_in_bits(struct ixgb_hw *hw)
{
u32 eecd_reg;
u32 i;
u16 data;
/* In order to read a register from the EEPROM, we need to shift 16 bits
* in from the EEPROM. Bits are "shifted in" by raising the clock input to
* the EEPROM (setting the SK bit), and then reading the value of the "DO"
* bit. During this "shifting in" process the "DI" bit should always be
 * clear.
*/
eecd_reg = IXGB_READ_REG(hw, EECD);
eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI);
data = 0;
for (i = 0; i < 16; i++) {
data = data << 1;
ixgb_raise_clock(hw, &eecd_reg);
eecd_reg = IXGB_READ_REG(hw, EECD);
eecd_reg &= ~(IXGB_EECD_DI);
if (eecd_reg & IXGB_EECD_DO)
data |= 1;
ixgb_lower_clock(hw, &eecd_reg);
}
return data;
}
/******************************************************************************
* Prepares EEPROM for access
*
* hw - Struct containing variables accessed by shared code
*
* Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This
* function should be called before issuing a command to the EEPROM.
*****************************************************************************/
static void
ixgb_setup_eeprom(struct ixgb_hw *hw)
{
u32 eecd_reg;
eecd_reg = IXGB_READ_REG(hw, EECD);
/* Clear SK and DI */
eecd_reg &= ~(IXGB_EECD_SK | IXGB_EECD_DI);
IXGB_WRITE_REG(hw, EECD, eecd_reg);
/* Set CS */
eecd_reg |= IXGB_EECD_CS;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
}
/******************************************************************************
* Returns EEPROM to a "standby" state
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
static void
ixgb_standby_eeprom(struct ixgb_hw *hw)
{
u32 eecd_reg;
eecd_reg = IXGB_READ_REG(hw, EECD);
/* Deselect EEPROM */
eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK);
IXGB_WRITE_REG(hw, EECD, eecd_reg);
IXGB_WRITE_FLUSH(hw);
udelay(50);
/* Clock high */
eecd_reg |= IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
IXGB_WRITE_FLUSH(hw);
udelay(50);
/* Select EEPROM */
eecd_reg |= IXGB_EECD_CS;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
IXGB_WRITE_FLUSH(hw);
udelay(50);
/* Clock low */
eecd_reg &= ~IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
IXGB_WRITE_FLUSH(hw);
udelay(50);
}
/******************************************************************************
* Raises then lowers the EEPROM's clock pin
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
static void
ixgb_clock_eeprom(struct ixgb_hw *hw)
{
u32 eecd_reg;
eecd_reg = IXGB_READ_REG(hw, EECD);
/* Rising edge of clock */
eecd_reg |= IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
IXGB_WRITE_FLUSH(hw);
udelay(50);
/* Falling edge of clock */
eecd_reg &= ~IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
IXGB_WRITE_FLUSH(hw);
udelay(50);
}
/******************************************************************************
* Terminates a command by lowering the EEPROM's chip select pin
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
static void
ixgb_cleanup_eeprom(struct ixgb_hw *hw)
{
u32 eecd_reg;
eecd_reg = IXGB_READ_REG(hw, EECD);
eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_DI);
IXGB_WRITE_REG(hw, EECD, eecd_reg);
ixgb_clock_eeprom(hw);
}
/******************************************************************************
* Waits for the EEPROM to finish the current command.
*
* hw - Struct containing variables accessed by shared code
*
* The command is done when the EEPROM's data out pin goes high.
*
* Returns:
* true: EEPROM data pin is high before timeout.
* false: Time expired.
*****************************************************************************/
static bool
ixgb_wait_eeprom_command(struct ixgb_hw *hw)
{
u32 eecd_reg;
u32 i;
/* Toggle the CS line. This in effect tells the EEPROM to actually execute
* the command in question.
*/
ixgb_standby_eeprom(hw);
/* Now read DO repeatedly until it is high (equal to '1'). The EEPROM will
* signal that the command has been completed by raising the DO signal.
* If DO does not go high in 10 milliseconds, then error out.
*/
for (i = 0; i < 200; i++) {
eecd_reg = IXGB_READ_REG(hw, EECD);
if (eecd_reg & IXGB_EECD_DO)
return true;
udelay(50);
}
ASSERT(0);
return false;
}
/******************************************************************************
* Verifies that the EEPROM has a valid checksum
*
* hw - Struct containing variables accessed by shared code
*
 * Reads the first 64 16-bit words of the EEPROM and sums the values read.
 * If the sum of the 64 16-bit words is 0xBABA, the EEPROM's checksum is
* valid.
*
* Returns:
* true: Checksum is valid
* false: Checksum is not valid.
*****************************************************************************/
bool
ixgb_validate_eeprom_checksum(struct ixgb_hw *hw)
{
u16 checksum = 0;
u16 i;
for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++)
checksum += ixgb_read_eeprom(hw, i);
if (checksum == (u16) EEPROM_SUM)
return true;
else
return false;
}
/******************************************************************************
* Calculates the EEPROM checksum and writes it to the EEPROM
*
* hw - Struct containing variables accessed by shared code
*
 * Sums the first 63 16-bit words of the EEPROM. Subtracts the sum from 0xBABA.
* Writes the difference to word offset 63 of the EEPROM.
*****************************************************************************/
void
ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
{
u16 checksum = 0;
u16 i;
for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
checksum += ixgb_read_eeprom(hw, i);
checksum = (u16) EEPROM_SUM - checksum;
ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum);
}
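/* Worked example of the checksum rule above (illustrative numbers only, not
 * from the original source): if words 0x00..0x3E sum to 0xA000, the value
 * written to word 0x3F is 0xBABA - 0xA000 = 0x1ABA, so that the 16-bit sum
 * over all 64 words comes out to EEPROM_SUM (0xBABA), which is exactly what
 * ixgb_validate_eeprom_checksum() verifies. */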
/******************************************************************************
* Writes a 16 bit word to a given offset in the EEPROM.
*
* hw - Struct containing variables accessed by shared code
* reg - offset within the EEPROM to be written to
* data - 16 bit word to be written to the EEPROM
*
* If ixgb_update_eeprom_checksum is not called after this function, the
* EEPROM will most likely contain an invalid checksum.
*
*****************************************************************************/
void
ixgb_write_eeprom(struct ixgb_hw *hw, u16 offset, u16 data)
{
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
/* Prepare the EEPROM for writing */
ixgb_setup_eeprom(hw);
/* Send the 9-bit EWEN (write enable) command to the EEPROM (5-bit opcode
* plus 4-bit dummy). This puts the EEPROM into write/erase mode.
*/
ixgb_shift_out_bits(hw, EEPROM_EWEN_OPCODE, 5);
ixgb_shift_out_bits(hw, 0, 4);
/* Prepare the EEPROM */
ixgb_standby_eeprom(hw);
/* Send the Write command (3-bit opcode + 6-bit addr) */
ixgb_shift_out_bits(hw, EEPROM_WRITE_OPCODE, 3);
ixgb_shift_out_bits(hw, offset, 6);
/* Send the data */
ixgb_shift_out_bits(hw, data, 16);
ixgb_wait_eeprom_command(hw);
/* Recover from write */
ixgb_standby_eeprom(hw);
/* Send the 9-bit EWDS (write disable) command to the EEPROM (5-bit
* opcode plus 4-bit dummy). This takes the EEPROM out of write/erase
* mode.
*/
ixgb_shift_out_bits(hw, EEPROM_EWDS_OPCODE, 5);
ixgb_shift_out_bits(hw, 0, 4);
/* Done with writing */
ixgb_cleanup_eeprom(hw);
/* clear the init_ctrl_reg_1 to signify that the cache is invalidated */
ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
}
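/* Hypothetical usage sketch (editorial illustration, not part of the driver):
 * a caller that changes one EEPROM word must refresh the checksum afterwards,
 * otherwise ixgb_validate_eeprom_checksum() will fail on the next validation.
 * The helper name below is invented for the example. */
#if 0	/* illustrative only */
static void example_set_subsystem_id(struct ixgb_hw *hw, u16 new_id)
{
	/* write the new word, then recompute word 0x3F so the sum is 0xBABA */
	ixgb_write_eeprom(hw, EEPROM_SUBSYS_ID_REG, new_id);
	ixgb_update_eeprom_checksum(hw);
}
#endif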
/******************************************************************************
* Reads a 16 bit word from the EEPROM.
*
* hw - Struct containing variables accessed by shared code
* offset - offset of 16 bit word in the EEPROM to read
*
* Returns:
* The 16-bit value read from the eeprom
*****************************************************************************/
u16
ixgb_read_eeprom(struct ixgb_hw *hw,
u16 offset)
{
u16 data;
/* Prepare the EEPROM for reading */
ixgb_setup_eeprom(hw);
/* Send the READ command (opcode + addr) */
ixgb_shift_out_bits(hw, EEPROM_READ_OPCODE, 3);
/*
 * We have a 64-word EEPROM, so there are 6 address bits
*/
ixgb_shift_out_bits(hw, offset, 6);
/* Read the data */
data = ixgb_shift_in_bits(hw);
/* End this read operation */
ixgb_standby_eeprom(hw);
return data;
}
/******************************************************************************
* Reads eeprom and stores data in shared structure.
* Validates eeprom checksum and eeprom signature.
*
* hw - Struct containing variables accessed by shared code
*
* Returns:
* true: if eeprom read is successful
* false: otherwise.
*****************************************************************************/
bool
ixgb_get_eeprom_data(struct ixgb_hw *hw)
{
u16 i;
u16 checksum = 0;
struct ixgb_ee_map_type *ee_map;
ENTER();
ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
pr_debug("Reading eeprom data\n");
for (i = 0; i < IXGB_EEPROM_SIZE ; i++) {
u16 ee_data;
ee_data = ixgb_read_eeprom(hw, i);
checksum += ee_data;
hw->eeprom[i] = cpu_to_le16(ee_data);
}
if (checksum != (u16) EEPROM_SUM) {
pr_debug("Checksum invalid\n");
/* clear the init_ctrl_reg_1 to signify that the cache is
* invalidated */
ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
return false;
}
if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
!= cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
pr_debug("Signature invalid\n");
return false;
}
return true;
}
/******************************************************************************
* Local function to check if the eeprom signature is good
 * If the eeprom signature is good, calls ixgb_get_eeprom_data.
*
* hw - Struct containing variables accessed by shared code
*
* Returns:
* true: eeprom signature was good and the eeprom read was successful
* false: otherwise.
******************************************************************************/
static bool
ixgb_check_and_get_eeprom_data(struct ixgb_hw *hw)
{
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
== cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
return true;
} else {
return ixgb_get_eeprom_data(hw);
}
}
/******************************************************************************
* return a word from the eeprom
*
* hw - Struct containing variables accessed by shared code
* index - Offset of eeprom word
*
* Returns:
* Word at indexed offset in eeprom, if valid, 0 otherwise.
******************************************************************************/
__le16
ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index)
{
if ((index < IXGB_EEPROM_SIZE) &&
(ixgb_check_and_get_eeprom_data(hw) == true)) {
return hw->eeprom[index];
}
return 0;
}
/******************************************************************************
* return the mac address from EEPROM
*
* hw - Struct containing variables accessed by shared code
* mac_addr - Ethernet Address if EEPROM contents are valid, 0 otherwise
*
* Returns: None.
******************************************************************************/
void
ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
u8 *mac_addr)
{
int i;
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
ENTER();
if (ixgb_check_and_get_eeprom_data(hw) == true) {
for (i = 0; i < ETH_ALEN; i++) {
mac_addr[i] = ee_map->mac_addr[i];
}
pr_debug("eeprom mac address = %pM\n", mac_addr);
}
}
/******************************************************************************
* return the Printed Board Assembly number from EEPROM
*
* hw - Struct containing variables accessed by shared code
*
* Returns:
* PBA number if EEPROM contents are valid, 0 otherwise
******************************************************************************/
u32
ixgb_get_ee_pba_number(struct ixgb_hw *hw)
{
if (ixgb_check_and_get_eeprom_data(hw) == true)
return le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG])
| (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16);
return 0;
}
/******************************************************************************
* return the Device Id from EEPROM
*
* hw - Struct containing variables accessed by shared code
*
* Returns:
* Device Id if EEPROM contents are valid, 0 otherwise
******************************************************************************/
u16
ixgb_get_ee_device_id(struct ixgb_hw *hw)
{
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if (ixgb_check_and_get_eeprom_data(hw) == true)
return le16_to_cpu(ee_map->device_id);
return 0;
}

View File

@ -0,0 +1,104 @@
/*******************************************************************************
Intel PRO/10GbE Linux driver
Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _IXGB_EE_H_
#define _IXGB_EE_H_
#define IXGB_EEPROM_SIZE 64 /* Size in words */
/* EEPROM Commands */
#define EEPROM_READ_OPCODE 0x6 /* EEPROM read opcode */
#define EEPROM_WRITE_OPCODE 0x5 /* EEPROM write opcode */
#define EEPROM_ERASE_OPCODE 0x7 /* EEPROM erase opcode */
#define EEPROM_EWEN_OPCODE 0x13 /* EEPROM erase/write enable */
#define EEPROM_EWDS_OPCODE 0x10 /* EEPROM erase/write disable */
/* EEPROM MAP (Word Offsets) */
#define EEPROM_IA_1_2_REG 0x0000
#define EEPROM_IA_3_4_REG 0x0001
#define EEPROM_IA_5_6_REG 0x0002
#define EEPROM_COMPATIBILITY_REG 0x0003
#define EEPROM_PBA_1_2_REG 0x0008
#define EEPROM_PBA_3_4_REG 0x0009
#define EEPROM_INIT_CONTROL1_REG 0x000A
#define EEPROM_SUBSYS_ID_REG 0x000B
#define EEPROM_SUBVEND_ID_REG 0x000C
#define EEPROM_DEVICE_ID_REG 0x000D
#define EEPROM_VENDOR_ID_REG 0x000E
#define EEPROM_INIT_CONTROL2_REG 0x000F
#define EEPROM_SWDPINS_REG 0x0020
#define EEPROM_CIRCUIT_CTRL_REG 0x0021
#define EEPROM_D0_D3_POWER_REG 0x0022
#define EEPROM_FLASH_VERSION 0x0032
#define EEPROM_CHECKSUM_REG 0x003F
/* Mask bits for fields in Word 0x0a of the EEPROM */
#define EEPROM_ICW1_SIGNATURE_MASK 0xC000
#define EEPROM_ICW1_SIGNATURE_VALID 0x4000
#define EEPROM_ICW1_SIGNATURE_CLEAR 0x0000
/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */
#define EEPROM_SUM 0xBABA
/* EEPROM Map Sizes (Byte Counts) */
#define PBA_SIZE 4
/* EEPROM Map defines (WORD OFFSETS)*/
/* EEPROM structure */
struct ixgb_ee_map_type {
u8 mac_addr[ETH_ALEN];
__le16 compatibility;
__le16 reserved1[4];
__le32 pba_number;
__le16 init_ctrl_reg_1;
__le16 subsystem_id;
__le16 subvendor_id;
__le16 device_id;
__le16 vendor_id;
__le16 init_ctrl_reg_2;
__le16 oem_reserved[16];
__le16 swdpins_reg;
__le16 circuit_ctrl_reg;
u8 d3_power;
u8 d0_power;
__le16 reserved2[28];
__le16 checksum;
};
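/* Size sanity check (editorial illustration, not in the original header):
 * the map above should cover exactly the 64-word (128-byte) EEPROM:
 * 6 + 2 + 8 + 4 + 2*6 + 32 + 2 + 2 + 1 + 1 + 56 + 2 = 128 bytes.
 * A build-time assertion placed in an init path could catch padding
 * surprises:
 *
 *	BUILD_BUG_ON(sizeof(struct ixgb_ee_map_type) != IXGB_EEPROM_SIZE * 2);
 */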
/* EEPROM Functions */
u16 ixgb_read_eeprom(struct ixgb_hw *hw, u16 reg);
bool ixgb_validate_eeprom_checksum(struct ixgb_hw *hw);
void ixgb_update_eeprom_checksum(struct ixgb_hw *hw);
void ixgb_write_eeprom(struct ixgb_hw *hw, u16 reg, u16 data);
#endif /* _IXGB_EE_H_ */

View File

@ -0,0 +1,658 @@
/*******************************************************************************
Intel PRO/10GbE Linux driver
Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
/* ethtool support for ixgb */
#include "ixgb.h"
#include <asm/uaccess.h>
#define IXGB_ALL_RAR_ENTRIES 16
enum {NETDEV_STATS, IXGB_STATS};
struct ixgb_stats {
char stat_string[ETH_GSTRING_LEN];
int type;
int sizeof_stat;
int stat_offset;
};
#define IXGB_STAT(m) IXGB_STATS, \
FIELD_SIZEOF(struct ixgb_adapter, m), \
offsetof(struct ixgb_adapter, m)
#define IXGB_NETDEV_STAT(m) NETDEV_STATS, \
FIELD_SIZEOF(struct net_device, m), \
offsetof(struct net_device, m)
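/* For reference (illustrative expansion, not generated code): an entry such
 * as {"rx_packets", IXGB_NETDEV_STAT(stats.rx_packets)} expands to
 * { "rx_packets", NETDEV_STATS,
 *   FIELD_SIZEOF(struct net_device, stats.rx_packets),
 *   offsetof(struct net_device, stats.rx_packets) },
 * which is how ixgb_get_ethtool_stats() later knows which base pointer
 * (netdev or adapter) and which offset/size to read for each stat string. */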
static struct ixgb_stats ixgb_gstrings_stats[] = {
{"rx_packets", IXGB_NETDEV_STAT(stats.rx_packets)},
{"tx_packets", IXGB_NETDEV_STAT(stats.tx_packets)},
{"rx_bytes", IXGB_NETDEV_STAT(stats.rx_bytes)},
{"tx_bytes", IXGB_NETDEV_STAT(stats.tx_bytes)},
{"rx_errors", IXGB_NETDEV_STAT(stats.rx_errors)},
{"tx_errors", IXGB_NETDEV_STAT(stats.tx_errors)},
{"rx_dropped", IXGB_NETDEV_STAT(stats.rx_dropped)},
{"tx_dropped", IXGB_NETDEV_STAT(stats.tx_dropped)},
{"multicast", IXGB_NETDEV_STAT(stats.multicast)},
{"collisions", IXGB_NETDEV_STAT(stats.collisions)},
/* { "rx_length_errors", IXGB_NETDEV_STAT(stats.rx_length_errors) }, */
{"rx_over_errors", IXGB_NETDEV_STAT(stats.rx_over_errors)},
{"rx_crc_errors", IXGB_NETDEV_STAT(stats.rx_crc_errors)},
{"rx_frame_errors", IXGB_NETDEV_STAT(stats.rx_frame_errors)},
{"rx_no_buffer_count", IXGB_STAT(stats.rnbc)},
{"rx_fifo_errors", IXGB_NETDEV_STAT(stats.rx_fifo_errors)},
{"rx_missed_errors", IXGB_NETDEV_STAT(stats.rx_missed_errors)},
{"tx_aborted_errors", IXGB_NETDEV_STAT(stats.tx_aborted_errors)},
{"tx_carrier_errors", IXGB_NETDEV_STAT(stats.tx_carrier_errors)},
{"tx_fifo_errors", IXGB_NETDEV_STAT(stats.tx_fifo_errors)},
{"tx_heartbeat_errors", IXGB_NETDEV_STAT(stats.tx_heartbeat_errors)},
{"tx_window_errors", IXGB_NETDEV_STAT(stats.tx_window_errors)},
{"tx_deferred_ok", IXGB_STAT(stats.dc)},
{"tx_timeout_count", IXGB_STAT(tx_timeout_count) },
{"tx_restart_queue", IXGB_STAT(restart_queue) },
{"rx_long_length_errors", IXGB_STAT(stats.roc)},
{"rx_short_length_errors", IXGB_STAT(stats.ruc)},
{"tx_tcp_seg_good", IXGB_STAT(stats.tsctc)},
{"tx_tcp_seg_failed", IXGB_STAT(stats.tsctfc)},
{"rx_flow_control_xon", IXGB_STAT(stats.xonrxc)},
{"rx_flow_control_xoff", IXGB_STAT(stats.xoffrxc)},
{"tx_flow_control_xon", IXGB_STAT(stats.xontxc)},
{"tx_flow_control_xoff", IXGB_STAT(stats.xofftxc)},
{"rx_csum_offload_good", IXGB_STAT(hw_csum_rx_good)},
{"rx_csum_offload_errors", IXGB_STAT(hw_csum_rx_error)},
{"tx_csum_offload_good", IXGB_STAT(hw_csum_tx_good)},
{"tx_csum_offload_errors", IXGB_STAT(hw_csum_tx_error)}
};
#define IXGB_STATS_LEN ARRAY_SIZE(ixgb_gstrings_stats)
static int
ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
ecmd->port = PORT_FIBRE;
ecmd->transceiver = XCVR_EXTERNAL;
if (netif_carrier_ok(adapter->netdev)) {
ethtool_cmd_speed_set(ecmd, SPEED_10000);
ecmd->duplex = DUPLEX_FULL;
} else {
ethtool_cmd_speed_set(ecmd, -1);
ecmd->duplex = -1;
}
ecmd->autoneg = AUTONEG_DISABLE;
return 0;
}
void ixgb_set_speed_duplex(struct net_device *netdev)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
/* be optimistic about our link, since we were up before */
adapter->link_speed = 10000;
adapter->link_duplex = FULL_DUPLEX;
netif_carrier_on(netdev);
netif_wake_queue(netdev);
}
static int
ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
u32 speed = ethtool_cmd_speed(ecmd);
if (ecmd->autoneg == AUTONEG_ENABLE ||
(speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
return -EINVAL;
if (netif_running(adapter->netdev)) {
ixgb_down(adapter, true);
ixgb_reset(adapter);
ixgb_up(adapter);
ixgb_set_speed_duplex(netdev);
} else
ixgb_reset(adapter);
return 0;
}
static void
ixgb_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
pause->autoneg = AUTONEG_DISABLE;
if (hw->fc.type == ixgb_fc_rx_pause)
pause->rx_pause = 1;
else if (hw->fc.type == ixgb_fc_tx_pause)
pause->tx_pause = 1;
else if (hw->fc.type == ixgb_fc_full) {
pause->rx_pause = 1;
pause->tx_pause = 1;
}
}
static int
ixgb_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
if (pause->autoneg == AUTONEG_ENABLE)
return -EINVAL;
if (pause->rx_pause && pause->tx_pause)
hw->fc.type = ixgb_fc_full;
else if (pause->rx_pause && !pause->tx_pause)
hw->fc.type = ixgb_fc_rx_pause;
else if (!pause->rx_pause && pause->tx_pause)
hw->fc.type = ixgb_fc_tx_pause;
else if (!pause->rx_pause && !pause->tx_pause)
hw->fc.type = ixgb_fc_none;
if (netif_running(adapter->netdev)) {
ixgb_down(adapter, true);
ixgb_up(adapter);
ixgb_set_speed_duplex(netdev);
} else
ixgb_reset(adapter);
return 0;
}
static u32
ixgb_get_msglevel(struct net_device *netdev)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
return adapter->msg_enable;
}
static void
ixgb_set_msglevel(struct net_device *netdev, u32 data)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
adapter->msg_enable = data;
}
#define IXGB_GET_STAT(_A_, _R_) _A_->stats._R_
static int
ixgb_get_regs_len(struct net_device *netdev)
{
#define IXGB_REG_DUMP_LEN 136*sizeof(u32)
return IXGB_REG_DUMP_LEN;
}
static void
ixgb_get_regs(struct net_device *netdev,
struct ethtool_regs *regs, void *p)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
u32 *reg = p;
u32 *reg_start = reg;
u8 i;
/* the 1 (one) below indicates an attempt at versioning; if the
 * interface in ethtool or the driver changes, this 1 should be
 * incremented */
regs->version = (1<<24) | hw->revision_id << 16 | hw->device_id;
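/* Illustrative decode of the version word above (editorial note, not in the
 * original source): bits 31:24 = 1 (dump format), bits 23:16 = hw->revision_id,
 * bits 15:0 = hw->device_id; e.g. revision 0x02 on device 0x1048 would give
 * 0x01021048. */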
/* General Registers */
*reg++ = IXGB_READ_REG(hw, CTRL0); /* 0 */
*reg++ = IXGB_READ_REG(hw, CTRL1); /* 1 */
*reg++ = IXGB_READ_REG(hw, STATUS); /* 2 */
*reg++ = IXGB_READ_REG(hw, EECD); /* 3 */
*reg++ = IXGB_READ_REG(hw, MFS); /* 4 */
/* Interrupt */
*reg++ = IXGB_READ_REG(hw, ICR); /* 5 */
*reg++ = IXGB_READ_REG(hw, ICS); /* 6 */
*reg++ = IXGB_READ_REG(hw, IMS); /* 7 */
*reg++ = IXGB_READ_REG(hw, IMC); /* 8 */
/* Receive */
*reg++ = IXGB_READ_REG(hw, RCTL); /* 9 */
*reg++ = IXGB_READ_REG(hw, FCRTL); /* 10 */
*reg++ = IXGB_READ_REG(hw, FCRTH); /* 11 */
*reg++ = IXGB_READ_REG(hw, RDBAL); /* 12 */
*reg++ = IXGB_READ_REG(hw, RDBAH); /* 13 */
*reg++ = IXGB_READ_REG(hw, RDLEN); /* 14 */
*reg++ = IXGB_READ_REG(hw, RDH); /* 15 */
*reg++ = IXGB_READ_REG(hw, RDT); /* 16 */
*reg++ = IXGB_READ_REG(hw, RDTR); /* 17 */
*reg++ = IXGB_READ_REG(hw, RXDCTL); /* 18 */
*reg++ = IXGB_READ_REG(hw, RAIDC); /* 19 */
*reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */
/* there are 16 RAR entries in hardware; we only use 3 */
for (i = 0; i < IXGB_ALL_RAR_ENTRIES; i++) {
*reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */
*reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */
}
/* Transmit */
*reg++ = IXGB_READ_REG(hw, TCTL); /* 53 */
*reg++ = IXGB_READ_REG(hw, TDBAL); /* 54 */
*reg++ = IXGB_READ_REG(hw, TDBAH); /* 55 */
*reg++ = IXGB_READ_REG(hw, TDLEN); /* 56 */
*reg++ = IXGB_READ_REG(hw, TDH); /* 57 */
*reg++ = IXGB_READ_REG(hw, TDT); /* 58 */
*reg++ = IXGB_READ_REG(hw, TIDV); /* 59 */
*reg++ = IXGB_READ_REG(hw, TXDCTL); /* 60 */
*reg++ = IXGB_READ_REG(hw, TSPMT); /* 61 */
*reg++ = IXGB_READ_REG(hw, PAP); /* 62 */
/* Physical */
*reg++ = IXGB_READ_REG(hw, PCSC1); /* 63 */
*reg++ = IXGB_READ_REG(hw, PCSC2); /* 64 */
*reg++ = IXGB_READ_REG(hw, PCSS1); /* 65 */
*reg++ = IXGB_READ_REG(hw, PCSS2); /* 66 */
*reg++ = IXGB_READ_REG(hw, XPCSS); /* 67 */
*reg++ = IXGB_READ_REG(hw, UCCR); /* 68 */
*reg++ = IXGB_READ_REG(hw, XPCSTC); /* 69 */
*reg++ = IXGB_READ_REG(hw, MACA); /* 70 */
*reg++ = IXGB_READ_REG(hw, APAE); /* 71 */
*reg++ = IXGB_READ_REG(hw, ARD); /* 72 */
*reg++ = IXGB_READ_REG(hw, AIS); /* 73 */
*reg++ = IXGB_READ_REG(hw, MSCA); /* 74 */
*reg++ = IXGB_READ_REG(hw, MSRWD); /* 75 */
/* Statistics */
*reg++ = IXGB_GET_STAT(adapter, tprl); /* 76 */
*reg++ = IXGB_GET_STAT(adapter, tprh); /* 77 */
*reg++ = IXGB_GET_STAT(adapter, gprcl); /* 78 */
*reg++ = IXGB_GET_STAT(adapter, gprch); /* 79 */
*reg++ = IXGB_GET_STAT(adapter, bprcl); /* 80 */
*reg++ = IXGB_GET_STAT(adapter, bprch); /* 81 */
*reg++ = IXGB_GET_STAT(adapter, mprcl); /* 82 */
*reg++ = IXGB_GET_STAT(adapter, mprch); /* 83 */
*reg++ = IXGB_GET_STAT(adapter, uprcl); /* 84 */
*reg++ = IXGB_GET_STAT(adapter, uprch); /* 85 */
*reg++ = IXGB_GET_STAT(adapter, vprcl); /* 86 */
*reg++ = IXGB_GET_STAT(adapter, vprch); /* 87 */
*reg++ = IXGB_GET_STAT(adapter, jprcl); /* 88 */
*reg++ = IXGB_GET_STAT(adapter, jprch); /* 89 */
*reg++ = IXGB_GET_STAT(adapter, gorcl); /* 90 */
*reg++ = IXGB_GET_STAT(adapter, gorch); /* 91 */
*reg++ = IXGB_GET_STAT(adapter, torl); /* 92 */
*reg++ = IXGB_GET_STAT(adapter, torh); /* 93 */
*reg++ = IXGB_GET_STAT(adapter, rnbc); /* 94 */
*reg++ = IXGB_GET_STAT(adapter, ruc); /* 95 */
*reg++ = IXGB_GET_STAT(adapter, roc); /* 96 */
*reg++ = IXGB_GET_STAT(adapter, rlec); /* 97 */
*reg++ = IXGB_GET_STAT(adapter, crcerrs); /* 98 */
*reg++ = IXGB_GET_STAT(adapter, icbc); /* 99 */
*reg++ = IXGB_GET_STAT(adapter, ecbc); /* 100 */
*reg++ = IXGB_GET_STAT(adapter, mpc); /* 101 */
*reg++ = IXGB_GET_STAT(adapter, tptl); /* 102 */
*reg++ = IXGB_GET_STAT(adapter, tpth); /* 103 */
*reg++ = IXGB_GET_STAT(adapter, gptcl); /* 104 */
*reg++ = IXGB_GET_STAT(adapter, gptch); /* 105 */
*reg++ = IXGB_GET_STAT(adapter, bptcl); /* 106 */
*reg++ = IXGB_GET_STAT(adapter, bptch); /* 107 */
*reg++ = IXGB_GET_STAT(adapter, mptcl); /* 108 */
*reg++ = IXGB_GET_STAT(adapter, mptch); /* 109 */
*reg++ = IXGB_GET_STAT(adapter, uptcl); /* 110 */
*reg++ = IXGB_GET_STAT(adapter, uptch); /* 111 */
*reg++ = IXGB_GET_STAT(adapter, vptcl); /* 112 */
*reg++ = IXGB_GET_STAT(adapter, vptch); /* 113 */
*reg++ = IXGB_GET_STAT(adapter, jptcl); /* 114 */
*reg++ = IXGB_GET_STAT(adapter, jptch); /* 115 */
*reg++ = IXGB_GET_STAT(adapter, gotcl); /* 116 */
*reg++ = IXGB_GET_STAT(adapter, gotch); /* 117 */
*reg++ = IXGB_GET_STAT(adapter, totl); /* 118 */
*reg++ = IXGB_GET_STAT(adapter, toth); /* 119 */
*reg++ = IXGB_GET_STAT(adapter, dc); /* 120 */
*reg++ = IXGB_GET_STAT(adapter, plt64c); /* 121 */
*reg++ = IXGB_GET_STAT(adapter, tsctc); /* 122 */
*reg++ = IXGB_GET_STAT(adapter, tsctfc); /* 123 */
*reg++ = IXGB_GET_STAT(adapter, ibic); /* 124 */
*reg++ = IXGB_GET_STAT(adapter, rfc); /* 125 */
*reg++ = IXGB_GET_STAT(adapter, lfc); /* 126 */
*reg++ = IXGB_GET_STAT(adapter, pfrc); /* 127 */
*reg++ = IXGB_GET_STAT(adapter, pftc); /* 128 */
*reg++ = IXGB_GET_STAT(adapter, mcfrc); /* 129 */
*reg++ = IXGB_GET_STAT(adapter, mcftc); /* 130 */
*reg++ = IXGB_GET_STAT(adapter, xonrxc); /* 131 */
*reg++ = IXGB_GET_STAT(adapter, xontxc); /* 132 */
*reg++ = IXGB_GET_STAT(adapter, xoffrxc); /* 133 */
*reg++ = IXGB_GET_STAT(adapter, xofftxc); /* 134 */
*reg++ = IXGB_GET_STAT(adapter, rjc); /* 135 */
regs->len = (reg - reg_start) * sizeof(u32);
}
static int
ixgb_get_eeprom_len(struct net_device *netdev)
{
/* return size in bytes */
return IXGB_EEPROM_SIZE << 1;
}
static int
ixgb_get_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
__le16 *eeprom_buff;
int i, max_len, first_word, last_word;
int ret_val = 0;
if (eeprom->len == 0) {
ret_val = -EINVAL;
goto geeprom_error;
}
eeprom->magic = hw->vendor_id | (hw->device_id << 16);
max_len = ixgb_get_eeprom_len(netdev);
if (eeprom->offset > eeprom->offset + eeprom->len) {
ret_val = -EINVAL;
goto geeprom_error;
}
if ((eeprom->offset + eeprom->len) > max_len)
eeprom->len = (max_len - eeprom->offset);
first_word = eeprom->offset >> 1;
last_word = (eeprom->offset + eeprom->len - 1) >> 1;
eeprom_buff = kmalloc(sizeof(__le16) *
(last_word - first_word + 1), GFP_KERNEL);
if (!eeprom_buff)
return -ENOMEM;
/* note the eeprom was good because the driver loaded */
for (i = 0; i <= (last_word - first_word); i++)
eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i));
memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
kfree(eeprom_buff);
geeprom_error:
return ret_val;
}
static int
ixgb_set_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
u16 *eeprom_buff;
void *ptr;
int max_len, first_word, last_word;
u16 i;
if (eeprom->len == 0)
return -EINVAL;
if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
return -EFAULT;
max_len = ixgb_get_eeprom_len(netdev);
if (eeprom->offset > eeprom->offset + eeprom->len)
return -EINVAL;
if ((eeprom->offset + eeprom->len) > max_len)
eeprom->len = (max_len - eeprom->offset);
first_word = eeprom->offset >> 1;
last_word = (eeprom->offset + eeprom->len - 1) >> 1;
eeprom_buff = kmalloc(max_len, GFP_KERNEL);
if (!eeprom_buff)
return -ENOMEM;
ptr = (void *)eeprom_buff;
if (eeprom->offset & 1) {
/* need read/modify/write of first changed EEPROM word */
/* only the second byte of the word is being modified */
eeprom_buff[0] = ixgb_read_eeprom(hw, first_word);
ptr++;
}
if ((eeprom->offset + eeprom->len) & 1) {
/* need read/modify/write of last changed EEPROM word */
/* only the first byte of the word is being modified */
eeprom_buff[last_word - first_word]
= ixgb_read_eeprom(hw, last_word);
}
memcpy(ptr, bytes, eeprom->len);
for (i = 0; i <= (last_word - first_word); i++)
ixgb_write_eeprom(hw, first_word + i, eeprom_buff[i]);
/* Update the checksum over the first part of the EEPROM if needed */
if (first_word <= EEPROM_CHECKSUM_REG)
ixgb_update_eeprom_checksum(hw);
kfree(eeprom_buff);
return 0;
}
static void
ixgb_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
strncpy(drvinfo->driver, ixgb_driver_name, 32);
strncpy(drvinfo->version, ixgb_driver_version, 32);
strncpy(drvinfo->fw_version, "N/A", 32);
strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
drvinfo->n_stats = IXGB_STATS_LEN;
drvinfo->regdump_len = ixgb_get_regs_len(netdev);
drvinfo->eedump_len = ixgb_get_eeprom_len(netdev);
}
static void
ixgb_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_desc_ring *txdr = &adapter->tx_ring;
struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
ring->rx_max_pending = MAX_RXD;
ring->tx_max_pending = MAX_TXD;
ring->rx_pending = rxdr->count;
ring->tx_pending = txdr->count;
}
static int
ixgb_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_desc_ring *txdr = &adapter->tx_ring;
struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
struct ixgb_desc_ring tx_old, tx_new, rx_old, rx_new;
int err;
tx_old = adapter->tx_ring;
rx_old = adapter->rx_ring;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
if (netif_running(adapter->netdev))
ixgb_down(adapter, true);
rxdr->count = max(ring->rx_pending,(u32)MIN_RXD);
rxdr->count = min(rxdr->count,(u32)MAX_RXD);
rxdr->count = ALIGN(rxdr->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
txdr->count = max(ring->tx_pending,(u32)MIN_TXD);
txdr->count = min(txdr->count,(u32)MAX_TXD);
txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
if (netif_running(adapter->netdev)) {
/* Try to get new resources before deleting old */
if ((err = ixgb_setup_rx_resources(adapter)))
goto err_setup_rx;
if ((err = ixgb_setup_tx_resources(adapter)))
goto err_setup_tx;
/* save the new, restore the old in order to free it,
* then restore the new back again */
rx_new = adapter->rx_ring;
tx_new = adapter->tx_ring;
adapter->rx_ring = rx_old;
adapter->tx_ring = tx_old;
ixgb_free_rx_resources(adapter);
ixgb_free_tx_resources(adapter);
adapter->rx_ring = rx_new;
adapter->tx_ring = tx_new;
if ((err = ixgb_up(adapter)))
return err;
ixgb_set_speed_duplex(netdev);
}
return 0;
err_setup_tx:
ixgb_free_rx_resources(adapter);
err_setup_rx:
adapter->rx_ring = rx_old;
adapter->tx_ring = tx_old;
ixgb_up(adapter);
return err;
}
static int
ixgb_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
switch (state) {
case ETHTOOL_ID_ACTIVE:
return 2;
case ETHTOOL_ID_ON:
ixgb_led_on(&adapter->hw);
break;
case ETHTOOL_ID_OFF:
case ETHTOOL_ID_INACTIVE:
ixgb_led_off(&adapter->hw);
}
return 0;
}
static int
ixgb_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return IXGB_STATS_LEN;
default:
return -EOPNOTSUPP;
}
}
static void
ixgb_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
int i;
char *p = NULL;
ixgb_update_stats(adapter);
for (i = 0; i < IXGB_STATS_LEN; i++) {
switch (ixgb_gstrings_stats[i].type) {
case NETDEV_STATS:
p = (char *) netdev +
ixgb_gstrings_stats[i].stat_offset;
break;
case IXGB_STATS:
p = (char *) adapter +
ixgb_gstrings_stats[i].stat_offset;
break;
}
data[i] = (ixgb_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
}
static void
ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < IXGB_STATS_LEN; i++) {
memcpy(data + i * ETH_GSTRING_LEN,
ixgb_gstrings_stats[i].stat_string,
ETH_GSTRING_LEN);
}
break;
}
}
static const struct ethtool_ops ixgb_ethtool_ops = {
.get_settings = ixgb_get_settings,
.set_settings = ixgb_set_settings,
.get_drvinfo = ixgb_get_drvinfo,
.get_regs_len = ixgb_get_regs_len,
.get_regs = ixgb_get_regs,
.get_link = ethtool_op_get_link,
.get_eeprom_len = ixgb_get_eeprom_len,
.get_eeprom = ixgb_get_eeprom,
.set_eeprom = ixgb_set_eeprom,
.get_ringparam = ixgb_get_ringparam,
.set_ringparam = ixgb_set_ringparam,
.get_pauseparam = ixgb_get_pauseparam,
.set_pauseparam = ixgb_set_pauseparam,
.get_msglevel = ixgb_get_msglevel,
.set_msglevel = ixgb_set_msglevel,
.get_strings = ixgb_get_strings,
.set_phys_id = ixgb_set_phys_id,
.get_sset_count = ixgb_get_sset_count,
.get_ethtool_stats = ixgb_get_ethtool_stats,
};
void ixgb_set_ethtool_ops(struct net_device *netdev)
{
SET_ETHTOOL_OPS(netdev, &ixgb_ethtool_ops);
}

File diff suppressed because it is too large

View File

@ -0,0 +1,799 @@
/*******************************************************************************
Intel PRO/10GbE Linux driver
Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _IXGB_HW_H_
#define _IXGB_HW_H_
#include <linux/mdio.h>
#include "ixgb_osdep.h"
/* Enums */
typedef enum {
ixgb_mac_unknown = 0,
ixgb_82597,
ixgb_num_macs
} ixgb_mac_type;
/* Types of physical layer modules */
typedef enum {
ixgb_phy_type_unknown = 0,
ixgb_phy_type_g6005, /* 850nm, MM fiber, XPAK transceiver */
ixgb_phy_type_g6104, /* 1310nm, SM fiber, XPAK transceiver */
ixgb_phy_type_txn17201, /* 850nm, MM fiber, XPAK transceiver */
ixgb_phy_type_txn17401, /* 1310nm, SM fiber, XENPAK transceiver */
ixgb_phy_type_bcm /* SUN specific board */
} ixgb_phy_type;
/* XPAK transceiver vendors, for the SR adapters */
typedef enum {
ixgb_xpak_vendor_intel,
ixgb_xpak_vendor_infineon
} ixgb_xpak_vendor;
/* Media Types */
typedef enum {
ixgb_media_type_unknown = 0,
ixgb_media_type_fiber = 1,
ixgb_media_type_copper = 2,
ixgb_num_media_types
} ixgb_media_type;
/* Flow Control Settings */
typedef enum {
ixgb_fc_none = 0,
ixgb_fc_rx_pause = 1,
ixgb_fc_tx_pause = 2,
ixgb_fc_full = 3,
ixgb_fc_default = 0xFF
} ixgb_fc_type;
/* PCI bus types */
typedef enum {
ixgb_bus_type_unknown = 0,
ixgb_bus_type_pci,
ixgb_bus_type_pcix
} ixgb_bus_type;
/* PCI bus speeds */
typedef enum {
ixgb_bus_speed_unknown = 0,
ixgb_bus_speed_33,
ixgb_bus_speed_66,
ixgb_bus_speed_100,
ixgb_bus_speed_133,
ixgb_bus_speed_reserved
} ixgb_bus_speed;
/* PCI bus widths */
typedef enum {
ixgb_bus_width_unknown = 0,
ixgb_bus_width_32,
ixgb_bus_width_64
} ixgb_bus_width;
#define IXGB_EEPROM_SIZE 64 /* Size in words */
#define SPEED_10000 10000
#define FULL_DUPLEX 2
#define MIN_NUMBER_OF_DESCRIPTORS 8
#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 /* 13 bits in RDLEN/TDLEN, 128B aligned */
#define IXGB_DELAY_BEFORE_RESET 10 /* allow 10ms after idling rx/tx units */
#define IXGB_DELAY_AFTER_RESET 1 /* allow 1ms after the reset */
#define IXGB_DELAY_AFTER_EE_RESET 10 /* allow 10ms after the EEPROM reset */
#define IXGB_DELAY_USECS_AFTER_LINK_RESET 13 /* allow 13 microseconds after the reset */
/* NOTE: this is MICROSECONDS */
#define MAX_RESET_ITERATIONS 8 /* number of iterations to get things right */
/* General Registers */
#define IXGB_CTRL0 0x00000 /* Device Control Register 0 - RW */
#define IXGB_CTRL1 0x00008 /* Device Control Register 1 - RW */
#define IXGB_STATUS 0x00010 /* Device Status Register - RO */
#define IXGB_EECD 0x00018 /* EEPROM/Flash Control/Data Register - RW */
#define IXGB_MFS 0x00020 /* Maximum Frame Size - RW */
/* Interrupt */
#define IXGB_ICR 0x00080 /* Interrupt Cause Read - R/clr */
#define IXGB_ICS 0x00088 /* Interrupt Cause Set - RW */
#define IXGB_IMS 0x00090 /* Interrupt Mask Set/Read - RW */
#define IXGB_IMC 0x00098 /* Interrupt Mask Clear - WO */
/* Receive */
#define IXGB_RCTL 0x00100 /* RX Control - RW */
#define IXGB_FCRTL 0x00108 /* Flow Control Receive Threshold Low - RW */
#define IXGB_FCRTH 0x00110 /* Flow Control Receive Threshold High - RW */
#define IXGB_RDBAL 0x00118 /* RX Descriptor Base Low - RW */
#define IXGB_RDBAH 0x0011C /* RX Descriptor Base High - RW */
#define IXGB_RDLEN 0x00120 /* RX Descriptor Length - RW */
#define IXGB_RDH 0x00128 /* RX Descriptor Head - RW */
#define IXGB_RDT 0x00130 /* RX Descriptor Tail - RW */
#define IXGB_RDTR 0x00138 /* RX Delay Timer Ring - RW */
#define IXGB_RXDCTL 0x00140 /* Receive Descriptor Control - RW */
#define IXGB_RAIDC 0x00148 /* Receive Adaptive Interrupt Delay Control - RW */
#define IXGB_RXCSUM 0x00158 /* Receive Checksum Control - RW */
#define IXGB_RA 0x00180 /* Receive Address Array Base - RW */
#define IXGB_RAL 0x00180 /* Receive Address Low [0:15] - RW */
#define IXGB_RAH 0x00184 /* Receive Address High [0:15] - RW */
#define IXGB_MTA 0x00200 /* Multicast Table Array [0:127] - RW */
#define IXGB_VFTA 0x00400 /* VLAN Filter Table Array [0:127] - RW */
#define IXGB_REQ_RX_DESCRIPTOR_MULTIPLE 8
/* Transmit */
#define IXGB_TCTL 0x00600 /* TX Control - RW */
#define IXGB_TDBAL 0x00608 /* TX Descriptor Base Low - RW */
#define IXGB_TDBAH 0x0060C /* TX Descriptor Base High - RW */
#define IXGB_TDLEN 0x00610 /* TX Descriptor Length - RW */
#define IXGB_TDH 0x00618 /* TX Descriptor Head - RW */
#define IXGB_TDT 0x00620 /* TX Descriptor Tail - RW */
#define IXGB_TIDV 0x00628 /* TX Interrupt Delay Value - RW */
#define IXGB_TXDCTL 0x00630 /* Transmit Descriptor Control - RW */
#define IXGB_TSPMT 0x00638 /* TCP Segmentation PAD & Min Threshold - RW */
#define IXGB_PAP 0x00640 /* Pause and Pace - RW */
#define IXGB_REQ_TX_DESCRIPTOR_MULTIPLE 8
/* Physical */
#define IXGB_PCSC1 0x00700 /* PCS Control 1 - RW */
#define IXGB_PCSC2 0x00708 /* PCS Control 2 - RW */
#define IXGB_PCSS1 0x00710 /* PCS Status 1 - RO */
#define IXGB_PCSS2 0x00718 /* PCS Status 2 - RO */
#define IXGB_XPCSS 0x00720 /* 10GBASE-X PCS Status (or XGXS Lane Status) - RO */
#define IXGB_UCCR 0x00728 /* Unilink Circuit Control Register */
#define IXGB_XPCSTC 0x00730 /* 10GBASE-X PCS Test Control */
#define IXGB_MACA 0x00738 /* MDI Autoscan Command and Address - RW */
#define IXGB_APAE 0x00740 /* Autoscan PHY Address Enable - RW */
#define IXGB_ARD 0x00748 /* Autoscan Read Data - RO */
#define IXGB_AIS 0x00750 /* Autoscan Interrupt Status - RO */
#define IXGB_MSCA 0x00758 /* MDI Single Command and Address - RW */
#define IXGB_MSRWD 0x00760 /* MDI Single Read and Write Data - RW, RO */
/* Wake-up */
#define IXGB_WUFC 0x00808 /* Wake Up Filter Control - RW */
#define IXGB_WUS 0x00810 /* Wake Up Status - RO */
#define IXGB_FFLT 0x01000 /* Flexible Filter Length Table - RW */
#define IXGB_FFMT 0x01020 /* Flexible Filter Mask Table - RW */
#define IXGB_FTVT 0x01420 /* Flexible Filter Value Table - RW */
/* Statistics */
#define IXGB_TPRL 0x02000 /* Total Packets Received (Low) */
#define IXGB_TPRH 0x02004 /* Total Packets Received (High) */
#define IXGB_GPRCL 0x02008 /* Good Packets Received Count (Low) */
#define IXGB_GPRCH 0x0200C /* Good Packets Received Count (High) */
#define IXGB_BPRCL 0x02010 /* Broadcast Packets Received Count (Low) */
#define IXGB_BPRCH 0x02014 /* Broadcast Packets Received Count (High) */
#define IXGB_MPRCL 0x02018 /* Multicast Packets Received Count (Low) */
#define IXGB_MPRCH 0x0201C /* Multicast Packets Received Count (High) */
#define IXGB_UPRCL 0x02020 /* Unicast Packets Received Count (Low) */
#define IXGB_UPRCH 0x02024 /* Unicast Packets Received Count (High) */
#define IXGB_VPRCL 0x02028 /* VLAN Packets Received Count (Low) */
#define IXGB_VPRCH 0x0202C /* VLAN Packets Received Count (High) */
#define IXGB_JPRCL 0x02030 /* Jumbo Packets Received Count (Low) */
#define IXGB_JPRCH 0x02034 /* Jumbo Packets Received Count (High) */
#define IXGB_GORCL 0x02038 /* Good Octets Received Count (Low) */
#define IXGB_GORCH 0x0203C /* Good Octets Received Count (High) */
#define IXGB_TORL 0x02040 /* Total Octets Received (Low) */
#define IXGB_TORH 0x02044 /* Total Octets Received (High) */
#define IXGB_RNBC 0x02048 /* Receive No Buffers Count */
#define IXGB_RUC 0x02050 /* Receive Undersize Count */
#define IXGB_ROC 0x02058 /* Receive Oversize Count */
#define IXGB_RLEC 0x02060 /* Receive Length Error Count */
#define IXGB_CRCERRS 0x02068 /* CRC Error Count */
#define IXGB_ICBC 0x02070 /* Illegal control byte in mid-packet Count */
#define IXGB_ECBC 0x02078 /* Error Control byte in mid-packet Count */
#define IXGB_MPC 0x02080 /* Missed Packets Count */
#define IXGB_TPTL 0x02100 /* Total Packets Transmitted (Low) */
#define IXGB_TPTH 0x02104 /* Total Packets Transmitted (High) */
#define IXGB_GPTCL 0x02108 /* Good Packets Transmitted Count (Low) */
#define IXGB_GPTCH 0x0210C /* Good Packets Transmitted Count (High) */
#define IXGB_BPTCL 0x02110 /* Broadcast Packets Transmitted Count (Low) */
#define IXGB_BPTCH 0x02114 /* Broadcast Packets Transmitted Count (High) */
#define IXGB_MPTCL 0x02118 /* Multicast Packets Transmitted Count (Low) */
#define IXGB_MPTCH 0x0211C /* Multicast Packets Transmitted Count (High) */
#define IXGB_UPTCL 0x02120 /* Unicast Packets Transmitted Count (Low) */
#define IXGB_UPTCH 0x02124 /* Unicast Packets Transmitted Count (High) */
#define IXGB_VPTCL 0x02128 /* VLAN Packets Transmitted Count (Low) */
#define IXGB_VPTCH 0x0212C /* VLAN Packets Transmitted Count (High) */
#define IXGB_JPTCL 0x02130 /* Jumbo Packets Transmitted Count (Low) */
#define IXGB_JPTCH 0x02134 /* Jumbo Packets Transmitted Count (High) */
#define IXGB_GOTCL 0x02138 /* Good Octets Transmitted Count (Low) */
#define IXGB_GOTCH 0x0213C /* Good Octets Transmitted Count (High) */
#define IXGB_TOTL 0x02140 /* Total Octets Transmitted Count (Low) */
#define IXGB_TOTH 0x02144 /* Total Octets Transmitted Count (High) */
#define IXGB_DC 0x02148 /* Defer Count */
#define IXGB_PLT64C 0x02150 /* Packet Transmitted was less than 64 bytes Count */
#define IXGB_TSCTC 0x02170 /* TCP Segmentation Context Transmitted Count */
#define IXGB_TSCTFC 0x02178 /* TCP Segmentation Context Tx Fail Count */
#define IXGB_IBIC 0x02180 /* Illegal byte during Idle stream count */
#define IXGB_RFC 0x02188 /* Remote Fault Count */
#define IXGB_LFC 0x02190 /* Local Fault Count */
#define IXGB_PFRC 0x02198 /* Pause Frame Receive Count */
#define IXGB_PFTC 0x021A0 /* Pause Frame Transmit Count */
#define IXGB_MCFRC 0x021A8 /* MAC Control Frames (non-Pause) Received Count */
#define IXGB_MCFTC 0x021B0 /* MAC Control Frames (non-Pause) Transmitted Count */
#define IXGB_XONRXC 0x021B8 /* XON Received Count */
#define IXGB_XONTXC 0x021C0 /* XON Transmitted Count */
#define IXGB_XOFFRXC 0x021C8 /* XOFF Received Count */
#define IXGB_XOFFTXC 0x021D0 /* XOFF Transmitted Count */
#define IXGB_RJC 0x021D8 /* Receive Jabber Count */
/* CTRL0 Bit Masks */
#define IXGB_CTRL0_LRST 0x00000008
#define IXGB_CTRL0_JFE 0x00000010
#define IXGB_CTRL0_XLE 0x00000020
#define IXGB_CTRL0_MDCS 0x00000040
#define IXGB_CTRL0_CMDC 0x00000080
#define IXGB_CTRL0_SDP0 0x00040000
#define IXGB_CTRL0_SDP1 0x00080000
#define IXGB_CTRL0_SDP2 0x00100000
#define IXGB_CTRL0_SDP3 0x00200000
#define IXGB_CTRL0_SDP0_DIR 0x00400000
#define IXGB_CTRL0_SDP1_DIR 0x00800000
#define IXGB_CTRL0_SDP2_DIR 0x01000000
#define IXGB_CTRL0_SDP3_DIR 0x02000000
#define IXGB_CTRL0_RST 0x04000000
#define IXGB_CTRL0_RPE 0x08000000
#define IXGB_CTRL0_TPE 0x10000000
#define IXGB_CTRL0_VME 0x40000000
/* CTRL1 Bit Masks */
#define IXGB_CTRL1_GPI0_EN 0x00000001
#define IXGB_CTRL1_GPI1_EN 0x00000002
#define IXGB_CTRL1_GPI2_EN 0x00000004
#define IXGB_CTRL1_GPI3_EN 0x00000008
#define IXGB_CTRL1_SDP4 0x00000010
#define IXGB_CTRL1_SDP5 0x00000020
#define IXGB_CTRL1_SDP6 0x00000040
#define IXGB_CTRL1_SDP7 0x00000080
#define IXGB_CTRL1_SDP4_DIR 0x00000100
#define IXGB_CTRL1_SDP5_DIR 0x00000200
#define IXGB_CTRL1_SDP6_DIR 0x00000400
#define IXGB_CTRL1_SDP7_DIR 0x00000800
#define IXGB_CTRL1_EE_RST 0x00002000
#define IXGB_CTRL1_RO_DIS 0x00020000
#define IXGB_CTRL1_PCIXHM_MASK 0x00C00000
#define IXGB_CTRL1_PCIXHM_1_2 0x00000000
#define IXGB_CTRL1_PCIXHM_5_8 0x00400000
#define IXGB_CTRL1_PCIXHM_3_4 0x00800000
#define IXGB_CTRL1_PCIXHM_7_8 0x00C00000
/* STATUS Bit Masks */
#define IXGB_STATUS_LU 0x00000002
#define IXGB_STATUS_AIP 0x00000004
#define IXGB_STATUS_TXOFF 0x00000010
#define IXGB_STATUS_XAUIME 0x00000020
#define IXGB_STATUS_RES 0x00000040
#define IXGB_STATUS_RIS 0x00000080
#define IXGB_STATUS_RIE 0x00000100
#define IXGB_STATUS_RLF 0x00000200
#define IXGB_STATUS_RRF 0x00000400
#define IXGB_STATUS_PCI_SPD 0x00000800
#define IXGB_STATUS_BUS64 0x00001000
#define IXGB_STATUS_PCIX_MODE 0x00002000
#define IXGB_STATUS_PCIX_SPD_MASK 0x0000C000
#define IXGB_STATUS_PCIX_SPD_66 0x00000000
#define IXGB_STATUS_PCIX_SPD_100 0x00004000
#define IXGB_STATUS_PCIX_SPD_133 0x00008000
#define IXGB_STATUS_REV_ID_MASK 0x000F0000
#define IXGB_STATUS_REV_ID_SHIFT 16
/* EECD Bit Masks */
#define IXGB_EECD_SK 0x00000001
#define IXGB_EECD_CS 0x00000002
#define IXGB_EECD_DI 0x00000004
#define IXGB_EECD_DO 0x00000008
#define IXGB_EECD_FWE_MASK 0x00000030
#define IXGB_EECD_FWE_DIS 0x00000010
#define IXGB_EECD_FWE_EN 0x00000020
/* MFS */
#define IXGB_MFS_SHIFT 16
/* Interrupt Register Bit Masks (used for ICR, ICS, IMS, and IMC) */
#define IXGB_INT_TXDW 0x00000001
#define IXGB_INT_TXQE 0x00000002
#define IXGB_INT_LSC 0x00000004
#define IXGB_INT_RXSEQ 0x00000008
#define IXGB_INT_RXDMT0 0x00000010
#define IXGB_INT_RXO 0x00000040
#define IXGB_INT_RXT0 0x00000080
#define IXGB_INT_AUTOSCAN 0x00000200
#define IXGB_INT_GPI0 0x00000800
#define IXGB_INT_GPI1 0x00001000
#define IXGB_INT_GPI2 0x00002000
#define IXGB_INT_GPI3 0x00004000
/* RCTL Bit Masks */
#define IXGB_RCTL_RXEN 0x00000002
#define IXGB_RCTL_SBP 0x00000004
#define IXGB_RCTL_UPE 0x00000008
#define IXGB_RCTL_MPE 0x00000010
#define IXGB_RCTL_RDMTS_MASK 0x00000300
#define IXGB_RCTL_RDMTS_1_2 0x00000000
#define IXGB_RCTL_RDMTS_1_4 0x00000100
#define IXGB_RCTL_RDMTS_1_8 0x00000200
#define IXGB_RCTL_MO_MASK 0x00003000
#define IXGB_RCTL_MO_47_36 0x00000000
#define IXGB_RCTL_MO_46_35 0x00001000
#define IXGB_RCTL_MO_45_34 0x00002000
#define IXGB_RCTL_MO_43_32 0x00003000
#define IXGB_RCTL_MO_SHIFT 12
#define IXGB_RCTL_BAM 0x00008000
#define IXGB_RCTL_BSIZE_MASK 0x00030000
#define IXGB_RCTL_BSIZE_2048 0x00000000
#define IXGB_RCTL_BSIZE_4096 0x00010000
#define IXGB_RCTL_BSIZE_8192 0x00020000
#define IXGB_RCTL_BSIZE_16384 0x00030000
#define IXGB_RCTL_VFE 0x00040000
#define IXGB_RCTL_CFIEN 0x00080000
#define IXGB_RCTL_CFI 0x00100000
#define IXGB_RCTL_RPDA_MASK 0x00600000
#define IXGB_RCTL_RPDA_MC_MAC 0x00000000
#define IXGB_RCTL_MC_ONLY 0x00400000
#define IXGB_RCTL_CFF 0x00800000
#define IXGB_RCTL_SECRC 0x04000000
#define IXGB_RDT_FPDB 0x80000000
#define IXGB_RCTL_IDLE_RX_UNIT 0
/* FCRTL Bit Masks */
#define IXGB_FCRTL_XONE 0x80000000
/* RXDCTL Bit Masks */
#define IXGB_RXDCTL_PTHRESH_MASK 0x000001FF
#define IXGB_RXDCTL_PTHRESH_SHIFT 0
#define IXGB_RXDCTL_HTHRESH_MASK 0x0003FE00
#define IXGB_RXDCTL_HTHRESH_SHIFT 9
#define IXGB_RXDCTL_WTHRESH_MASK 0x07FC0000
#define IXGB_RXDCTL_WTHRESH_SHIFT 18
/* RAIDC Bit Masks */
#define IXGB_RAIDC_HIGHTHRS_MASK 0x0000003F
#define IXGB_RAIDC_DELAY_MASK 0x000FF800
#define IXGB_RAIDC_DELAY_SHIFT 11
#define IXGB_RAIDC_POLL_MASK 0x1FF00000
#define IXGB_RAIDC_POLL_SHIFT 20
#define IXGB_RAIDC_RXT_GATE 0x40000000
#define IXGB_RAIDC_EN 0x80000000
#define IXGB_RAIDC_POLL_1000_INTERRUPTS_PER_SECOND 1220
#define IXGB_RAIDC_POLL_5000_INTERRUPTS_PER_SECOND 244
#define IXGB_RAIDC_POLL_10000_INTERRUPTS_PER_SECOND 122
#define IXGB_RAIDC_POLL_20000_INTERRUPTS_PER_SECOND 61
/* RXCSUM Bit Masks */
#define IXGB_RXCSUM_IPOFL 0x00000100
#define IXGB_RXCSUM_TUOFL 0x00000200
/* RAH Bit Masks */
#define IXGB_RAH_ASEL_MASK 0x00030000
#define IXGB_RAH_ASEL_DEST 0x00000000
#define IXGB_RAH_ASEL_SRC 0x00010000
#define IXGB_RAH_AV 0x80000000
/* TCTL Bit Masks */
#define IXGB_TCTL_TCE 0x00000001
#define IXGB_TCTL_TXEN 0x00000002
#define IXGB_TCTL_TPDE 0x00000004
#define IXGB_TCTL_IDLE_TX_UNIT 0
/* TXDCTL Bit Masks */
#define IXGB_TXDCTL_PTHRESH_MASK 0x0000007F
#define IXGB_TXDCTL_HTHRESH_MASK 0x00007F00
#define IXGB_TXDCTL_HTHRESH_SHIFT 8
#define IXGB_TXDCTL_WTHRESH_MASK 0x007F0000
#define IXGB_TXDCTL_WTHRESH_SHIFT 16
/* TSPMT Bit Masks */
#define IXGB_TSPMT_TSMT_MASK 0x0000FFFF
#define IXGB_TSPMT_TSPBP_MASK 0xFFFF0000
#define IXGB_TSPMT_TSPBP_SHIFT 16
/* PAP Bit Masks */
#define IXGB_PAP_TXPC_MASK 0x0000FFFF
#define IXGB_PAP_TXPV_MASK 0x000F0000
#define IXGB_PAP_TXPV_10G 0x00000000
#define IXGB_PAP_TXPV_1G 0x00010000
#define IXGB_PAP_TXPV_2G 0x00020000
#define IXGB_PAP_TXPV_3G 0x00030000
#define IXGB_PAP_TXPV_4G 0x00040000
#define IXGB_PAP_TXPV_5G 0x00050000
#define IXGB_PAP_TXPV_6G 0x00060000
#define IXGB_PAP_TXPV_7G 0x00070000
#define IXGB_PAP_TXPV_8G 0x00080000
#define IXGB_PAP_TXPV_9G 0x00090000
#define IXGB_PAP_TXPV_WAN 0x000F0000
/* PCSC1 Bit Masks */
#define IXGB_PCSC1_LOOPBACK 0x00004000
/* PCSC2 Bit Masks */
#define IXGB_PCSC2_PCS_TYPE_MASK 0x00000003
#define IXGB_PCSC2_PCS_TYPE_10GBX 0x00000001
/* PCSS1 Bit Masks */
#define IXGB_PCSS1_LOCAL_FAULT 0x00000080
#define IXGB_PCSS1_RX_LINK_STATUS 0x00000004
/* PCSS2 Bit Masks */
#define IXGB_PCSS2_DEV_PRES_MASK 0x0000C000
#define IXGB_PCSS2_DEV_PRES 0x00004000
#define IXGB_PCSS2_TX_LF 0x00000800
#define IXGB_PCSS2_RX_LF 0x00000400
#define IXGB_PCSS2_10GBW 0x00000004
#define IXGB_PCSS2_10GBX 0x00000002
#define IXGB_PCSS2_10GBR 0x00000001
/* XPCSS Bit Masks */
#define IXGB_XPCSS_ALIGN_STATUS 0x00001000
#define IXGB_XPCSS_PATTERN_TEST 0x00000800
#define IXGB_XPCSS_LANE_3_SYNC 0x00000008
#define IXGB_XPCSS_LANE_2_SYNC 0x00000004
#define IXGB_XPCSS_LANE_1_SYNC 0x00000002
#define IXGB_XPCSS_LANE_0_SYNC 0x00000001
/* XPCSTC Bit Masks */
#define IXGB_XPCSTC_BERT_TRIG 0x00200000
#define IXGB_XPCSTC_BERT_SST 0x00100000
#define IXGB_XPCSTC_BERT_PSZ_MASK 0x000C0000
#define IXGB_XPCSTC_BERT_PSZ_SHIFT 17
#define IXGB_XPCSTC_BERT_PSZ_INF 0x00000003
#define IXGB_XPCSTC_BERT_PSZ_68 0x00000001
#define IXGB_XPCSTC_BERT_PSZ_1028 0x00000000
/* MSCA bit Masks */
/* New Protocol Address */
#define IXGB_MSCA_NP_ADDR_MASK 0x0000FFFF
#define IXGB_MSCA_NP_ADDR_SHIFT 0
/* Either Device Type or Register Address, depending on ST_CODE */
#define IXGB_MSCA_DEV_TYPE_MASK 0x001F0000
#define IXGB_MSCA_DEV_TYPE_SHIFT 16
#define IXGB_MSCA_PHY_ADDR_MASK 0x03E00000
#define IXGB_MSCA_PHY_ADDR_SHIFT 21
#define IXGB_MSCA_OP_CODE_MASK 0x0C000000
/* OP_CODE == 00, Address cycle, New Protocol */
/* OP_CODE == 01, Write operation */
/* OP_CODE == 10, Read operation */
/* OP_CODE == 11, Read, auto increment, New Protocol */
#define IXGB_MSCA_ADDR_CYCLE 0x00000000
#define IXGB_MSCA_WRITE 0x04000000
#define IXGB_MSCA_READ 0x08000000
#define IXGB_MSCA_READ_AUTOINC 0x0C000000
#define IXGB_MSCA_OP_CODE_SHIFT 26
#define IXGB_MSCA_ST_CODE_MASK 0x30000000
/* ST_CODE == 00, New Protocol */
/* ST_CODE == 01, Old Protocol */
#define IXGB_MSCA_NEW_PROTOCOL 0x00000000
#define IXGB_MSCA_OLD_PROTOCOL 0x10000000
#define IXGB_MSCA_ST_CODE_SHIFT 28
/* Initiate command, self-clearing when command completes */
#define IXGB_MSCA_MDI_COMMAND 0x40000000
/* MDI In Progress Enable. */
#define IXGB_MSCA_MDI_IN_PROG_EN 0x80000000
/* MSRWD bit masks */
#define IXGB_MSRWD_WRITE_DATA_MASK 0x0000FFFF
#define IXGB_MSRWD_WRITE_DATA_SHIFT 0
#define IXGB_MSRWD_READ_DATA_MASK 0xFFFF0000
#define IXGB_MSRWD_READ_DATA_SHIFT 16
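/*
 * Illustrative sketch (assumption, not taken from this diff): a new-protocol
 * MDIO write through these registers first issues an address cycle and then
 * the write itself, composing MSCA roughly as
 *
 *	u32 msca = (reg_addr & IXGB_MSCA_NP_ADDR_MASK) |
 *		   (dev_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
 *		   (phy_addr << IXGB_MSCA_PHY_ADDR_SHIFT) |
 *		   IXGB_MSCA_NEW_PROTOCOL | IXGB_MSCA_WRITE |
 *		   IXGB_MSCA_MDI_COMMAND;
 *
 * with the 16-bit payload placed in the low half of MSRWD beforehand.
 */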
/* Definitions for the optics devices on the MDIO bus. */
#define IXGB_PHY_ADDRESS 0x0 /* Single PHY, multiple "Devices" */
#define MDIO_PMA_PMD_XPAK_VENDOR_NAME 0x803A /* XPAK/XENPAK devices only */
/* Vendor-specific MDIO registers */
#define G6XXX_PMA_PMD_VS1 0xC001 /* Vendor-specific register */
#define G6XXX_XGXS_XAUI_VS2 0x18 /* Vendor-specific register */
#define G6XXX_PMA_PMD_VS1_PLL_RESET 0x80
#define G6XXX_PMA_PMD_VS1_REMOVE_PLL_RESET 0x00
#define G6XXX_XGXS_XAUI_VS2_INPUT_MASK 0x0F /* XAUI lanes synchronized */
/* Layout of a single receive descriptor. The controller assumes that this
* structure is packed into 16 bytes, which is a safe assumption with most
* compilers. However, some compilers may insert padding between the fields,
* in which case the structure must be packed in some compiler-specific
* manner. */
struct ixgb_rx_desc {
__le64 buff_addr;
__le16 length;
__le16 reserved;
u8 status;
u8 errors;
__le16 special;
};
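/*
 * Illustrative sketch (not part of the original driver): the 16-byte
 * assumption above could be enforced at build time from any function in a
 * file that already includes <linux/kernel.h>, e.g.
 *
 *	BUILD_BUG_ON(sizeof(struct ixgb_rx_desc) != 16);
 */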
#define IXGB_RX_DESC_STATUS_DD 0x01
#define IXGB_RX_DESC_STATUS_EOP 0x02
#define IXGB_RX_DESC_STATUS_IXSM 0x04
#define IXGB_RX_DESC_STATUS_VP 0x08
#define IXGB_RX_DESC_STATUS_TCPCS 0x20
#define IXGB_RX_DESC_STATUS_IPCS 0x40
#define IXGB_RX_DESC_STATUS_PIF 0x80
#define IXGB_RX_DESC_ERRORS_CE 0x01
#define IXGB_RX_DESC_ERRORS_SE 0x02
#define IXGB_RX_DESC_ERRORS_P 0x08
#define IXGB_RX_DESC_ERRORS_TCPE 0x20
#define IXGB_RX_DESC_ERRORS_IPE 0x40
#define IXGB_RX_DESC_ERRORS_RXE 0x80
#define IXGB_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
#define IXGB_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
#define IXGB_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority is in upper 3 of 16 */
/* Layout of a single transmit descriptor. The controller assumes that this
* structure is packed into 16 bytes, which is a safe assumption with most
* compilers. However, some compilers may insert padding between the fields,
* in which case the structure must be packed in some compiler-specific
* manner. */
struct ixgb_tx_desc {
__le64 buff_addr;
__le32 cmd_type_len;
u8 status;
u8 popts;
__le16 vlan;
};
#define IXGB_TX_DESC_LENGTH_MASK 0x000FFFFF
#define IXGB_TX_DESC_TYPE_MASK 0x00F00000
#define IXGB_TX_DESC_TYPE_SHIFT 20
#define IXGB_TX_DESC_CMD_MASK 0xFF000000
#define IXGB_TX_DESC_CMD_SHIFT 24
#define IXGB_TX_DESC_CMD_EOP 0x01000000
#define IXGB_TX_DESC_CMD_TSE 0x04000000
#define IXGB_TX_DESC_CMD_RS 0x08000000
#define IXGB_TX_DESC_CMD_VLE 0x40000000
#define IXGB_TX_DESC_CMD_IDE 0x80000000
#define IXGB_TX_DESC_TYPE 0x00100000
#define IXGB_TX_DESC_STATUS_DD 0x01
#define IXGB_TX_DESC_POPTS_IXSM 0x01
#define IXGB_TX_DESC_POPTS_TXSM 0x02
#define IXGB_TX_DESC_SPECIAL_PRI_SHIFT IXGB_RX_DESC_SPECIAL_PRI_SHIFT /* Priority is in upper 3 of 16 */
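/*
 * Illustrative sketch (assumption, not taken from this diff): a transmit
 * descriptor's cmd_type_len word is built from the masks above, roughly as
 *
 *	tx_desc->cmd_type_len = cpu_to_le32(IXGB_TX_DESC_TYPE |
 *					    IXGB_TX_DESC_CMD_RS |
 *					    IXGB_TX_DESC_CMD_EOP |
 *					    (len & IXGB_TX_DESC_LENGTH_MASK));
 *
 * i.e. the buffer length in the low 20 bits, the descriptor type in bits
 * 23:20 and the command flags in the high byte.
 */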
struct ixgb_context_desc {
u8 ipcss;
u8 ipcso;
__le16 ipcse;
u8 tucss;
u8 tucso;
__le16 tucse;
__le32 cmd_type_len;
u8 status;
u8 hdr_len;
__le16 mss;
};
#define IXGB_CONTEXT_DESC_CMD_TCP 0x01000000
#define IXGB_CONTEXT_DESC_CMD_IP 0x02000000
#define IXGB_CONTEXT_DESC_CMD_TSE 0x04000000
#define IXGB_CONTEXT_DESC_CMD_RS 0x08000000
#define IXGB_CONTEXT_DESC_CMD_IDE 0x80000000
#define IXGB_CONTEXT_DESC_TYPE 0x00000000
#define IXGB_CONTEXT_DESC_STATUS_DD 0x01
/* Filters */
#define IXGB_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */
#define IXGB_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
#define IXGB_RAR_ENTRIES 3 /* Number of entries in Rx Address array */
#define IXGB_MEMORY_REGISTER_BASE_ADDRESS 0
#define ENET_HEADER_SIZE 14
#define ENET_FCS_LENGTH 4
#define IXGB_MAX_NUM_MULTICAST_ADDRESSES 128
#define IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS 60
#define IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS 1514
#define IXGB_MAX_JUMBO_FRAME_SIZE 0x3F00
/* Phy Addresses */
#define IXGB_OPTICAL_PHY_ADDR 0x0 /* Optical Module phy address */
#define IXGB_XAUII_PHY_ADDR 0x1 /* Xauii transceiver phy address */
#define IXGB_DIAG_PHY_ADDR 0x1F /* Diagnostic Device phy address */
/* This structure takes a 64k flash and maps it for identification commands */
struct ixgb_flash_buffer {
u8 manufacturer_id;
u8 device_id;
u8 filler1[0x2AA8];
u8 cmd2;
u8 filler2[0x2AAA];
u8 cmd1;
u8 filler3[0xAAAA];
};
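/*
 * Worked offsets for the layout above: manufacturer_id sits at 0x0000,
 * device_id at 0x0001, cmd2 at 0x2AAA and cmd1 at 0x5555 (the standard
 * JEDEC flash command addresses), with the trailing filler padding the
 * structure out to exactly 0x10000 bytes (64 KB).
 */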
/* Flow control parameters */
struct ixgb_fc {
u32 high_water; /* Flow Control High-water */
u32 low_water; /* Flow Control Low-water */
u16 pause_time; /* Flow Control Pause timer */
bool send_xon; /* Flow control send XON */
ixgb_fc_type type; /* Type of flow control */
};
/* The historical defaults for the flow control values are given below. */
#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */
#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */
#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */
/* Phy definitions */
#define IXGB_MAX_PHY_REG_ADDRESS 0xFFFF
#define IXGB_MAX_PHY_ADDRESS 31
#define IXGB_MAX_PHY_DEV_TYPE 31
/* Bus parameters */
struct ixgb_bus {
ixgb_bus_speed speed;
ixgb_bus_width width;
ixgb_bus_type type;
};
struct ixgb_hw {
u8 __iomem *hw_addr;/* Base Address of the hardware */
void *back; /* Pointer to OS-dependent struct */
struct ixgb_fc fc; /* Flow control parameters */
struct ixgb_bus bus; /* Bus parameters */
u32 phy_id; /* Phy Identifier */
u32 phy_addr; /* XGMII address of Phy */
ixgb_mac_type mac_type; /* Identifier for MAC controller */
ixgb_phy_type phy_type; /* Transceiver/phy identifier */
u32 max_frame_size; /* Maximum frame size supported */
u32 mc_filter_type; /* Multicast filter hash type */
u32 num_mc_addrs; /* Number of current Multicast addrs */
u8 curr_mac_addr[ETH_ALEN]; /* Individual address currently programmed in MAC */
u32 num_tx_desc; /* Number of Transmit descriptors */
u32 num_rx_desc; /* Number of Receive descriptors */
u32 rx_buffer_size; /* Size of Receive buffer */
bool link_up; /* true if link is valid */
bool adapter_stopped; /* State of adapter */
u16 device_id; /* device id from PCI configuration space */
u16 vendor_id; /* vendor id from PCI configuration space */
u8 revision_id; /* revision id from PCI configuration space */
u16 subsystem_vendor_id; /* subsystem vendor id from PCI configuration space */
u16 subsystem_id; /* subsystem id from PCI configuration space */
u32 bar0; /* Base Address registers */
u32 bar1;
u32 bar2;
u32 bar3;
u16 pci_cmd_word; /* PCI command register id from PCI configuration space */
__le16 eeprom[IXGB_EEPROM_SIZE]; /* EEPROM contents read at init time */
unsigned long io_base; /* Our I/O mapped location */
u32 lastLFC;
u32 lastRFC;
};
/* Statistics reported by the hardware */
struct ixgb_hw_stats {
u64 tprl;
u64 tprh;
u64 gprcl;
u64 gprch;
u64 bprcl;
u64 bprch;
u64 mprcl;
u64 mprch;
u64 uprcl;
u64 uprch;
u64 vprcl;
u64 vprch;
u64 jprcl;
u64 jprch;
u64 gorcl;
u64 gorch;
u64 torl;
u64 torh;
u64 rnbc;
u64 ruc;
u64 roc;
u64 rlec;
u64 crcerrs;
u64 icbc;
u64 ecbc;
u64 mpc;
u64 tptl;
u64 tpth;
u64 gptcl;
u64 gptch;
u64 bptcl;
u64 bptch;
u64 mptcl;
u64 mptch;
u64 uptcl;
u64 uptch;
u64 vptcl;
u64 vptch;
u64 jptcl;
u64 jptch;
u64 gotcl;
u64 gotch;
u64 totl;
u64 toth;
u64 dc;
u64 plt64c;
u64 tsctc;
u64 tsctfc;
u64 ibic;
u64 rfc;
u64 lfc;
u64 pfrc;
u64 pftc;
u64 mcfrc;
u64 mcftc;
u64 xonrxc;
u64 xontxc;
u64 xoffrxc;
u64 xofftxc;
u64 rjc;
};
/* Function Prototypes */
extern bool ixgb_adapter_stop(struct ixgb_hw *hw);
extern bool ixgb_init_hw(struct ixgb_hw *hw);
extern bool ixgb_adapter_start(struct ixgb_hw *hw);
extern void ixgb_check_for_link(struct ixgb_hw *hw);
extern bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
extern void ixgb_rar_set(struct ixgb_hw *hw,
u8 *addr,
u32 index);
/* Filters (multicast, vlan, receive) */
extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw,
u8 *mc_addr_list,
u32 mc_addr_count,
u32 pad);
/* Vfta functions */
extern void ixgb_write_vfta(struct ixgb_hw *hw,
u32 offset,
u32 value);
/* Access functions to eeprom data */
void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, u8 *mac_addr);
u32 ixgb_get_ee_pba_number(struct ixgb_hw *hw);
u16 ixgb_get_ee_device_id(struct ixgb_hw *hw);
bool ixgb_get_eeprom_data(struct ixgb_hw *hw);
__le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index);
/* Everything else */
void ixgb_led_on(struct ixgb_hw *hw);
void ixgb_led_off(struct ixgb_hw *hw);
void ixgb_write_pci_cfg(struct ixgb_hw *hw,
u32 reg,
u16 * value);
#endif /* _IXGB_HW_H_ */

View File

@ -0,0 +1,53 @@
/*******************************************************************************
Intel PRO/10GbE Linux driver
Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _IXGB_IDS_H_
#define _IXGB_IDS_H_
/**********************************************************************
** The Device and Vendor IDs for 10 Gigabit MACs
**********************************************************************/
#define INTEL_VENDOR_ID 0x8086
#define INTEL_SUBVENDOR_ID 0x8086
#define SUN_VENDOR_ID 0x108E
#define SUN_SUBVENDOR_ID 0x108E
#define IXGB_DEVICE_ID_82597EX 0x1048
#define IXGB_DEVICE_ID_82597EX_SR 0x1A48
#define IXGB_DEVICE_ID_82597EX_LR 0x1B48
#define IXGB_SUBDEVICE_ID_A11F 0xA11F
#define IXGB_SUBDEVICE_ID_A01F 0xA01F
#define IXGB_DEVICE_ID_82597EX_CX4 0x109E
#define IXGB_SUBDEVICE_ID_A00C 0xA00C
#define IXGB_SUBDEVICE_ID_A01C 0xA01C
#define IXGB_SUBDEVICE_ID_7036 0x7036
#endif /* #ifndef _IXGB_IDS_H_ */
/* End of File */

File diff suppressed because it is too large

View File

@ -0,0 +1,64 @@
/*******************************************************************************
Intel PRO/10GbE Linux driver
Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
/* glue for the OS independent part of ixgb
* includes register access macros
*/
#ifndef _IXGB_OSDEP_H_
#define _IXGB_OSDEP_H_
#include <linux/types.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/if_ether.h>
#undef ASSERT
#define ASSERT(x) BUG_ON(!(x))
#define ENTER() pr_debug("%s\n", __func__)
#define IXGB_WRITE_REG(a, reg, value) ( \
writel((value), ((a)->hw_addr + IXGB_##reg)))
#define IXGB_READ_REG(a, reg) ( \
readl((a)->hw_addr + IXGB_##reg))
#define IXGB_WRITE_REG_ARRAY(a, reg, offset, value) ( \
writel((value), ((a)->hw_addr + IXGB_##reg + ((offset) << 2))))
#define IXGB_READ_REG_ARRAY(a, reg, offset) ( \
readl((a)->hw_addr + IXGB_##reg + ((offset) << 2)))
#define IXGB_WRITE_FLUSH(a) IXGB_READ_REG(a, STATUS)
#define IXGB_MEMCPY memcpy
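/*
 * Illustrative usage sketch (not taken from this diff): a register write
 * followed by a flush of the posted write would look roughly like
 *
 *	IXGB_WRITE_REG(hw, CTRL0, ctrl_val);
 *	IXGB_WRITE_FLUSH(hw);
 *
 * where hw is a struct ixgb_hw pointer and CTRL0 is pasted into IXGB_CTRL0
 * by the macro; the register and variable names here are for illustration
 * only.
 */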
#endif /* _IXGB_OSDEP_H_ */

View File

@ -0,0 +1,469 @@
/*******************************************************************************
Intel PRO/10GbE Linux driver
Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ixgb.h"
/* This is the only thing that needs to be changed to adjust the
* maximum number of ports that the driver can manage.
*/
#define IXGB_MAX_NIC 8
#define OPTION_UNSET -1
#define OPTION_DISABLED 0
#define OPTION_ENABLED 1
/* All parameters are treated the same, as an integer array of values.
* This macro just reduces the need to repeat the same declaration code
* over and over (plus this helps to avoid typo bugs).
*/
#define IXGB_PARAM_INIT { [0 ... IXGB_MAX_NIC] = OPTION_UNSET }
#define IXGB_PARAM(X, desc) \
static int __devinitdata X[IXGB_MAX_NIC+1] \
= IXGB_PARAM_INIT; \
static unsigned int num_##X = 0; \
module_param_array_named(X, X, int, &num_##X, 0); \
MODULE_PARM_DESC(X, desc);
/* Transmit Descriptor Count
*
* Valid Range: 64-4096
*
* Default Value: 256
*/
IXGB_PARAM(TxDescriptors, "Number of transmit descriptors");
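/*
 * Illustrative expansion sketch (not from this diff): the IXGB_PARAM()
 * invocation above roughly expands to
 *
 *	static int __devinitdata TxDescriptors[IXGB_MAX_NIC+1] = IXGB_PARAM_INIT;
 *	static unsigned int num_TxDescriptors = 0;
 *	module_param_array_named(TxDescriptors, TxDescriptors, int,
 *				 &num_TxDescriptors, 0);
 *	MODULE_PARM_DESC(TxDescriptors, "Number of transmit descriptors");
 *
 * so a per-board value can be supplied at load time, e.g.
 * "modprobe ixgb TxDescriptors=80,128".
 */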
/* Receive Descriptor Count
*
* Valid Range: 64-4096
*
* Default Value: 1024
*/
IXGB_PARAM(RxDescriptors, "Number of receive descriptors");
/* User Specified Flow Control Override
*
* Valid Range: 0-3
* - 0 - No Flow Control
* - 1 - Rx only, respond to PAUSE frames but do not generate them
* - 2 - Tx only, generate PAUSE frames but ignore them on receive
* - 3 - Full Flow Control Support
*
* Default Value: 2 - Tx only (silicon bug avoidance)
*/
IXGB_PARAM(FlowControl, "Flow Control setting");
/* XsumRX - Receive Checksum Offload Enable/Disable
*
* Valid Range: 0, 1
* - 0 - disables all checksum offload
* - 1 - enables receive IP/TCP/UDP checksum offload
* on 82597 based NICs
*
* Default Value: 1
*/
IXGB_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
/* Transmit Interrupt Delay in units of 0.8192 microseconds
*
* Valid Range: 0-65535
*
* Default Value: 32
*/
IXGB_PARAM(TxIntDelay, "Transmit Interrupt Delay");
/* Receive Interrupt Delay in units of 0.8192 microseconds
*
* Valid Range: 0-65535
*
* Default Value: 72
*/
IXGB_PARAM(RxIntDelay, "Receive Interrupt Delay");
/* Receive Flow control high threshold (when we send a pause frame)
* (FCRTH)
*
* Valid Range: 1,536 - 262,136 (0x600 - 0x3FFF8, 8 byte granularity)
*
* Default Value: 196,608 (0x30000)
*/
IXGB_PARAM(RxFCHighThresh, "Receive Flow Control High Threshold");
/* Receive Flow control low threshold (when we send a resume frame)
* (FCRTL)
*
* Valid Range: 64 - 262,136 (0x40 - 0x3FFF8, 8 byte granularity)
* must be less than high threshold by at least 8 bytes
*
* Default Value: 163,840 (0x28000)
*/
IXGB_PARAM(RxFCLowThresh, "Receive Flow Control Low Threshold");
/* Flow control request timeout (how long to pause the link partner's tx)
* (PAP 15:0)
*
* Valid Range: 1 - 65535
*
* Default Value: 65535 (0xffff) (we'll send an xon if we recover)
*/
IXGB_PARAM(FCReqTimeout, "Flow Control Request Timeout");
/* Interrupt Delay Enable
*
* Valid Range: 0, 1
*
* - 0 - disables transmit interrupt delay
* - 1 - enables transmit interrupt delay
*
* Default Value: 1
*/
IXGB_PARAM(IntDelayEnable, "Transmit Interrupt Delay Enable");
#define DEFAULT_TIDV 32
#define MAX_TIDV 0xFFFF
#define MIN_TIDV 0
#define DEFAULT_RDTR 72
#define MAX_RDTR 0xFFFF
#define MIN_RDTR 0
#define XSUMRX_DEFAULT OPTION_ENABLED
#define DEFAULT_FCRTL 0x28000
#define DEFAULT_FCRTH 0x30000
#define MIN_FCRTL 0
#define MAX_FCRTL 0x3FFE8
#define MIN_FCRTH 8
#define MAX_FCRTH 0x3FFF0
#define MIN_FCPAUSE 1
#define MAX_FCPAUSE 0xffff
#define DEFAULT_FCPAUSE 0xFFFF /* this may be too long */
struct ixgb_option {
enum { enable_option, range_option, list_option } type;
const char *name;
const char *err;
int def;
union {
struct { /* range_option info */
int min;
int max;
} r;
struct { /* list_option info */
int nr;
const struct ixgb_opt_list {
int i;
const char *str;
} *p;
} l;
} arg;
};
static int __devinit
ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
{
if (*value == OPTION_UNSET) {
*value = opt->def;
return 0;
}
switch (opt->type) {
case enable_option:
switch (*value) {
case OPTION_ENABLED:
pr_info("%s Enabled\n", opt->name);
return 0;
case OPTION_DISABLED:
pr_info("%s Disabled\n", opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
pr_info("%s set to %i\n", opt->name, *value);
return 0;
}
break;
case list_option: {
int i;
const struct ixgb_opt_list *ent;
for (i = 0; i < opt->arg.l.nr; i++) {
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
pr_info("%s\n", ent->str);
return 0;
}
}
}
break;
default:
BUG();
}
pr_info("Invalid %s specified (%i) %s\n", opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
/**
* ixgb_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
*
* This routine checks all command line parameters for valid user
* input. If an invalid value is given, or if no user specified
* value exists, a default value is used. The final value is stored
* in a variable in the adapter structure.
**/
void __devinit
ixgb_check_options(struct ixgb_adapter *adapter)
{
int bd = adapter->bd_number;
if (bd >= IXGB_MAX_NIC) {
pr_notice("Warning: no configuration for board #%i\n", bd);
pr_notice("Using defaults for all values\n");
}
{ /* Transmit Descriptor Count */
static const struct ixgb_option opt = {
.type = range_option,
.name = "Transmit Descriptors",
.err = "using default of " __MODULE_STRING(DEFAULT_TXD),
.def = DEFAULT_TXD,
.arg = { .r = { .min = MIN_TXD,
.max = MAX_TXD}}
};
struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
if (num_TxDescriptors > bd) {
tx_ring->count = TxDescriptors[bd];
ixgb_validate_option(&tx_ring->count, &opt);
} else {
tx_ring->count = opt.def;
}
tx_ring->count = ALIGN(tx_ring->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
}
{ /* Receive Descriptor Count */
static const struct ixgb_option opt = {
.type = range_option,
.name = "Receive Descriptors",
.err = "using default of " __MODULE_STRING(DEFAULT_RXD),
.def = DEFAULT_RXD,
.arg = { .r = { .min = MIN_RXD,
.max = MAX_RXD}}
};
struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
if (num_RxDescriptors > bd) {
rx_ring->count = RxDescriptors[bd];
ixgb_validate_option(&rx_ring->count, &opt);
} else {
rx_ring->count = opt.def;
}
rx_ring->count = ALIGN(rx_ring->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
}
{ /* Receive Checksum Offload Enable */
static const struct ixgb_option opt = {
.type = enable_option,
.name = "Receive Checksum Offload",
.err = "defaulting to Enabled",
.def = OPTION_ENABLED
};
if (num_XsumRX > bd) {
unsigned int rx_csum = XsumRX[bd];
ixgb_validate_option(&rx_csum, &opt);
adapter->rx_csum = rx_csum;
} else {
adapter->rx_csum = opt.def;
}
}
{ /* Flow Control */
static const struct ixgb_opt_list fc_list[] = {
{ ixgb_fc_none, "Flow Control Disabled" },
{ ixgb_fc_rx_pause, "Flow Control Receive Only" },
{ ixgb_fc_tx_pause, "Flow Control Transmit Only" },
{ ixgb_fc_full, "Flow Control Enabled" },
{ ixgb_fc_default, "Flow Control Hardware Default" }
};
static const struct ixgb_option opt = {
.type = list_option,
.name = "Flow Control",
.err = "reading default settings from EEPROM",
.def = ixgb_fc_tx_pause,
.arg = { .l = { .nr = ARRAY_SIZE(fc_list),
.p = fc_list }}
};
if (num_FlowControl > bd) {
unsigned int fc = FlowControl[bd];
ixgb_validate_option(&fc, &opt);
adapter->hw.fc.type = fc;
} else {
adapter->hw.fc.type = opt.def;
}
}
{ /* Receive Flow Control High Threshold */
static const struct ixgb_option opt = {
.type = range_option,
.name = "Rx Flow Control High Threshold",
.err = "using default of " __MODULE_STRING(DEFAULT_FCRTH),
.def = DEFAULT_FCRTH,
.arg = { .r = { .min = MIN_FCRTH,
.max = MAX_FCRTH}}
};
if (num_RxFCHighThresh > bd) {
adapter->hw.fc.high_water = RxFCHighThresh[bd];
ixgb_validate_option(&adapter->hw.fc.high_water, &opt);
} else {
adapter->hw.fc.high_water = opt.def;
}
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause))
pr_info("Ignoring RxFCHighThresh when no RxFC\n");
}
{ /* Receive Flow Control Low Threshold */
static const struct ixgb_option opt = {
.type = range_option,
.name = "Rx Flow Control Low Threshold",
.err = "using default of " __MODULE_STRING(DEFAULT_FCRTL),
.def = DEFAULT_FCRTL,
.arg = { .r = { .min = MIN_FCRTL,
.max = MAX_FCRTL}}
};
if (num_RxFCLowThresh > bd) {
adapter->hw.fc.low_water = RxFCLowThresh[bd];
ixgb_validate_option(&adapter->hw.fc.low_water, &opt);
} else {
adapter->hw.fc.low_water = opt.def;
}
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause))
pr_info("Ignoring RxFCLowThresh when no RxFC\n");
}
{ /* Flow Control Pause Time Request*/
static const struct ixgb_option opt = {
.type = range_option,
.name = "Flow Control Pause Time Request",
.err = "using default of "__MODULE_STRING(DEFAULT_FCPAUSE),
.def = DEFAULT_FCPAUSE,
.arg = { .r = { .min = MIN_FCPAUSE,
.max = MAX_FCPAUSE}}
};
if (num_FCReqTimeout > bd) {
unsigned int pause_time = FCReqTimeout[bd];
ixgb_validate_option(&pause_time, &opt);
adapter->hw.fc.pause_time = pause_time;
} else {
adapter->hw.fc.pause_time = opt.def;
}
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause))
pr_info("Ignoring FCReqTimeout when no RxFC\n");
}
/* high low and spacing check for rx flow control thresholds */
if (adapter->hw.fc.type & ixgb_fc_tx_pause) {
/* high must be greater than low */
if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) {
/* set defaults */
pr_info("RxFCHighThresh must be >= (RxFCLowThresh + 8), Using Defaults\n");
adapter->hw.fc.high_water = DEFAULT_FCRTH;
adapter->hw.fc.low_water = DEFAULT_FCRTL;
}
}
{ /* Receive Interrupt Delay */
static const struct ixgb_option opt = {
.type = range_option,
.name = "Receive Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_RDTR),
.def = DEFAULT_RDTR,
.arg = { .r = { .min = MIN_RDTR,
.max = MAX_RDTR}}
};
if (num_RxIntDelay > bd) {
adapter->rx_int_delay = RxIntDelay[bd];
ixgb_validate_option(&adapter->rx_int_delay, &opt);
} else {
adapter->rx_int_delay = opt.def;
}
}
{ /* Transmit Interrupt Delay */
static const struct ixgb_option opt = {
.type = range_option,
.name = "Transmit Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_TIDV),
.def = DEFAULT_TIDV,
.arg = { .r = { .min = MIN_TIDV,
.max = MAX_TIDV}}
};
if (num_TxIntDelay > bd) {
adapter->tx_int_delay = TxIntDelay[bd];
ixgb_validate_option(&adapter->tx_int_delay, &opt);
} else {
adapter->tx_int_delay = opt.def;
}
}
{ /* Transmit Interrupt Delay Enable */
static const struct ixgb_option opt = {
.type = enable_option,
.name = "Tx Interrupt Delay Enable",
.err = "defaulting to Enabled",
.def = OPTION_ENABLED
};
if (num_IntDelayEnable > bd) {
unsigned int ide = IntDelayEnable[bd];
ixgb_validate_option(&ide, &opt);
adapter->tx_int_delay_enable = ide;
} else {
adapter->tx_int_delay_enable = opt.def;
}
}
}

View File

@ -0,0 +1,42 @@
################################################################################
#
# Intel 10 Gigabit PCI Express Linux driver
# Copyright(c) 1999 - 2010 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
#
# Contact Information:
# Linux NICS <linux.nics@intel.com>
# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
#
################################################################################
#
# Makefile for the Intel(R) 10GbE PCI Express ethernet driver
#
obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
ixgbe_mbx.o ixgbe_x540.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
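# Note: $(CONFIG_FCOE:m=y) is a GNU make substitution reference that maps
# "m" to "y", so ixgbe_fcoe.o is linked into ixgbe.o whenever FCoE support
# is built either as a module or into the kernel.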

View File

@ -0,0 +1,632 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _IXGBE_H_
#define _IXGBE_H_
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/cpumask.h>
#include <linux/aer.h>
#include <linux/if_vlan.h>
#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb.h"
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
#define IXGBE_FCOE
#include "ixgbe_fcoe.h"
#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
#ifdef CONFIG_IXGBE_DCA
#include <linux/dca.h>
#endif
/* common prefix used by pr_<> macros */
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* TX/RX descriptor defines */
#define IXGBE_DEFAULT_TXD 512
#define IXGBE_DEFAULT_TX_WORK 256
#define IXGBE_MAX_TXD 4096
#define IXGBE_MIN_TXD 64
#define IXGBE_DEFAULT_RXD 512
#define IXGBE_MAX_RXD 4096
#define IXGBE_MIN_RXD 64
/* flow control */
#define IXGBE_MIN_FCRTL 0x40
#define IXGBE_MAX_FCRTL 0x7FF80
#define IXGBE_MIN_FCRTH 0x600
#define IXGBE_MAX_FCRTH 0x7FFF0
#define IXGBE_DEFAULT_FCPAUSE 0xFFFF
#define IXGBE_MIN_FCPAUSE 0
#define IXGBE_MAX_FCPAUSE 0xFFFF
/* Supported Rx Buffer Sizes */
#define IXGBE_RXBUFFER_512 512 /* Used for packet split */
#define IXGBE_RXBUFFER_2K 2048
#define IXGBE_RXBUFFER_3K 3072
#define IXGBE_RXBUFFER_4K 4096
#define IXGBE_RXBUFFER_7K 7168
#define IXGBE_RXBUFFER_8K 8192
#define IXGBE_RXBUFFER_15K 15360
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
/*
* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
* reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
* this adds up to 512 bytes of extra data meaning the smallest allocation
* we could have is 1K.
* i.e. RXBUFFER_512 --> size-1024 slab
*/
#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512
#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */
#define IXGBE_TX_FLAGS_CSUM (u32)(1)
#define IXGBE_TX_FLAGS_HW_VLAN (u32)(1 << 1)
#define IXGBE_TX_FLAGS_SW_VLAN (u32)(1 << 2)
#define IXGBE_TX_FLAGS_TSO (u32)(1 << 3)
#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 4)
#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 5)
#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6)
#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7)
#define IXGBE_TX_FLAGS_MAPPED_AS_PAGE (u32)(1 << 8)
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
#define IXGBE_MAX_RSC_INT_RATE 162760
#define IXGBE_MAX_VF_MC_ENTRIES 30
#define IXGBE_MAX_VF_FUNCTIONS 64
#define IXGBE_MAX_VFTA_ENTRIES 128
#define MAX_EMULATION_MAC_ADDRS 16
#define IXGBE_MAX_PF_MACVLANS 15
#define VMDQ_P(p) ((p) + adapter->num_vfs)
#define IXGBE_82599_VF_DEVICE_ID 0x10ED
#define IXGBE_X540_VF_DEVICE_ID 0x1515
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
u16 num_vf_mc_hashes;
u16 default_vf_vlan_id;
u16 vlans_enabled;
bool clear_to_send;
bool pf_set_mac;
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
u16 pf_qos;
u16 tx_rate;
u16 vlan_count;
u8 spoofchk_enabled;
struct pci_dev *vfdev;
};
struct vf_macvlans {
struct list_head l;
int vf;
int rar_entry;
bool free;
bool is_macvlan;
u8 vf_macvlan[ETH_ALEN];
};
#define IXGBE_MAX_TXD_PWR 14
#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
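/* Worked example (assuming a 4 KiB PAGE_SIZE): IXGBE_MAX_DATA_PER_TXD is
 * 1 << 14 = 16384 bytes, so TXD_USE_COUNT(PAGE_SIZE) evaluates to 1 and
 * DESC_NEEDED comes to MAX_SKB_FRAGS + 4, i.e. one descriptor per
 * page-sized fragment plus a fixed slack of four descriptors.
 */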
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {
union ixgbe_adv_tx_desc *next_to_watch;
unsigned long time_stamp;
dma_addr_t dma;
u32 length;
u32 tx_flags;
struct sk_buff *skb;
u32 bytecount;
u16 gso_segs;
};
struct ixgbe_rx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
struct page *page;
dma_addr_t page_dma;
unsigned int page_offset;
};
struct ixgbe_queue_stats {
u64 packets;
u64 bytes;
};
struct ixgbe_tx_queue_stats {
u64 restart_queue;
u64 tx_busy;
u64 completed;
u64 tx_done_old;
};
struct ixgbe_rx_queue_stats {
u64 rsc_count;
u64 rsc_flush;
u64 non_eop_descs;
u64 alloc_rx_page_failed;
u64 alloc_rx_buff_failed;
};
enum ixgbe_ring_state_t {
__IXGBE_TX_FDIR_INIT_DONE,
__IXGBE_TX_DETECT_HANG,
__IXGBE_HANG_CHECK_ARMED,
__IXGBE_RX_PS_ENABLED,
__IXGBE_RX_RSC_ENABLED,
};
#define ring_is_ps_enabled(ring) \
test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
#define set_ring_ps_enabled(ring) \
set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
#define clear_ring_ps_enabled(ring) \
clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
#define check_for_tx_hang(ring) \
test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define ring_is_rsc_enabled(ring) \
test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define set_ring_rsc_enabled(ring) \
set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define clear_ring_rsc_enabled(ring) \
clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
struct ixgbe_ring {
struct ixgbe_ring *next; /* pointer to next ring in q_vector */
void *desc; /* descriptor ring memory */
struct device *dev; /* device for DMA mapping */
struct net_device *netdev; /* netdev ring belongs to */
union {
struct ixgbe_tx_buffer *tx_buffer_info;
struct ixgbe_rx_buffer *rx_buffer_info;
};
unsigned long state;
u8 __iomem *tail;
u16 count; /* amount of descriptors */
u16 rx_buf_len;
u8 queue_index; /* needed for multiqueue queue management */
u8 reg_idx; /* holds the special value that gets
* the hardware register offset
* associated with this ring, which is
* different for DCB and RSS modes
*/
u8 atr_sample_rate;
u8 atr_count;
u16 next_to_use;
u16 next_to_clean;
u8 dcb_tc;
struct ixgbe_queue_stats stats;
struct u64_stats_sync syncp;
union {
struct ixgbe_tx_queue_stats tx_stats;
struct ixgbe_rx_queue_stats rx_stats;
};
int numa_node;
unsigned int size; /* length in bytes */
dma_addr_t dma; /* phys. address of descriptor ring */
struct rcu_head rcu;
struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */
} ____cacheline_internodealigned_in_smp;
enum ixgbe_ring_f_enum {
RING_F_NONE = 0,
RING_F_VMDQ, /* SR-IOV uses the same ring feature */
RING_F_RSS,
RING_F_FDIR,
#ifdef IXGBE_FCOE
RING_F_FCOE,
#endif /* IXGBE_FCOE */
RING_F_ARRAY_SIZE /* must be last in enum set */
};
#define IXGBE_MAX_RSS_INDICES 16
#define IXGBE_MAX_VMDQ_INDICES 64
#define IXGBE_MAX_FDIR_INDICES 64
#ifdef IXGBE_FCOE
#define IXGBE_MAX_FCOE_INDICES 8
#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
#else
#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
#endif /* IXGBE_FCOE */
struct ixgbe_ring_feature {
int indices;
int mask;
} ____cacheline_internodealigned_in_smp;
struct ixgbe_ring_container {
struct ixgbe_ring *ring; /* pointer to linked list of rings */
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_packets; /* total packets processed this int */
u16 work_limit; /* total work allowed per interrupt */
u8 count; /* total number of rings in vector */
u8 itr; /* current ITR setting for ring */
};
#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
? 8 : 1)
#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
/* MAX_MSIX_Q_VECTORS of these are allocated,
* but we only use one per queue-specific vector.
*/
struct ixgbe_q_vector {
struct ixgbe_adapter *adapter;
#ifdef CONFIG_IXGBE_DCA
int cpu; /* CPU for DCA */
#endif
u16 v_idx; /* index of q_vector within array, also used for
* finding the bit in EICR and friends that
* represents the vector for this ring */
u16 itr; /* Interrupt throttle rate written to EITR */
struct ixgbe_ring_container rx, tx;
struct napi_struct napi;
cpumask_var_t affinity_mask;
char name[IFNAMSIZ + 9];
};
/*
* microsecond values for various ITR rates shifted by 2 to fit itr register
* with the first 3 bits reserved 0
*/
#define IXGBE_MIN_RSC_ITR 24
#define IXGBE_100K_ITR 40
#define IXGBE_20K_ITR 200
#define IXGBE_10K_ITR 400
#define IXGBE_8K_ITR 500
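/*
 * Worked example: a target of roughly 20000 interrupts per second is a
 * 50 usec interval, and 50 << 2 = 200 is the IXGBE_20K_ITR value above;
 * likewise 100 usec << 2 = 400 for IXGBE_10K_ITR and 125 usec << 2 = 500
 * for IXGBE_8K_ITR.
 */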
static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
{
u16 ntc = ring->next_to_clean;
u16 ntu = ring->next_to_use;
return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
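/*
 * Worked example: with ring->count = 512, next_to_clean = 4 and
 * next_to_use = 10, the helper above returns 512 + 4 - 10 - 1 = 505; the
 * trailing "- 1" leaves one descriptor unused so that a full ring can be
 * distinguished from an empty one.
 */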
#define IXGBE_RX_DESC_ADV(R, i) \
(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBE_TX_DESC_ADV(R, i) \
(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
#define IXGBE_TX_CTXTDESC_ADV(R, i) \
(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
#ifdef IXGBE_FCOE
/* Use 3K as the baby jumbo frame size for FCoE */
#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072
#endif /* IXGBE_FCOE */
#define OTHER_VECTOR 1
#define NON_Q_VECTORS (OTHER_VECTOR)
#define MAX_MSIX_VECTORS_82599 64
#define MAX_MSIX_Q_VECTORS_82599 64
#define MAX_MSIX_VECTORS_82598 18
#define MAX_MSIX_Q_VECTORS_82598 16
#define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82599
#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
#define MIN_MSIX_Q_VECTORS 2
#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
/* board specific private data structure */
struct ixgbe_adapter {
unsigned long state;
/* Some features need tri-state capability,
* thus the additional *_CAPABLE flags.
*/
u32 flags;
#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 1)
#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2)
#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3)
#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 4)
#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 6)
#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 7)
#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 8)
#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 9)
#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 10)
#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11)
#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 12)
#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13)
#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 14)
#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16)
#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17)
#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18)
#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19)
#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20)
#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22)
#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 23)
#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 24)
#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 25)
#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 26)
#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 27)
#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 28)
#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 29)
u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2)
#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 3)
#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 4)
#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 5)
#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 6)
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u16 bd_number;
struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
/* DCB parameters */
struct ieee_pfc *ixgbe_ieee_pfc;
struct ieee_ets *ixgbe_ieee_ets;
struct ixgbe_dcb_config dcb_cfg;
struct ixgbe_dcb_config temp_dcb_cfg;
u8 dcb_set_bitmap;
u8 dcbx_cap;
enum ixgbe_fc_mode last_lfc_mode;
/* Interrupt Throttle Rate */
u32 rx_itr_setting;
u32 tx_itr_setting;
u16 eitr_low;
u16 eitr_high;
/* Work limits */
u16 tx_work_limit;
/* TX */
struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
int num_tx_queues;
u32 tx_timeout_count;
bool detect_tx_hung;
u64 restart_queue;
u64 lsc_int;
/* RX */
struct ixgbe_ring *rx_ring[MAX_RX_QUEUES] ____cacheline_aligned_in_smp;
int num_rx_queues;
int num_rx_pools; /* == num_rx_queues in 82598 */
int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
u64 non_eop_descs;
int num_msix_vectors;
int max_msix_q_vectors; /* true count of q_vectors for device */
struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
struct msix_entry *msix_entries;
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
/* default to trying for four seconds */
#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
u32 test_icr;
struct ixgbe_ring test_tx_ring;
struct ixgbe_ring test_rx_ring;
/* structs defined in ixgbe_hw.h */
struct ixgbe_hw hw;
u16 msg_enable;
struct ixgbe_hw_stats stats;
/* Interrupt Throttle Rate */
u32 rx_eitr_param;
u32 tx_eitr_param;
u64 tx_busy;
unsigned int tx_ring_count;
unsigned int rx_ring_count;
u32 link_speed;
bool link_up;
unsigned long link_check_timeout;
struct work_struct service_task;
struct timer_list service_timer;
u32 fdir_pballoc;
u32 atr_sample_rate;
unsigned long fdir_overflow; /* number of times ATR was backed off */
spinlock_t fdir_perfect_lock;
#ifdef IXGBE_FCOE
struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
u64 rsc_total_count;
u64 rsc_total_flush;
u32 wol;
u16 eeprom_verh;
u16 eeprom_verl;
u16 eeprom_cap;
int node;
u32 led_reg;
u32 interrupt_event;
/* SR-IOV */
DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
unsigned int num_vfs;
struct vf_data_storage *vfinfo;
int vf_rate_link_speed;
struct vf_macvlans vf_mvs;
struct vf_macvlans *mv_list;
struct hlist_head fdir_filter_list;
union ixgbe_atr_input fdir_mask;
int fdir_filter_count;
u32 timer_event_accumulator;
u32 vferr_refcount;
};
struct ixgbe_fdir_filter {
struct hlist_node fdir_node;
union ixgbe_atr_input filter;
u16 sw_idx;
u16 action;
};
enum ixgbe_state_t {
__IXGBE_TESTING,
__IXGBE_RESETTING,
__IXGBE_DOWN,
__IXGBE_SERVICE_SCHED,
__IXGBE_IN_SFP_INIT,
};
struct ixgbe_rsc_cb {
dma_addr_t dma;
u16 skb_cnt;
bool delay_unmap;
};
#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
enum ixgbe_boards {
board_82598,
board_82599,
board_X540,
};
extern struct ixgbe_info ixgbe_82598_info;
extern struct ixgbe_info ixgbe_82599_info;
extern struct ixgbe_info ixgbe_X540_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops dcbnl_ops;
extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
struct ixgbe_dcb_config *dst_dcb_cfg,
int tc_max);
#endif
extern char ixgbe_driver_name[];
extern const char ixgbe_driver_version[];
extern void ixgbe_up(struct ixgbe_adapter *adapter);
extern void ixgbe_down(struct ixgbe_adapter *adapter);
extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
extern void ixgbe_reset(struct ixgbe_adapter *adapter);
extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
struct ixgbe_ring *);
extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
struct ixgbe_adapter *,
struct ixgbe_ring *);
extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
struct ixgbe_tx_buffer *);
extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
extern int ethtool_ioctl(struct ifreq *ifr);
extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_hash_dword input,
union ixgbe_atr_hash_dword common,
u8 queue);
extern s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input_mask);
extern s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id, u8 queue);
extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id);
extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
union ixgbe_atr_input *mask);
extern void ixgbe_set_rx_mode(struct net_device *netdev);
extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
extern void ixgbe_do_reset(struct net_device *netdev);
#ifdef IXGBE_FCOE
extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
u32 tx_flags, u8 *hdr_len);
extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb,
u32 staterr);
extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
struct scatterlist *sgl, unsigned int sgc);
extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
struct scatterlist *sgl, unsigned int sgc);
extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
extern int ixgbe_fcoe_enable(struct net_device *netdev);
extern int ixgbe_fcoe_disable(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
#endif /* CONFIG_IXGBE_DCB */
extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
#endif /* IXGBE_FCOE */
#endif /* _IXGBE_H_ */

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,145 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _IXGBE_COMMON_H_
#define _IXGBE_COMMON_H_
#include "ixgbe_type.h"
#include "ixgbe.h"
u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
u32 pba_num_size);
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data);
s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val);
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr);
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
struct net_device *netdev);
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num);
s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
s32 ixgbe_validate_mac_addr(u8 *mac_addr);
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
u32 vind, bool vlan_on);
s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete);
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
u16 *wwpn_prefix);
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
u32 headroom, int strategy);
#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
#ifndef writeq
#define writeq(val, addr) do { writel((u32) (val), (addr)); \
writel((u32) ((val) >> 32), (addr) + 4); } while (0)
#endif
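/*
 * Note: on platforms without a native writeq() the fallback above emulates
 * the 64-bit access as two 32-bit writes, low dword first, which is what
 * IXGBE_WRITE_REG64() below relies on.
 */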
#define IXGBE_WRITE_REG64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) (\
writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
#define IXGBE_READ_REG_ARRAY(a, reg, offset) (\
readl((a)->hw_addr + (reg) + ((offset) << 2)))
#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
#define hw_dbg(hw, format, arg...) \
netdev_dbg(((struct ixgbe_adapter *)(hw->back))->netdev, format, ##arg)
#define e_dev_info(format, arg...) \
dev_info(&adapter->pdev->dev, format, ## arg)
#define e_dev_warn(format, arg...) \
dev_warn(&adapter->pdev->dev, format, ## arg)
#define e_dev_err(format, arg...) \
dev_err(&adapter->pdev->dev, format, ## arg)
#define e_dev_notice(format, arg...) \
dev_notice(&adapter->pdev->dev, format, ## arg)
#define e_info(msglvl, format, arg...) \
netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_err(msglvl, format, arg...) \
netif_err(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_warn(msglvl, format, arg...) \
netif_warn(adapter, msglvl, adapter->netdev, format, ## arg)
#define e_crit(msglvl, format, arg...) \
netif_crit(adapter, msglvl, adapter->netdev, format, ## arg)
#endif /* _IXGBE_COMMON_H_ */

View File

@ -0,0 +1,366 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82598.h"
#include "ixgbe_dcb_82599.h"
/**
* ixgbe_ieee_credits - Calculate IEEE traffic class credits
*
* Calculates the IEEE traffic class credits from the configured bandwidth
* percentages.  Credits are the smallest unit programmable into the
* underlying hardware.  The IEEE 802.1Qaz specification does not use
* bandwidth groups, so this is much simplified from the CEE case.
*/
static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
__u16 *max, int max_frame)
{
int min_percent = 100;
int min_credit, multiplier;
int i;
min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
DCB_CREDIT_QUANTUM;
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
if (bw[i] < min_percent && bw[i])
min_percent = bw[i];
}
multiplier = (min_credit / min_percent) + 1;
/* Find out the hw credits for each TC */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
int val = min(bw[i] * multiplier, MAX_CREDIT_REFILL);
if (val < min_credit)
val = min_credit;
refill[i] = val;
max[i] = bw[i] ? (bw[i] * MAX_CREDIT)/100 : min_credit;
}
return 0;
}
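/*
 * Illustrative walk-through (assuming DCB_CREDIT_QUANTUM is 64, as defined
 * in a header whose diff is suppressed above): for max_frame = 1518 and
 * bw = {25, 75, 0, ...}, min_credit = ((1518 / 2) + 63) / 64 = 12, the
 * multiplier is 12 / 25 + 1 = 1, and the refill credits become {25, 75}
 * with the max credits scaled from the same percentages.
 */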
/**
* ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
* @dcb_config: Struct containing DCB settings.
* @direction: Configuring either Tx or Rx.
*
* This function calculates the credits allocated to each traffic class.
* It should be called only after the rules are checked by
* ixgbe_dcb_check_config().
*/
s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config,
int max_frame, u8 direction)
{
struct tc_bw_alloc *p;
int min_credit;
int min_multiplier;
int min_percent = 100;
s32 ret_val = 0;
/* Initialization values default for Tx settings */
u32 credit_refill = 0;
u32 credit_max = 0;
u16 link_percentage = 0;
u8 bw_percent = 0;
u8 i;
if (dcb_config == NULL) {
ret_val = DCB_ERR_CONFIG;
goto out;
}
min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
DCB_CREDIT_QUANTUM;
/* Find smallest link percentage */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
p = &dcb_config->tc_config[i].path[direction];
bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
link_percentage = p->bwg_percent;
link_percentage = (link_percentage * bw_percent) / 100;
if (link_percentage && link_percentage < min_percent)
min_percent = link_percentage;
}
/*
* The ratio between traffic classes will control the bandwidth
* percentages seen on the wire. To calculate this ratio we use
* a multiplier. It is required that the refill credits must be
* larger than the max frame size so here we find the smallest
* multiplier that will allow all bandwidth percentages to be
* greater than the max frame size.
*/
min_multiplier = (min_credit / min_percent) + 1;
/* Find out the link percentage for each TC first */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
p = &dcb_config->tc_config[i].path[direction];
bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
link_percentage = p->bwg_percent;
/* Must be careful of integer division for very small nums */
link_percentage = (link_percentage * bw_percent) / 100;
if (p->bwg_percent > 0 && link_percentage == 0)
link_percentage = 1;
/* Save link_percentage for reference */
p->link_percent = (u8)link_percentage;
/* Calculate credit refill ratio using multiplier */
credit_refill = min(link_percentage * min_multiplier,
MAX_CREDIT_REFILL);
p->data_credits_refill = (u16)credit_refill;
/* Calculate maximum credit for the TC */
credit_max = (link_percentage * MAX_CREDIT) / 100;
/*
* Adjustment based on rule checking, if the percentage
* of a TC is too small, the maximum credit may not be
* enough to send out a jumbo frame in data plane arbitration.
*/
if (credit_max && (credit_max < min_credit))
credit_max = min_credit;
if (direction == DCB_TX_CONFIG) {
/*
* Adjustment based on rule checking, if the
* percentage of a TC is too small, the maximum
* credit may not be enough to send out a TSO
* packet in descriptor plane arbitration.
*/
if ((hw->mac.type == ixgbe_mac_82598EB) &&
credit_max &&
(credit_max < MINIMUM_CREDIT_FOR_TSO))
credit_max = MINIMUM_CREDIT_FOR_TSO;
dcb_config->tc_config[i].desc_credits_max =
(u16)credit_max;
}
p->data_credits_max = (u16)credit_max;
}
out:
return ret_val;
}
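/*
* Editor's note, worked example (illustrative values, not from the driver):
* with max_frame = 1518 the quantum math above gives min_credit =
* (759 + 63) / 64 = 12. If the smallest non-zero link percentage is 10,
* min_multiplier = 12 / 10 + 1 = 2, so a TC carrying 10% of the link gets
* data_credits_refill = min(10 * 2, 511) = 20 and data_credits_max =
* 10 * 4095 / 100 = 409, both comfortably above min_credit.
*/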
void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
{
int i;
*pfc_en = 0;
for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
*pfc_en |= !!(cfg->tc_config[i].dcb_pfc & 0xF) << i;
}
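/*
* Editor's note, example (hypothetical configuration): if TC0 is
* pfc_enabled_full (1) and TC3 is pfc_enabled_tx (2) while the rest are
* pfc_disabled (0), the loop above produces *pfc_en = BIT(0) | BIT(3) =
* 0x09, i.e. one enable bit per traffic class regardless of direction.
*/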
void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction,
u16 *refill)
{
struct tc_bw_alloc *p;
int i;
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
p = &cfg->tc_config[i].path[direction];
refill[i] = p->data_credits_refill;
}
}
void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max)
{
int i;
for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
max[i] = cfg->tc_config[i].desc_credits_max;
}
void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction,
u8 *bwgid)
{
struct tc_bw_alloc *p;
int i;
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
p = &cfg->tc_config[i].path[direction];
bwgid[i] = p->bwg_id;
}
}
void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
u8 *ptype)
{
struct tc_bw_alloc *p;
int i;
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
p = &cfg->tc_config[i].path[direction];
ptype[i] = p->prio_type;
}
}
void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
{
int i, up;
unsigned long bitmap;
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
bitmap = cfg->tc_config[i].path[direction].up_to_tc_bitmap;
for_each_set_bit(up, &bitmap, MAX_USER_PRIORITY)
map[up] = i;
}
}
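/*
* Editor's note, example (hypothetical configuration): with
* up_to_tc_bitmap = 0x03 on TC0 and 0xfc on TC1, user priorities 0-1 map
* to TC0 and priorities 2-7 map to TC1, so map[] becomes
* { 0, 0, 1, 1, 1, 1, 1, 1 }.
*/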
/**
* ixgbe_dcb_hw_config - Config and enable DCB
* @hw: pointer to hardware structure
* @dcb_config: pointer to ixgbe_dcb_config structure
*
* Configure dcb settings and enable dcb mode.
*/
s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
s32 ret = 0;
u8 pfc_en;
u8 ptype[MAX_TRAFFIC_CLASS];
u8 bwgid[MAX_TRAFFIC_CLASS];
u8 prio_tc[MAX_TRAFFIC_CLASS];
u16 refill[MAX_TRAFFIC_CLASS];
u16 max[MAX_TRAFFIC_CLASS];
/* Unpack CEE standard containers */
ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en);
ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill);
ixgbe_dcb_unpack_max(dcb_config, max);
ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid);
ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype);
ixgbe_dcb_unpack_map(dcb_config, DCB_TX_CONFIG, prio_tc);
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
ret = ixgbe_dcb_hw_config_82598(hw, pfc_en, refill, max,
bwgid, ptype);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
ret = ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max,
bwgid, ptype, prio_tc);
break;
default:
break;
}
return ret;
}
/* Helper routines to abstract HW specifics from DCB netlink ops */
s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
{
int ret = -EINVAL;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
break;
default:
break;
}
return ret;
}
s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
{
__u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
__u8 prio_type[IEEE_8021QAZ_MAX_TCS];
int i;
/* naively give each TC a bwg to map onto CEE hardware */
__u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7};
/* Map TSA onto CEE prio type */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_STRICT:
prio_type[i] = 2;
break;
case IEEE_8021QAZ_TSA_ETS:
prio_type[i] = 0;
break;
default:
/* Hardware only supports priority strict or
* ETS transmission selection algorithms if
* we receive some other value from dcbnl
* throw an error
*/
return -EINVAL;
}
}
ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame);
return ixgbe_dcb_hw_ets_config(hw, refill, max,
bwg_id, prio_type, ets->prio_tc);
}
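/*
* Editor's note: the numeric prio_type values chosen above follow the
* strict_prio_type enum in ixgbe_dcb.h below, i.e. 2 == prio_link for
* IEEE_8021QAZ_TSA_STRICT and 0 == prio_none for IEEE_8021QAZ_TSA_ETS;
* any other TSA value from dcbnl is rejected with -EINVAL.
*/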
s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
u16 *refill, u16 *max, u8 *bwg_id,
u8 *prio_type, u8 *prio_tc)
{
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max,
prio_type);
ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
bwg_id, prio_type);
ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
bwg_id, prio_type);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max,
bwg_id, prio_type, prio_tc);
ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
bwg_id, prio_type);
ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
prio_type, prio_tc);
break;
default:
break;
}
return 0;
}


@ -0,0 +1,168 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _DCB_CONFIG_H_
#define _DCB_CONFIG_H_
#include <linux/dcbnl.h>
#include "ixgbe_type.h"
/* DCB data structures */
#define IXGBE_MAX_PACKET_BUFFERS 8
#define MAX_USER_PRIORITY 8
#define MAX_BW_GROUP 8
#define BW_PERCENT 100
#define DCB_TX_CONFIG 0
#define DCB_RX_CONFIG 1
/* DCB error Codes */
#define DCB_SUCCESS 0
#define DCB_ERR_CONFIG -1
#define DCB_ERR_PARAM -2
/* Transmit and receive Errors */
/* Error in bandwidth group allocation */
#define DCB_ERR_BW_GROUP -3
/* Error in traffic class bandwidth allocation */
#define DCB_ERR_TC_BW -4
/* Traffic class has both link strict and group strict enabled */
#define DCB_ERR_LS_GS -5
/* Link strict traffic class has non zero bandwidth */
#define DCB_ERR_LS_BW_NONZERO -6
/* Link strict bandwidth group has non zero bandwidth */
#define DCB_ERR_LS_BWG_NONZERO -7
/* Traffic class has zero bandwidth */
#define DCB_ERR_TC_BW_ZERO -8
#define DCB_NOT_IMPLEMENTED 0x7FFFFFFF
struct dcb_pfc_tc_debug {
u8 tc;
u8 pause_status;
u64 pause_quanta;
};
enum strict_prio_type {
prio_none = 0,
prio_group,
prio_link
};
/* DCB capability definitions */
#define IXGBE_DCB_PG_SUPPORT 0x00000001
#define IXGBE_DCB_PFC_SUPPORT 0x00000002
#define IXGBE_DCB_BCN_SUPPORT 0x00000004
#define IXGBE_DCB_UP2TC_SUPPORT 0x00000008
#define IXGBE_DCB_GSP_SUPPORT 0x00000010
#define IXGBE_DCB_8_TC_SUPPORT 0x80
struct dcb_support {
/* DCB capabilities */
u32 capabilities;
/* Each bit represents a number of TCs configurable in the hw.
* If 8 traffic classes can be configured, the value is 0x80.
*/
u8 traffic_classes;
u8 pfc_traffic_classes;
};
/* Traffic class bandwidth allocation per direction */
struct tc_bw_alloc {
u8 bwg_id; /* Bandwidth Group (BWG) ID */
u8 bwg_percent; /* % of BWG's bandwidth */
u8 link_percent; /* % of link bandwidth */
u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */
u16 data_credits_refill; /* Credit refill amount in 64B granularity */
u16 data_credits_max; /* Max credits for a configured packet buffer
* in 64B granularity.*/
enum strict_prio_type prio_type; /* Link or Group Strict Priority */
};
enum dcb_pfc_type {
pfc_disabled = 0,
pfc_enabled_full,
pfc_enabled_tx,
pfc_enabled_rx
};
/* Traffic class configuration */
struct tc_configuration {
struct tc_bw_alloc path[2]; /* One each for Tx/Rx */
enum dcb_pfc_type dcb_pfc; /* Class based flow control setting */
u16 desc_credits_max; /* For Tx Descriptor arbitration */
u8 tc; /* Traffic class (TC) */
};
struct dcb_num_tcs {
u8 pg_tcs;
u8 pfc_tcs;
};
struct ixgbe_dcb_config {
struct dcb_support support;
struct dcb_num_tcs num_tcs;
struct tc_configuration tc_config[MAX_TRAFFIC_CLASS];
u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */
bool pfc_mode_enable;
u32 dcb_cfg_version; /* Not used...OS-specific? */
u32 link_speed; /* For bandwidth allocation validation purpose */
};
/* DCB driver APIs */
void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en);
void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *, int, u16 *);
void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *);
void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *);
void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *);
void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *, int, u8 *);
/* DCB credits calculation */
s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
struct ixgbe_dcb_config *, int, u8);
/* DCB hw initialization */
s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max);
s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
u8 *bwg_id, u8 *prio_type, u8 *tc_prio);
s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio);
s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
/* DCB definitions for credit calculation */
#define DCB_CREDIT_QUANTUM 64 /* DCB Quantum */
#define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */
#define DCB_MAX_TSO_SIZE (32*1024) /* MAX TSO packet size supported in DCB mode */
#define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */
#define MAX_CREDIT 4095 /* Maximum credit supported: just under 256KB (256 * 1024B / 64B = 4096) */
#endif /* _DCB_CONFIG_H */
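/*
* Editor's note: a small standalone sanity check (not part of the driver)
* of how the credit constants above relate to one another, using only the
* values defined in this header. Guarded by #if 0 so the header itself is
* unchanged when compiled.
*/
#if 0
#include <assert.h>

static void dcb_credit_constants_sanity(void)
{
	/* 32KB TSO payload / 64B quantum + 1 credit of slack = 513 */
	assert(MINIMUM_CREDIT_FOR_TSO == DCB_MAX_TSO_SIZE / DCB_CREDIT_QUANTUM + 1);
	assert(MINIMUM_CREDIT_FOR_TSO == 513);
	/* 511 refill credits * 64B quantum = 32704 bytes per refill */
	assert(MAX_CREDIT_REFILL * DCB_CREDIT_QUANTUM == 32704);
	/* max credit field: just under 256KB expressed in 64B units */
	assert(MAX_CREDIT == (256 * 1024) / DCB_CREDIT_QUANTUM - 1);
}
#endif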


@ -0,0 +1,296 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82598.h"
/**
* ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
* @hw: pointer to hardware structure
* @refill: refill credits indexed by traffic class
* @max: max credits indexed by traffic class
* @prio_type: priority type indexed by traffic class
*
* Configure Rx Data Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *prio_type)
{
u32 reg = 0;
u32 credit_refill = 0;
u32 credit_max = 0;
u8 i = 0;
reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA;
IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg);
reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
/* Enable Arbiter */
reg &= ~IXGBE_RMCS_ARBDIS;
/* Enable Receive Recycle within the BWG */
reg |= IXGBE_RMCS_RRM;
/* Enable Deficit Fixed Priority arbitration*/
reg |= IXGBE_RMCS_DFP;
IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
credit_refill = refill[i];
credit_max = max[i];
reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
if (prio_type[i] == prio_link)
reg |= IXGBE_RT2CR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
}
reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
reg |= IXGBE_RDRXCTL_RDMTS_1_2;
reg |= IXGBE_RDRXCTL_MPBEN;
reg |= IXGBE_RDRXCTL_MCEN;
IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
/* Make sure there are enough descriptors before arbitration */
reg &= ~IXGBE_RXCTRL_DMBYPS;
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg);
return 0;
}
/**
* ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
* @hw: pointer to hardware structure
* @refill: refill credits indexed by traffic class
* @max: max credits indexed by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
u8 *prio_type)
{
u32 reg, max_credits;
u8 i;
reg = IXGBE_READ_REG(hw, IXGBE_DPMCS);
/* Enable arbiter */
reg &= ~IXGBE_DPMCS_ARBDIS;
/* Enable DFP and Recycle mode */
reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
reg |= IXGBE_DPMCS_TSOEF;
/* Configure Max TSO packet size 34KB including payload and headers */
reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg);
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
max_credits = max[i];
reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
reg |= refill[i];
reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
if (prio_type[i] == prio_group)
reg |= IXGBE_TDTQ2TCCR_GSP;
if (prio_type[i] == prio_link)
reg |= IXGBE_TDTQ2TCCR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
}
return 0;
}
/**
* ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
* @hw: pointer to hardware structure
* @refill: refill credits indexed by traffic class
* @max: max credits indexed by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
*
* Configure Tx Data Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
u8 *prio_type)
{
u32 reg;
u8 i;
reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
/* Enable Data Plane Arbiter */
reg &= ~IXGBE_PDPMCS_ARBDIS;
/* Enable DFP and Transmit Recycle Mode */
reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM);
IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg);
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
reg = refill[i];
reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;
if (prio_type[i] == prio_group)
reg |= IXGBE_TDPT2TCCR_GSP;
if (prio_type[i] == prio_link)
reg |= IXGBE_TDPT2TCCR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
}
/* Enable Tx packet buffer division */
reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
reg |= IXGBE_DTXCTL_ENDBUBD;
IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg);
return 0;
}
/**
* ixgbe_dcb_config_pfc_82598 - Config priority flow control
* @hw: pointer to hardware structure
* @pfc_en: enabled pfc bitmask
*
* Configure Priority Flow Control for each traffic class.
*/
s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
{
u32 reg;
u8 i;
if (pfc_en) {
/* Enable Transmit Priority Flow Control */
reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
reg &= ~IXGBE_RMCS_TFCE_802_3X;
/* correct the reporting of our flow control status */
reg |= IXGBE_RMCS_TFCE_PRIORITY;
IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
/* Enable Receive Priority Flow Control */
reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
reg &= ~IXGBE_FCTRL_RFCE;
reg |= IXGBE_FCTRL_RPFCE;
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
/* Configure pause time */
for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++)
IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800);
/* Configure flow control refresh threshold value */
IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);
}
/*
* Configure flow control thresholds and enable priority flow control
* for each traffic class.
*/
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
int enabled = pfc_en & (1 << i);
reg = hw->fc.low_water << 10;
/* enabled is non-zero when PFC is on for this TC in the bitmask */
if (enabled)
reg |= IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
reg = hw->fc.high_water[i] << 10;
if (enabled)
reg |= IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
}
return 0;
}
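/*
* Editor's note, example (hypothetical pfc_en = 0x05): bits 0 and 2 are
* set, so TC0 and TC2 get IXGBE_FCRTL_XONE / IXGBE_FCRTH_FCEN in their
* threshold registers, while the remaining TCs get bare water marks and
* never trigger priority pause frames.
*/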
/**
* ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics
* @hw: pointer to hardware structure
*
* Configure queue statistics registers; all queues belonging to the same
* traffic class use a single set of queue statistics counters.
*/
static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
{
u32 reg = 0;
u8 i = 0;
u8 j = 0;
/* Receive Queues stats setting - 8 queues per statistics reg */
for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) {
reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i));
reg |= ((0x1010101) * j);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1));
reg |= ((0x1010101) * j);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg);
}
/* Transmit Queues stats setting - 4 queues per statistics reg */
for (i = 0; i < 8; i++) {
reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i));
reg |= ((0x1010101) * i);
IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg);
}
return 0;
}
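/*
* Editor's note: multiplying by 0x01010101 replicates the statistics-set
* index into each per-queue byte of the register (e.g. an index of 2
* writes 0x02020202). Writing two RQSMR registers per class maps eight
* Rx queues, and one TQSMR register maps four Tx queues, onto the counter
* set matching that traffic class.
*/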
/**
* ixgbe_dcb_hw_config_82598 - Config and enable DCB
* @hw: pointer to hardware structure
* @pfc_en: enabled pfc bitmask
* @refill: refill credits indexed by traffic class
* @max: max credits indexed by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
*
* Configure dcb settings and enable dcb mode.
*/
s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type)
{
ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
bwg_id, prio_type);
ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
bwg_id, prio_type);
ixgbe_dcb_config_pfc_82598(hw, pfc_en);
ixgbe_dcb_config_tc_stats_82598(hw);
return 0;
}


@ -0,0 +1,97 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _DCB_82598_CONFIG_H_
#define _DCB_82598_CONFIG_H_
/* DCB register definitions */
#define IXGBE_DPMCS_MTSOS_SHIFT 16
#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin, 1 DFP - Deficit Fixed Priority */
#define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */
#define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */
#define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */
#define IXGBE_RUPPBMR_MQA 0x80000000 /* Enable UP to queue mapping */
#define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */
#define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */
#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet buffers enable */
#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores (RSS) enable */
#define IXGBE_TDTQ2TCCR_MCL_SHIFT 12
#define IXGBE_TDTQ2TCCR_BWG_SHIFT 9
#define IXGBE_TDTQ2TCCR_GSP 0x40000000
#define IXGBE_TDTQ2TCCR_LSP 0x80000000
#define IXGBE_TDPT2TCCR_MCL_SHIFT 12
#define IXGBE_TDPT2TCCR_BWG_SHIFT 9
#define IXGBE_TDPT2TCCR_GSP 0x40000000
#define IXGBE_TDPT2TCCR_LSP 0x80000000
#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin, 1 for DFP - Deficit Fixed Priority */
#define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */
#define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */
#define IXGBE_DTXCTL_ENDBUBD 0x00000004 /* Enable DBU buffer division */
#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */
#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */
#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000
/* DCB hardware-specific driver APIs */
/* DCB PFC functions */
s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en);
/* DCB hw initialization */
s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *prio_type);
s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
u8 *prio_type);
s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
u8 *prio_type);
s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type);
#endif /* _DCB_82598_CONFIG_H */


@ -0,0 +1,376 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82599.h"
/**
* ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
* @hw: pointer to hardware structure
* @refill: refill credits indexed by traffic class
* @max: max credits indexed by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
* @prio_tc: priority to tc assignments indexed by priority
*
* Configure Rx Packet Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
u8 *prio_type,
u8 *prio_tc)
{
u32 reg = 0;
u32 credit_refill = 0;
u32 credit_max = 0;
u8 i = 0;
/*
* Disable the arbiter before changing parameters
* (always enable recycle mode; WSP)
*/
reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
/* Map all traffic classes to their UP */
reg = 0;
for (i = 0; i < MAX_USER_PRIORITY; i++)
reg |= (prio_tc[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));
IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
credit_refill = refill[i];
credit_max = max[i];
reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);
reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;
if (prio_type[i] == prio_link)
reg |= IXGBE_RTRPT4C_LSP;
IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
}
/*
* Configure Rx packet plane (recycle mode; WSP) and
* enable arbiter
*/
reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
return 0;
}
/**
* ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
* @hw: pointer to hardware structure
* @refill: refill credits indexed by traffic class
* @max: max credits indexed by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
u8 *prio_type)
{
u32 reg, max_credits;
u8 i;
/* Clear the per-Tx queue credits; we use per-TC instead */
for (i = 0; i < 128; i++) {
IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
}
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
max_credits = max[i];
reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
reg |= refill[i];
reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;
if (prio_type[i] == prio_group)
reg |= IXGBE_RTTDT2C_GSP;
if (prio_type[i] == prio_link)
reg |= IXGBE_RTTDT2C_LSP;
IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
}
/*
* Configure Tx descriptor plane (recycle mode; WSP) and
* enable arbiter
*/
reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
return 0;
}
/**
* ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
* @hw: pointer to hardware structure
* @refill: refill credits indexed by traffic class
* @max: max credits indexed by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
* @prio_tc: priority to tc assignments indexed by priority
*
* Configure Tx Packet Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
u8 *prio_type,
u8 *prio_tc)
{
u32 reg;
u8 i;
/*
* Disable the arbiter before changing parameters
* (always enable recycle mode; SP; arb delay)
*/
reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
(IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
IXGBE_RTTPCS_ARBDIS;
IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
/* Map all traffic classes to their UP */
reg = 0;
for (i = 0; i < MAX_USER_PRIORITY; i++)
reg |= (prio_tc[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));
IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
reg = refill[i];
reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;
if (prio_type[i] == prio_group)
reg |= IXGBE_RTTPT2C_GSP;
if (prio_type[i] == prio_link)
reg |= IXGBE_RTTPT2C_LSP;
IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
}
/*
* Configure Tx packet plane (recycle mode; SP; arb delay) and
* enable arbiter
*/
reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
(IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
return 0;
}
/**
* ixgbe_dcb_config_pfc_82599 - Configure priority flow control
* @hw: pointer to hardware structure
* @pfc_en: enabled pfc bitmask
* @prio_tc: priority to tc assignments indexed by priority
*
* Configure Priority Flow Control (PFC) for each traffic class.
*/
s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
{
u32 i, j, reg;
u8 max_tc = 0;
for (i = 0; i < MAX_USER_PRIORITY; i++)
if (prio_tc[i] > max_tc)
max_tc = prio_tc[i];
/* Configure PFC Tx thresholds per TC */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
int enabled = 0;
if (i > max_tc) {
reg = 0;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
continue;
}
for (j = 0; j < MAX_USER_PRIORITY; j++) {
if ((prio_tc[j] == i) && (pfc_en & (1 << j))) {
enabled = 1;
break;
}
}
reg = hw->fc.low_water << 10;
if (enabled)
reg |= IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
reg = hw->fc.high_water[i] << 10;
if (enabled)
reg |= IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
}
if (pfc_en) {
/* Configure pause time (2 TCs per register) */
reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
/* Configure flow control refresh threshold value */
IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
reg = IXGBE_FCCFG_TFCE_PRIORITY;
IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);
/*
* Enable Receive PFC
* 82599 will always honor XOFF frames we receive when
* we are in PFC mode however X540 only honors enabled
* traffic classes.
*/
reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
reg &= ~IXGBE_MFLCN_RFCE;
reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
if (hw->mac.type == ixgbe_mac_X540) {
reg &= ~IXGBE_MFLCN_RPFCE_MASK;
reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
}
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
} else {
/* X540 devices have an RX bit that should be cleared
* if PFC is disabled on all TCs but the PFC feature is
* enabled.
*/
if (hw->mac.type == ixgbe_mac_X540) {
reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
reg &= ~IXGBE_MFLCN_RPFCE_MASK;
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
}
for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
hw->mac.ops.fc_enable(hw, i);
}
return 0;
}
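/*
* Editor's note, example (hypothetical settings): with prio_tc =
* { 0, 0, 1, 1, 2, 2, 3, 3 } and pfc_en = 0x0c, only user priorities 2
* and 3 are enabled and both map to TC1, so only TC1's FCRTL/FCRTH pair
* gets XONE/FCEN set; TC0, TC2 and TC3 are programmed with bare water
* marks, and TCs above max_tc (here 3) have their thresholds zeroed.
*/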
/**
* ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
* @hw: pointer to hardware structure
*
* Configure queue statistics registers; all queues belonging to the same
* traffic class use a single set of queue statistics counters.
*/
static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
{
u32 reg = 0;
u8 i = 0;
/*
* Receive Queues stats setting
* 32 RQSMR registers, each configuring 4 queues.
* Set all 16 queues of each TC to the same stat
* with TC 'n' going to stat 'n'.
*/
for (i = 0; i < 32; i++) {
reg = 0x01010101 * (i / 4);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
}
/*
* Transmit Queues stats setting
* 32 TQSM registers, each controlling 4 queues.
* Set all queues of each TC to the same stat
* with TC 'n' going to stat 'n'.
* Tx queues are allocated non-uniformly to TCs:
* 32, 32, 16, 16, 8, 8, 8, 8.
*/
for (i = 0; i < 32; i++) {
if (i < 8)
reg = 0x00000000;
else if (i < 16)
reg = 0x01010101;
else if (i < 20)
reg = 0x02020202;
else if (i < 24)
reg = 0x03030303;
else if (i < 26)
reg = 0x04040404;
else if (i < 28)
reg = 0x05050505;
else if (i < 30)
reg = 0x06060606;
else
reg = 0x07070707;
IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
}
return 0;
}
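/*
* Editor's note: expressed as queue ranges, the ladder above assigns Tx
* queues 0-31 to TC0, 32-63 to TC1, 64-79 to TC2, 80-95 to TC3, 96-103 to
* TC4, 104-111 to TC5, 112-119 to TC6 and 120-127 to TC7, matching the
* 32/32/16/16/8/8/8/8 allocation mentioned in the comment.
*/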
/**
* ixgbe_dcb_hw_config_82599 - Configure and enable DCB
* @hw: pointer to hardware structure
* @pfc_en: enabled pfc bitmask
* @refill: refill credits indexed by traffic class
* @max: max credits indexed by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
* @prio_tc: priority to tc assignments indexed by priority
*
* Configure dcb settings and enable dcb mode.
*/
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc)
{
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
prio_type, prio_tc);
ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
bwg_id, prio_type);
ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
bwg_id, prio_type, prio_tc);
ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
ixgbe_dcb_config_tc_stats_82599(hw);
return 0;
}


@ -0,0 +1,123 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _DCB_82599_CONFIG_H_
#define _DCB_82599_CONFIG_H_
/* DCB register definitions */
#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin,
* 1 WSP - Weighted Strict Priority
*/
#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin,
* 1 WRR - Weighted Round Robin
*/
#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */
#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */
#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */
#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must
* clear!
*/
#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */
/* Receive UP2TC mapping */
#define IXGBE_RTRUP2TC_UP_SHIFT 3
/* Transmit UP2TC mapping */
#define IXGBE_RTTUP2TC_UP_SHIFT 3
#define IXGBE_RTRPT4C_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */
#define IXGBE_RTRPT4C_BWG_SHIFT 9 /* Offset to BWG index */
#define IXGBE_RTRPT4C_GSP 0x40000000 /* GSP enable bit */
#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */
#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet
* buffers enable
*/
#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores
* (RSS) enable
*/
/* RTRPCS Bit Masks */
#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */
/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
#define IXGBE_RTRPCS_RAC 0x00000004
#define IXGBE_RTRPCS_ARBDIS 0x00000040 /* Arbitration disable bit */
/* RTTDT2C Bit Masks */
#define IXGBE_RTTDT2C_MCL_SHIFT 12
#define IXGBE_RTTDT2C_BWG_SHIFT 9
#define IXGBE_RTTDT2C_GSP 0x40000000
#define IXGBE_RTTDT2C_LSP 0x80000000
#define IXGBE_RTTPT2C_MCL_SHIFT 12
#define IXGBE_RTTPT2C_BWG_SHIFT 9
#define IXGBE_RTTPT2C_GSP 0x40000000
#define IXGBE_RTTPT2C_LSP 0x80000000
/* RTTPCS Bit Masks */
#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin,
* 1 SP - Strict Priority
*/
#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */
#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */
#define IXGBE_RTTPCS_ARBD_SHIFT 22
#define IXGBE_RTTPCS_ARBD_DCB 0x4 /* Arbitration delay in DCB mode */
/* SECTXMINIFG DCB */
#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer IFG */
/* DCB hardware-specific driver APIs */
/* DCB PFC functions */
s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc);
/* DCB hw initialization */
s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
u8 *prio_type,
u8 *prio_tc);
s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
u8 *prio_type);
s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
u8 *prio_type,
u8 *prio_tc);
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type,
u8 *prio_tc);
#endif /* _DCB_82599_CONFIG_H */


@ -0,0 +1,805 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "ixgbe.h"
#include <linux/dcbnl.h>
#include "ixgbe_dcb_82598.h"
#include "ixgbe_dcb_82599.h"
/* Callbacks for DCB netlink in the kernel */
#define BIT_DCB_MODE 0x01
#define BIT_PFC 0x02
#define BIT_PG_RX 0x04
#define BIT_PG_TX 0x08
#define BIT_APP_UPCHG 0x10
#define BIT_LINKSPEED 0x80
/* Responses for the DCB_C_SET_ALL command */
#define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */
#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */
#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */
int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max)
{
struct tc_configuration *src_tc_cfg = NULL;
struct tc_configuration *dst_tc_cfg = NULL;
int i;
if (!src_dcb_cfg || !dst_dcb_cfg)
return -EINVAL;
for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) {
src_tc_cfg = &src_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
dst_tc_cfg = &dst_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
dst_tc_cfg->path[DCB_TX_CONFIG].prio_type =
src_tc_cfg->path[DCB_TX_CONFIG].prio_type;
dst_tc_cfg->path[DCB_TX_CONFIG].bwg_id =
src_tc_cfg->path[DCB_TX_CONFIG].bwg_id;
dst_tc_cfg->path[DCB_TX_CONFIG].bwg_percent =
src_tc_cfg->path[DCB_TX_CONFIG].bwg_percent;
dst_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap =
src_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap;
dst_tc_cfg->path[DCB_RX_CONFIG].prio_type =
src_tc_cfg->path[DCB_RX_CONFIG].prio_type;
dst_tc_cfg->path[DCB_RX_CONFIG].bwg_id =
src_tc_cfg->path[DCB_RX_CONFIG].bwg_id;
dst_tc_cfg->path[DCB_RX_CONFIG].bwg_percent =
src_tc_cfg->path[DCB_RX_CONFIG].bwg_percent;
dst_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap =
src_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap;
}
for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) {
dst_dcb_cfg->bw_percentage[DCB_TX_CONFIG]
[i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
[DCB_TX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
dst_dcb_cfg->bw_percentage[DCB_RX_CONFIG]
[i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
[DCB_RX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
}
for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) {
dst_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc =
src_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc;
}
dst_dcb_cfg->pfc_mode_enable = src_dcb_cfg->pfc_mode_enable;
return 0;
}
static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED);
}
static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
{
u8 err = 0;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
/* Fail command if not in CEE mode */
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return 1;
/* verify there is something to do, if not then exit */
if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
return err;
if (state > 0)
err = ixgbe_setup_tc(netdev, adapter->dcb_cfg.num_tcs.pg_tcs);
else
err = ixgbe_setup_tc(netdev, 0);
return err;
}
static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
u8 *perm_addr)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int i, j;
memset(perm_addr, 0xff, MAX_ADDR_LEN);
for (i = 0; i < netdev->addr_len; i++)
perm_addr[i] = adapter->hw.mac.perm_addr[i];
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
for (j = 0; j < netdev->addr_len; j++, i++)
perm_addr[i] = adapter->hw.mac.san_addr[j];
break;
default:
break;
}
}
static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
u8 prio, u8 bwg_id, u8 bw_pct,
u8 up_map)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
/* Abort a bad configuration */
if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs)
return;
if (prio != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio;
if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id;
if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent =
bw_pct;
if (up_map != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap =
up_map;
if ((adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type !=
adapter->dcb_cfg.tc_config[tc].path[0].prio_type) ||
(adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id !=
adapter->dcb_cfg.tc_config[tc].path[0].bwg_id) ||
(adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
(adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
adapter->dcb_set_bitmap |= BIT_PG_TX;
if (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)
adapter->dcb_set_bitmap |= BIT_PFC;
}
static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
u8 bw_pct)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
adapter->dcb_cfg.bw_percentage[0][bwg_id])
adapter->dcb_set_bitmap |= BIT_PG_TX;
}
static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
u8 prio, u8 bwg_id, u8 bw_pct,
u8 up_map)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
/* Abort bad configurations */
if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs)
return;
if (prio != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio;
if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id;
if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent =
bw_pct;
if (up_map != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap =
up_map;
if ((adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type !=
adapter->dcb_cfg.tc_config[tc].path[1].prio_type) ||
(adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id !=
adapter->dcb_cfg.tc_config[tc].path[1].bwg_id) ||
(adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
(adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
adapter->dcb_set_bitmap |= BIT_PG_RX;
if (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)
adapter->dcb_set_bitmap |= BIT_PFC;
}
static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
u8 bw_pct)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
adapter->dcb_cfg.bw_percentage[1][bwg_id])
adapter->dcb_set_bitmap |= BIT_PG_RX;
}
static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
u8 *prio, u8 *bwg_id, u8 *bw_pct,
u8 *up_map)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
*prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type;
*bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id;
*bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent;
*up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap;
}
static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
u8 *bw_pct)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
*bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id];
}
static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
u8 *prio, u8 *bwg_id, u8 *bw_pct,
u8 *up_map)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
*prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type;
*bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id;
*bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent;
*up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap;
}
static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
u8 *bw_pct)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
*bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id];
}
static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
u8 setting)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting;
if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc !=
adapter->dcb_cfg.tc_config[priority].dcb_pfc) {
adapter->dcb_set_bitmap |= BIT_PFC;
adapter->temp_dcb_cfg.pfc_mode_enable = true;
}
}
static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
u8 *setting)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
*setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
}
static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int ret, i;
#ifdef IXGBE_FCOE
struct dcb_app app = {
.selector = DCB_APP_IDTYPE_ETHTYPE,
.protocol = ETH_P_FCOE,
};
u8 up;
/* In IEEE mode, use the IEEE Ethertype selector value */
if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) {
app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
up = dcb_ieee_getapp_mask(netdev, &app);
} else {
up = dcb_getapp(netdev, &app);
}
#endif
/* Fail command if not in CEE mode */
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return 1;
ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
MAX_TRAFFIC_CLASS);
if (ret)
return DCB_NO_HW_CHG;
#ifdef IXGBE_FCOE
if (up && !(up & (1 << adapter->fcoe.up)))
adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
/*
* Only take down the adapter if an app change occurred. FCoE
* may shuffle Tx rings in this case, and that cannot currently
* be done without a reset.
*/
if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
usleep_range(1000, 2000);
adapter->fcoe.up = ffs(up) - 1;
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
ixgbe_clear_interrupt_scheme(adapter);
}
#endif
if (adapter->dcb_cfg.pfc_mode_enable) {
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
if (adapter->hw.fc.current_mode != ixgbe_fc_pfc)
adapter->last_lfc_mode =
adapter->hw.fc.current_mode;
break;
default:
break;
}
adapter->hw.fc.requested_mode = ixgbe_fc_pfc;
} else {
switch (adapter->hw.mac.type) {
case ixgbe_mac_82598EB:
adapter->hw.fc.requested_mode = ixgbe_fc_none;
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
break;
default:
break;
}
}
#ifdef IXGBE_FCOE
if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
ixgbe_init_interrupt_scheme(adapter);
if (netif_running(netdev))
netdev->netdev_ops->ndo_open(netdev);
ret = DCB_HW_CHG_RST;
}
#endif
if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
/* Priority to TC mapping in the CEE case defaults to 1:1 */
u8 prio_tc[MAX_USER_PRIORITY];
int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
#ifdef IXGBE_FCOE
if (adapter->netdev->features & NETIF_F_FCOE_MTU)
max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif
ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
max_frame, DCB_TX_CONFIG);
ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
max_frame, DCB_RX_CONFIG);
ixgbe_dcb_unpack_refill(&adapter->dcb_cfg,
DCB_TX_CONFIG, refill);
ixgbe_dcb_unpack_max(&adapter->dcb_cfg, max);
ixgbe_dcb_unpack_bwgid(&adapter->dcb_cfg,
DCB_TX_CONFIG, bwg_id);
ixgbe_dcb_unpack_prio(&adapter->dcb_cfg,
DCB_TX_CONFIG, prio_type);
ixgbe_dcb_unpack_map(&adapter->dcb_cfg,
DCB_TX_CONFIG, prio_tc);
ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
bwg_id, prio_type, prio_tc);
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
}
if (adapter->dcb_set_bitmap & BIT_PFC) {
u8 pfc_en;
u8 prio_tc[MAX_USER_PRIORITY];
ixgbe_dcb_unpack_map(&adapter->dcb_cfg,
DCB_TX_CONFIG, prio_tc);
ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en, prio_tc);
ret = DCB_HW_CHG;
}
if (adapter->dcb_cfg.pfc_mode_enable)
adapter->hw.fc.current_mode = ixgbe_fc_pfc;
if (adapter->dcb_set_bitmap & BIT_APP_UPCHG)
clear_bit(__IXGBE_RESETTING, &adapter->state);
adapter->dcb_set_bitmap = 0x00;
return ret;
}
static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
switch (capid) {
case DCB_CAP_ATTR_PG:
*cap = true;
break;
case DCB_CAP_ATTR_PFC:
*cap = true;
break;
case DCB_CAP_ATTR_UP2TC:
*cap = false;
break;
case DCB_CAP_ATTR_PG_TCS:
*cap = 0x80;
break;
case DCB_CAP_ATTR_PFC_TCS:
*cap = 0x80;
break;
case DCB_CAP_ATTR_GSP:
*cap = true;
break;
case DCB_CAP_ATTR_BCN:
*cap = false;
break;
case DCB_CAP_ATTR_DCBX:
*cap = adapter->dcbx_cap;
break;
default:
*cap = false;
break;
}
return 0;
}
static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
u8 rval = 0;
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
switch (tcid) {
case DCB_NUMTCS_ATTR_PG:
*num = adapter->dcb_cfg.num_tcs.pg_tcs;
break;
case DCB_NUMTCS_ATTR_PFC:
*num = adapter->dcb_cfg.num_tcs.pfc_tcs;
break;
default:
rval = -EINVAL;
break;
}
} else {
rval = -EINVAL;
}
return rval;
}
static u8 ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
{
return -EINVAL;
}
static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
return adapter->dcb_cfg.pfc_mode_enable;
}
static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
adapter->temp_dcb_cfg.pfc_mode_enable = state;
if (adapter->temp_dcb_cfg.pfc_mode_enable !=
adapter->dcb_cfg.pfc_mode_enable)
adapter->dcb_set_bitmap |= BIT_PFC;
}
/**
* ixgbe_dcbnl_getapp - retrieve the DCBX application user priority
* @netdev: the corresponding netdev
* @idtype: identifies the id as an ether type or a TCP/UDP port number
* @id: either the ether type or the TCP/UDP port number
*
* Returns: a non-zero 802.1p user priority bitmap on success; otherwise
* returns 0, the invalid user priority bitmap, to indicate an error.
*/
static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct dcb_app app = {
.selector = idtype,
.protocol = id,
};
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return 0;
return dcb_getapp(netdev, &app);
}
static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
struct ieee_ets *ets)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets;
/* No IEEE ETS settings available */
if (!my_ets)
return -EINVAL;
ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs;
ets->cbs = my_ets->cbs;
memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
return 0;
}
static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
struct ieee_ets *ets)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
int i;
__u8 max_tc = 0;
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
if (!adapter->ixgbe_ieee_ets) {
adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets),
GFP_KERNEL);
if (!adapter->ixgbe_ieee_ets)
return -ENOMEM;
}
memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets));
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (ets->prio_tc[i] > max_tc)
max_tc = ets->prio_tc[i];
}
if (max_tc)
max_tc++;
if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs)
return -EINVAL;
if (max_tc != netdev_get_num_tc(dev))
ixgbe_setup_tc(dev, max_tc);
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
netdev_set_prio_tc_map(dev, i, ets->prio_tc[i]);
return ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
}
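/*
* Editor's note, example (hypothetical mapping): ets->prio_tc =
* { 0, 0, 1, 1, 2, 2, 3, 3 } gives max_tc = 3, bumped to 4 because TC
* numbering is zero based; if the device currently exposes a different
* number of traffic classes, ixgbe_setup_tc(dev, 4) reconfigures it before
* the per-priority mapping and the hardware ETS credits are programmed.
*/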
static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc;
int i;
/* No IEEE PFC settings available */
if (!my_pfc)
return -EINVAL;
pfc->pfc_cap = adapter->dcb_cfg.num_tcs.pfc_tcs;
pfc->pfc_en = my_pfc->pfc_en;
pfc->mbc = my_pfc->mbc;
pfc->delay = my_pfc->delay;
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
pfc->requests[i] = adapter->stats.pxoffrxc[i];
pfc->indications[i] = adapter->stats.pxofftxc[i];
}
return 0;
}
static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
u8 *prio_tc;
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
if (!adapter->ixgbe_ieee_pfc) {
adapter->ixgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc),
GFP_KERNEL);
if (!adapter->ixgbe_ieee_pfc)
return -ENOMEM;
}
prio_tc = adapter->ixgbe_ieee_ets->prio_tc;
memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc));
return ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en, prio_tc);
}
#ifdef IXGBE_FCOE
static void ixgbe_dcbnl_devreset(struct net_device *dev)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
if (netif_running(dev))
dev->netdev_ops->ndo_stop(dev);
ixgbe_clear_interrupt_scheme(adapter);
ixgbe_init_interrupt_scheme(adapter);
if (netif_running(dev))
dev->netdev_ops->ndo_open(dev);
}
#endif
static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
struct dcb_app *app)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
int err = -EINVAL;
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return err;
err = dcb_ieee_setapp(dev, app);
#ifdef IXGBE_FCOE
if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
app->protocol == ETH_P_FCOE) {
u8 app_mask = dcb_ieee_getapp_mask(dev, app);
if (app_mask & (1 << adapter->fcoe.up))
return err;
adapter->fcoe.up = app->priority;
ixgbe_dcbnl_devreset(dev);
}
#endif
return 0;
}
static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
struct dcb_app *app)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
int err;
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
err = dcb_ieee_delapp(dev, app);
#ifdef IXGBE_FCOE
if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
app->protocol == ETH_P_FCOE) {
u8 app_mask = dcb_ieee_getapp_mask(dev, app);
if (app_mask & (1 << adapter->fcoe.up))
return err;
adapter->fcoe.up = app_mask ?
ffs(app_mask) - 1 : IXGBE_FCOE_DEFTC;
ixgbe_dcbnl_devreset(dev);
}
#endif
return err;
}
static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
return adapter->dcbx_cap;
}
static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ieee_ets ets = {0};
struct ieee_pfc pfc = {0};
/* no support for LLD_MANAGED modes or CEE+IEEE */
if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) ||
!(mode & DCB_CAP_DCBX_HOST))
return 1;
if (mode == adapter->dcbx_cap)
return 0;
adapter->dcbx_cap = mode;
/* ETS and PFC defaults */
ets.ets_cap = 8;
pfc.pfc_cap = 8;
if (mode & DCB_CAP_DCBX_VER_IEEE) {
ixgbe_dcbnl_ieee_setets(dev, &ets);
ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
} else if (mode & DCB_CAP_DCBX_VER_CEE) {
adapter->dcb_set_bitmap |= (BIT_PFC | BIT_PG_TX | BIT_PG_RX);
ixgbe_dcbnl_set_all(dev);
} else {
/* Drop into single TC mode strict priority as this
* indicates CEE and IEEE versions are disabled
*/
ixgbe_dcbnl_ieee_setets(dev, &ets);
ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
ixgbe_setup_tc(dev, 0);
}
return 0;
}
const struct dcbnl_rtnl_ops dcbnl_ops = {
.ieee_getets = ixgbe_dcbnl_ieee_getets,
.ieee_setets = ixgbe_dcbnl_ieee_setets,
.ieee_getpfc = ixgbe_dcbnl_ieee_getpfc,
.ieee_setpfc = ixgbe_dcbnl_ieee_setpfc,
.ieee_setapp = ixgbe_dcbnl_ieee_setapp,
.ieee_delapp = ixgbe_dcbnl_ieee_delapp,
.getstate = ixgbe_dcbnl_get_state,
.setstate = ixgbe_dcbnl_set_state,
.getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr,
.setpgtccfgtx = ixgbe_dcbnl_set_pg_tc_cfg_tx,
.setpgbwgcfgtx = ixgbe_dcbnl_set_pg_bwg_cfg_tx,
.setpgtccfgrx = ixgbe_dcbnl_set_pg_tc_cfg_rx,
.setpgbwgcfgrx = ixgbe_dcbnl_set_pg_bwg_cfg_rx,
.getpgtccfgtx = ixgbe_dcbnl_get_pg_tc_cfg_tx,
.getpgbwgcfgtx = ixgbe_dcbnl_get_pg_bwg_cfg_tx,
.getpgtccfgrx = ixgbe_dcbnl_get_pg_tc_cfg_rx,
.getpgbwgcfgrx = ixgbe_dcbnl_get_pg_bwg_cfg_rx,
.setpfccfg = ixgbe_dcbnl_set_pfc_cfg,
.getpfccfg = ixgbe_dcbnl_get_pfc_cfg,
.setall = ixgbe_dcbnl_set_all,
.getcap = ixgbe_dcbnl_getcap,
.getnumtcs = ixgbe_dcbnl_getnumtcs,
.setnumtcs = ixgbe_dcbnl_setnumtcs,
.getpfcstate = ixgbe_dcbnl_getpfcstate,
.setpfcstate = ixgbe_dcbnl_setpfcstate,
.getapp = ixgbe_dcbnl_getapp,
.getdcbx = ixgbe_dcbnl_getdcbx,
.setdcbx = ixgbe_dcbnl_setdcbx,
};

File diff suppressed because it is too large

View File

@ -0,0 +1,857 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "ixgbe.h"
#include <linux/if_ether.h>
#include <linux/gfp.h>
#include <linux/if_vlan.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>
/**
* ixgbe_fcoe_clear_ddp - clear the given ddp context
* @ddp: ptr to the ixgbe_fcoe_ddp
*
* Returns : none
*
*/
static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
{
ddp->len = 0;
ddp->err = 1;
ddp->udl = NULL;
ddp->udp = 0UL;
ddp->sgl = NULL;
ddp->sgc = 0;
}
/**
* ixgbe_fcoe_ddp_put - free the ddp context for a given xid
* @netdev: the corresponding net_device
* @xid: the xid whose corresponding ddp context will be freed
*
* This is the implementation of net_device_ops.ndo_fcoe_ddp_done
* and it is expected to be called by ULD, i.e., FCP layer of libfc
* to release the corresponding ddp context when the I/O is done.
*
* Returns : data length already ddp-ed in bytes
*/
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
int len = 0;
struct ixgbe_fcoe *fcoe;
struct ixgbe_adapter *adapter;
struct ixgbe_fcoe_ddp *ddp;
u32 fcbuff;
if (!netdev)
goto out_ddp_put;
if (xid >= IXGBE_FCOE_DDP_MAX)
goto out_ddp_put;
adapter = netdev_priv(netdev);
fcoe = &adapter->fcoe;
ddp = &fcoe->ddp[xid];
if (!ddp->udl)
goto out_ddp_put;
len = ddp->len;
/* if there was an error, force invalidation of the ddp context */
if (ddp->err) {
spin_lock_bh(&fcoe->lock);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW,
(xid | IXGBE_FCFLTRW_WE));
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
(xid | IXGBE_FCDMARW_WE));
/* guaranteed to be invalidated after 100us */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
(xid | IXGBE_FCDMARW_RE));
fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
spin_unlock_bh(&fcoe->lock);
if (fcbuff & IXGBE_FCBUFF_VALID)
udelay(100);
}
if (ddp->sgl)
pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
DMA_FROM_DEVICE);
if (ddp->pool) {
pci_pool_free(ddp->pool, ddp->udl, ddp->udp);
ddp->pool = NULL;
}
ixgbe_fcoe_clear_ddp(ddp);
out_ddp_put:
return len;
}
/**
* ixgbe_fcoe_ddp_setup - called to set up ddp context
* @netdev: the corresponding net_device
* @xid: the exchange id requesting ddp
* @sgl: the scatter-gather list for this request
* @sgc: the number of scatter-gather items
*
* Returns : 1 for success and 0 for no ddp
*/
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
struct scatterlist *sgl, unsigned int sgc,
int target_mode)
{
struct ixgbe_adapter *adapter;
struct ixgbe_hw *hw;
struct ixgbe_fcoe *fcoe;
struct ixgbe_fcoe_ddp *ddp;
struct scatterlist *sg;
unsigned int i, j, dmacount;
unsigned int len;
static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
unsigned int firstoff = 0;
unsigned int lastsize;
unsigned int thisoff = 0;
unsigned int thislen = 0;
u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
dma_addr_t addr = 0;
struct pci_pool *pool;
unsigned int cpu;
if (!netdev || !sgl)
return 0;
adapter = netdev_priv(netdev);
if (xid >= IXGBE_FCOE_DDP_MAX) {
e_warn(drv, "xid=0x%x out-of-range\n", xid);
return 0;
}
/* no DDP if we are already down or resetting */
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
return 0;
fcoe = &adapter->fcoe;
if (!fcoe->pool) {
e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
return 0;
}
ddp = &fcoe->ddp[xid];
if (ddp->sgl) {
e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
xid, ddp->sgl, ddp->sgc);
return 0;
}
ixgbe_fcoe_clear_ddp(ddp);
/* setup dma from scsi command sgl */
dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
if (dmacount == 0) {
e_err(drv, "xid 0x%x DMA map error\n", xid);
return 0;
}
/* alloc the udl from per cpu ddp pool */
cpu = get_cpu();
pool = *per_cpu_ptr(fcoe->pool, cpu);
ddp->udl = pci_pool_alloc(pool, GFP_ATOMIC, &ddp->udp);
if (!ddp->udl) {
e_err(drv, "failed allocated ddp context\n");
goto out_noddp_unmap;
}
ddp->pool = pool;
ddp->sgl = sgl;
ddp->sgc = sgc;
j = 0;
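/* walk the DMA-mapped scatterlist and fill the user descriptor list (udl) with bufflen-sized, bufflen-aligned buffer pointers for the hardware */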
for_each_sg(sgl, sg, dmacount, i) {
addr = sg_dma_address(sg);
len = sg_dma_len(sg);
while (len) {
/* max number of buffers allowed in one DDP context */
if (j >= IXGBE_BUFFCNT_MAX) {
*per_cpu_ptr(fcoe->pcpu_noddp, cpu) += 1;
goto out_noddp_free;
}
/* get the offset and length of the current buffer */
thisoff = addr & ((dma_addr_t)bufflen - 1);
thislen = min((bufflen - thisoff), len);
/*
* all but the 1st buffer (j == 0)
* must be aligned on bufflen
*/
if ((j != 0) && (thisoff))
goto out_noddp_free;
/*
* all but the last buffer
* ((i == (dmacount - 1)) && (thislen == len))
* must end at bufflen
*/
if (((i != (dmacount - 1)) || (thislen != len))
&& ((thislen + thisoff) != bufflen))
goto out_noddp_free;
ddp->udl[j] = (u64)(addr - thisoff);
/* only the first buffer may have a non-zero offset */
if (j == 0)
firstoff = thisoff;
len -= thislen;
addr += thislen;
j++;
}
}
/* only the last buffer may have non-full bufflen */
lastsize = thisoff + thislen;
/*
* lastsize can not be bufflen.
* If it is, add another (extra) buffer and set lastsize to 1.
*/
if (lastsize == bufflen) {
if (j >= IXGBE_BUFFCNT_MAX) {
*per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) += 1;
goto out_noddp_free;
}
ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
j++;
lastsize = 1;
}
put_cpu();
fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
/* Set WRCONTX bit to allow DDP for target */
if (target_mode)
fcbuff |= (IXGBE_FCBUFF_WRCONTX);
fcbuff |= (IXGBE_FCBUFF_VALID);
fcdmarw = xid;
fcdmarw |= IXGBE_FCDMARW_WE;
fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT);
fcfltrw = xid;
fcfltrw |= IXGBE_FCFLTRW_WE;
/* program DMA context */
hw = &adapter->hw;
spin_lock_bh(&fcoe->lock);
/* turn on last frame indication for target mode as the target is
* supposed to send FCP_RSP when it is done. */
if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
}
IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
/* program filter context */
IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);
spin_unlock_bh(&fcoe->lock);
return 1;
out_noddp_free:
pci_pool_free(pool, ddp->udl, ddp->udp);
ixgbe_fcoe_clear_ddp(ddp);
out_noddp_unmap:
pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
put_cpu();
return 0;
}
/**
* ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
* @netdev: the corresponding net_device
* @xid: the exchange id requesting ddp
* @sgl: the scatter-gather list for this request
* @sgc: the number of scatter-gather items
*
* This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
* and is expected to be called from ULD, e.g., FCP layer of libfc
* to set up ddp for the corresponding xid of the given sglist for
* the corresponding I/O.
*
* Returns : 1 for success and 0 for no ddp
*/
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
struct scatterlist *sgl, unsigned int sgc)
{
return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
}
/**
* ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
* @netdev: the corresponding net_device
* @xid: the exchange id requesting ddp
* @sgl: the scatter-gather list for this request
* @sgc: the number of scatter-gather items
*
* This is the implementation of net_device_ops.ndo_fcoe_ddp_target
* and is expected to be called from ULD, e.g., FCP layer of libfc
* to set up ddp for the corresponding xid of the given sglist for
* the corresponding I/O. The DDP in target mode is a write I/O request
* from the initiator.
*
* Returns : 1 for success and 0 for no ddp
*/
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
struct scatterlist *sgl, unsigned int sgc)
{
return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
}
/**
* ixgbe_fcoe_ddp - check ddp status and mark it done
* @adapter: ixgbe adapter
* @rx_desc: advanced rx descriptor
* @skb: the skb holding the received data
*
* This checks ddp status.
*
* Returns : < 0 indicates an error or not an FCoE ddp, 0 indicates
* not passing the skb to ULD, > 0 indicates the length of data
* being ddped.
*/
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb,
u32 staterr)
{
u16 xid;
u32 fctl;
u32 fceofe, fcerr, fcstat;
int rc = -EINVAL;
struct ixgbe_fcoe *fcoe;
struct ixgbe_fcoe_ddp *ddp;
struct fc_frame_header *fh;
struct fcoe_crc_eof *crc;
fcerr = (staterr & IXGBE_RXDADV_ERR_FCERR);
fceofe = (staterr & IXGBE_RXDADV_ERR_FCEOFE);
if (fcerr == IXGBE_FCERR_BADCRC)
skb_checksum_none_assert(skb);
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
fh = (struct fc_frame_header *)(skb->data +
sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
else
fh = (struct fc_frame_header *)(skb->data +
sizeof(struct fcoe_hdr));
fctl = ntoh24(fh->fh_f_ctl);
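/* pick the exchange id used to index the DDP context from either OX_ID or RX_ID, depending on the exchange context bit */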
if (fctl & FC_FC_EX_CTX)
xid = be16_to_cpu(fh->fh_ox_id);
else
xid = be16_to_cpu(fh->fh_rx_id);
if (xid >= IXGBE_FCOE_DDP_MAX)
goto ddp_out;
fcoe = &adapter->fcoe;
ddp = &fcoe->ddp[xid];
if (!ddp->udl)
goto ddp_out;
if (fcerr | fceofe)
goto ddp_out;
fcstat = (staterr & IXGBE_RXDADV_STAT_FCSTAT);
if (fcstat) {
/* update length of DDPed data */
ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
/* unmap the sg list when FCP_RSP is received */
if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) {
pci_unmap_sg(adapter->pdev, ddp->sgl,
ddp->sgc, DMA_FROM_DEVICE);
ddp->err = (fcerr | fceofe);
ddp->sgl = NULL;
ddp->sgc = 0;
}
/* return 0 to bypass going to ULD for DDPed data */
if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP)
rc = 0;
else if (ddp->len)
rc = ddp->len;
}
/* In target mode, check the last data frame of the sequence.
* For DDP in target mode the data is already DDPed, but the header
* indication of the last data frame lets us tell whether we got all
* the data so the ULP can send FCP_RSP back. As this is not a full
* fcoe frame, we fill in the trailer here so it won't be
* dropped by the ULP stack.
*/
if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
(fctl & FC_FC_END_SEQ)) {
crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
crc->fcoe_eof = FC_EOF_T;
}
ddp_out:
return rc;
}
/**
* ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
* @tx_ring: tx desc ring
* @skb: associated skb
* @tx_flags: tx flags
* @hdr_len: hdr_len to be returned
*
* This sets up large send offload for FCoE
*
* Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error
*/
int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
u32 tx_flags, u8 *hdr_len)
{
struct fc_frame_header *fh;
u32 vlan_macip_lens;
u32 fcoe_sof_eof = 0;
u32 mss_l4len_idx;
u8 sof, eof;
if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
skb_shinfo(skb)->gso_type);
return -EINVAL;
}
/* resets the header to point fcoe/fc */
skb_set_network_header(skb, skb->mac_len);
skb_set_transport_header(skb, skb->mac_len +
sizeof(struct fcoe_hdr));
/* sets up SOF and ORIS */
sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
switch (sof) {
case FC_SOF_I2:
fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS;
break;
case FC_SOF_I3:
fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF |
IXGBE_ADVTXD_FCOEF_ORIS;
break;
case FC_SOF_N2:
break;
case FC_SOF_N3:
fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF;
break;
default:
dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
return -EINVAL;
}
/* the first byte of the last dword is EOF */
skb_copy_bits(skb, skb->len - 4, &eof, 1);
/* sets up EOF and ORIE */
switch (eof) {
case FC_EOF_N:
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
break;
case FC_EOF_T:
/* lso needs ORIE */
if (skb_is_gso(skb))
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N |
IXGBE_ADVTXD_FCOEF_ORIE;
else
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
break;
case FC_EOF_NI:
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
break;
case FC_EOF_A:
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
break;
default:
dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
return -EINVAL;
}
/* sets up PARINC indicating data offset */
fh = (struct fc_frame_header *)skb_transport_header(skb);
if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;
/* include trailer in headlen as it is replicated per frame */
*hdr_len = sizeof(struct fcoe_crc_eof);
/* hdr_len includes fc_hdr if FCoE LSO is enabled */
if (skb_is_gso(skb))
*hdr_len += (skb_transport_offset(skb) +
sizeof(struct fc_frame_header));
/* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
vlan_macip_lens = skb_transport_offset(skb) +
sizeof(struct fc_frame_header);
vlan_macip_lens |= (skb_transport_offset(skb) - 4)
<< IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
/* write context desc */
ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx);
return skb_is_gso(skb);
}
static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)
{
unsigned int cpu;
struct pci_pool **pool;
for_each_possible_cpu(cpu) {
pool = per_cpu_ptr(fcoe->pool, cpu);
if (*pool)
pci_pool_destroy(*pool);
}
free_percpu(fcoe->pool);
fcoe->pool = NULL;
}
static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
{
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
unsigned int cpu;
struct pci_pool **pool;
char pool_name[32];
fcoe->pool = alloc_percpu(struct pci_pool *);
if (!fcoe->pool)
return;
/* allocate pci pool for each cpu */
for_each_possible_cpu(cpu) {
snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
pool = per_cpu_ptr(fcoe->pool, cpu);
*pool = pci_pool_create(pool_name,
adapter->pdev, IXGBE_FCPTR_MAX,
IXGBE_FCPTR_ALIGN, PAGE_SIZE);
if (!*pool) {
e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
ixgbe_fcoe_ddp_pools_free(fcoe);
return;
}
}
}
/**
* ixgbe_configure_fcoe - configures registers for fcoe at start
* @adapter: ptr to ixgbe adapter
*
* This sets up FCoE related registers
*
* Returns : none
*/
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
{
int i, fcoe_q, fcoe_i;
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
unsigned int cpu;
if (!fcoe->pool) {
spin_lock_init(&fcoe->lock);
ixgbe_fcoe_ddp_pools_alloc(adapter);
if (!fcoe->pool) {
e_err(drv, "failed to alloc percpu fcoe DDP pools\n");
return;
}
/* Extra buffer to be shared by all DDPs for HW work around */
fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
if (fcoe->extra_ddp_buffer == NULL) {
e_err(drv, "failed to allocated extra DDP buffer\n");
goto out_ddp_pools;
}
fcoe->extra_ddp_buffer_dma =
dma_map_single(&adapter->pdev->dev,
fcoe->extra_ddp_buffer,
IXGBE_FCBUFF_MIN,
DMA_FROM_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev,
fcoe->extra_ddp_buffer_dma)) {
e_err(drv, "failed to map extra DDP buffer\n");
goto out_extra_ddp_buffer;
}
/* Alloc per cpu mem to count the ddp alloc failure number */
fcoe->pcpu_noddp = alloc_percpu(u64);
if (!fcoe->pcpu_noddp) {
e_err(drv, "failed to alloc noddp counter\n");
goto out_pcpu_noddp_alloc_fail;
}
fcoe->pcpu_noddp_ext_buff = alloc_percpu(u64);
if (!fcoe->pcpu_noddp_ext_buff) {
e_err(drv, "failed to alloc noddp extra buff cnt\n");
goto out_pcpu_noddp_extra_buff_alloc_fail;
}
for_each_possible_cpu(cpu) {
*per_cpu_ptr(fcoe->pcpu_noddp, cpu) = 0;
*per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) = 0;
}
}
/* Enable L2 eth type filter for FCoE */
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
(ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
/* Enable L2 eth type filter for FIP */
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
(ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
if (adapter->ring_feature[RING_F_FCOE].indices) {
/* Use multiple rx queues for FCoE by redirection table */
for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
fcoe_i = f->mask + i % f->indices;
fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
}
IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
} else {
/* Use single rx queue for FCoE */
fcoe_i = f->mask;
fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
IXGBE_ETQS_QUEUE_EN |
(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
}
/* send FIP frames to the first FCoE queue */
fcoe_i = f->mask;
fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
IXGBE_ETQS_QUEUE_EN |
(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, IXGBE_FCRXCTRL_FCCRCBO |
(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
return;
out_pcpu_noddp_extra_buff_alloc_fail:
free_percpu(fcoe->pcpu_noddp);
out_pcpu_noddp_alloc_fail:
dma_unmap_single(&adapter->pdev->dev,
fcoe->extra_ddp_buffer_dma,
IXGBE_FCBUFF_MIN,
DMA_FROM_DEVICE);
out_extra_ddp_buffer:
kfree(fcoe->extra_ddp_buffer);
out_ddp_pools:
ixgbe_fcoe_ddp_pools_free(fcoe);
}
/**
* ixgbe_cleanup_fcoe - release all fcoe ddp context resources
* @adapter: ixgbe adapter
*
* Cleans up outstanding ddp context resources
*
* Returns : none
*/
void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
{
int i;
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
if (!fcoe->pool)
return;
for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
ixgbe_fcoe_ddp_put(adapter->netdev, i);
dma_unmap_single(&adapter->pdev->dev,
fcoe->extra_ddp_buffer_dma,
IXGBE_FCBUFF_MIN,
DMA_FROM_DEVICE);
free_percpu(fcoe->pcpu_noddp);
free_percpu(fcoe->pcpu_noddp_ext_buff);
kfree(fcoe->extra_ddp_buffer);
ixgbe_fcoe_ddp_pools_free(fcoe);
}
/**
* ixgbe_fcoe_enable - turn on FCoE offload feature
* @netdev: the corresponding netdev
*
* Turns on FCoE offload feature in 82599.
*
* Returns : 0 indicates success or -EINVAL on failure
*/
int ixgbe_fcoe_enable(struct net_device *netdev)
{
int rc = -EINVAL;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
goto out_enable;
atomic_inc(&fcoe->refcnt);
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
goto out_enable;
e_info(drv, "Enabling FCoE offload features.\n");
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
ixgbe_clear_interrupt_scheme(adapter);
adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
netdev->features |= NETIF_F_FCOE_CRC;
netdev->features |= NETIF_F_FSO;
netdev->features |= NETIF_F_FCOE_MTU;
netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
ixgbe_init_interrupt_scheme(adapter);
netdev_features_change(netdev);
if (netif_running(netdev))
netdev->netdev_ops->ndo_open(netdev);
rc = 0;
out_enable:
return rc;
}
/**
* ixgbe_fcoe_disable - turn off FCoE offload feature
* @netdev: the corresponding netdev
*
* Turns off FCoE offload feature in 82599.
*
* Returns : 0 indicates success or -EINVAL on failure
*/
int ixgbe_fcoe_disable(struct net_device *netdev)
{
int rc = -EINVAL;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
goto out_disable;
if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
goto out_disable;
if (!atomic_dec_and_test(&fcoe->refcnt))
goto out_disable;
e_info(drv, "Disabling FCoE offload features.\n");
netdev->features &= ~NETIF_F_FCOE_CRC;
netdev->features &= ~NETIF_F_FSO;
netdev->features &= ~NETIF_F_FCOE_MTU;
netdev->fcoe_ddp_xid = 0;
netdev_features_change(netdev);
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
ixgbe_clear_interrupt_scheme(adapter);
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
adapter->ring_feature[RING_F_FCOE].indices = 0;
ixgbe_cleanup_fcoe(adapter);
ixgbe_init_interrupt_scheme(adapter);
if (netif_running(netdev))
netdev->netdev_ops->ndo_open(netdev);
rc = 0;
out_disable:
return rc;
}
/**
* ixgbe_fcoe_get_wwn - get world wide name for the node or the port
* @netdev: the corresponding net_device
* @wwn: the world wide name
* @type: the type of world wide name
*
* Returns the node or port world wide name. If both the prefix and the SAN
* MAC address are valid, the wwn is formed based on NAA-2 for the
* IEEE Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3).
*
* Returns : 0 on success
*/
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
int rc = -EINVAL;
u16 prefix = 0xffff;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_mac_info *mac = &adapter->hw.mac;
switch (type) {
case NETDEV_FCOE_WWNN:
prefix = mac->wwnn_prefix;
break;
case NETDEV_FCOE_WWPN:
prefix = mac->wwpn_prefix;
break;
default:
break;
}
if ((prefix != 0xffff) &&
is_valid_ether_addr(mac->san_addr)) {
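/* NAA-2 IEEE Extended name: 16-bit prefix in the upper bits followed by the 48-bit SAN MAC address */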
*wwn = ((u64) prefix << 48) |
((u64) mac->san_addr[0] << 40) |
((u64) mac->san_addr[1] << 32) |
((u64) mac->san_addr[2] << 24) |
((u64) mac->san_addr[3] << 16) |
((u64) mac->san_addr[4] << 8) |
((u64) mac->san_addr[5]);
rc = 0;
}
return rc;
}

View File

@ -0,0 +1,83 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _IXGBE_FCOE_H
#define _IXGBE_FCOE_H
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_fcoe.h>
/* shift bits within STAT for FCSTAT */
#define IXGBE_RXDADV_FCSTAT_SHIFT 4
/* ddp user buffer */
#define IXGBE_BUFFCNT_MAX 256 /* 8 bits bufcnt */
#define IXGBE_FCPTR_ALIGN 16
#define IXGBE_FCPTR_MAX (IXGBE_BUFFCNT_MAX * sizeof(dma_addr_t))
#define IXGBE_FCBUFF_4KB 0x0
#define IXGBE_FCBUFF_8KB 0x1
#define IXGBE_FCBUFF_16KB 0x2
#define IXGBE_FCBUFF_64KB 0x3
#define IXGBE_FCBUFF_MAX 65536 /* 64KB max */
#define IXGBE_FCBUFF_MIN 4096 /* 4KB min */
#define IXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */
/* Default traffic class to use for FCoE */
#define IXGBE_FCOE_DEFTC 3
/* fcerr */
#define IXGBE_FCERR_BADCRC 0x00100000
/* FCoE DDP for target mode */
#define __IXGBE_FCOE_TARGET 1
struct ixgbe_fcoe_ddp {
int len;
u32 err;
unsigned int sgc;
struct scatterlist *sgl;
dma_addr_t udp;
u64 *udl;
struct pci_pool *pool;
};
struct ixgbe_fcoe {
struct pci_pool **pool;
atomic_t refcnt;
spinlock_t lock;
struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
unsigned char *extra_ddp_buffer;
dma_addr_t extra_ddp_buffer_dma;
unsigned long mode;
u64 __percpu *pcpu_noddp;
u64 __percpu *pcpu_noddp_ext_buff;
#ifdef CONFIG_IXGBE_DCB
u8 up;
#endif
};
#endif /* _IXGBE_FCOE_H */

File diff suppressed because it is too large

View File

@ -0,0 +1,471 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include <linux/pci.h>
#include <linux/delay.h>
#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_mbx.h"
/**
* ixgbe_read_mbx - Reads a message from the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @mbx_id: id of mailbox to read
*
* returns SUCCESS if it successfully read message from buffer
**/
s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_ERR_MBX;
/* limit read to size of mailbox */
if (size > mbx->size)
size = mbx->size;
if (mbx->ops.read)
ret_val = mbx->ops.read(hw, msg, size, mbx_id);
return ret_val;
}
/**
* ixgbe_write_mbx - Write a message to the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @mbx_id: id of mailbox to write
*
* returns SUCCESS if it successfully copied message into the buffer
**/
s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = 0;
if (size > mbx->size)
ret_val = IXGBE_ERR_MBX;
else if (mbx->ops.write)
ret_val = mbx->ops.write(hw, msg, size, mbx_id);
return ret_val;
}
/**
* ixgbe_check_for_msg - checks to see if someone sent us mail
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to check
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_ERR_MBX;
if (mbx->ops.check_for_msg)
ret_val = mbx->ops.check_for_msg(hw, mbx_id);
return ret_val;
}
/**
* ixgbe_check_for_ack - checks to see if someone sent us ACK
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to check
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_ERR_MBX;
if (mbx->ops.check_for_ack)
ret_val = mbx->ops.check_for_ack(hw, mbx_id);
return ret_val;
}
/**
* ixgbe_check_for_rst - checks to see if other side has reset
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to check
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_ERR_MBX;
if (mbx->ops.check_for_rst)
ret_val = mbx->ops.check_for_rst(hw, mbx_id);
return ret_val;
}
/**
* ixgbe_poll_for_msg - Wait for message notification
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to write
*
* returns SUCCESS if it successfully received a message notification
**/
static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
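/* poll until check_for_msg reports a message or the timeout expires, waiting usec_delay microseconds between polls */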
if (!countdown || !mbx->ops.check_for_msg)
goto out;
while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
countdown--;
if (!countdown)
break;
udelay(mbx->usec_delay);
}
out:
return countdown ? 0 : IXGBE_ERR_MBX;
}
/**
* ixgbe_poll_for_ack - Wait for message acknowledgement
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to write
*
* returns SUCCESS if it successfully received a message acknowledgement
**/
static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
if (!countdown || !mbx->ops.check_for_ack)
goto out;
while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
countdown--;
if (!countdown)
break;
udelay(mbx->usec_delay);
}
out:
return countdown ? 0 : IXGBE_ERR_MBX;
}
/**
* ixgbe_read_posted_mbx - Wait for message notification and receive message
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @mbx_id: id of mailbox to write
*
* returns SUCCESS if it successfully received a message notification and
* copied it into the receive buffer.
**/
static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_ERR_MBX;
if (!mbx->ops.read)
goto out;
ret_val = ixgbe_poll_for_msg(hw, mbx_id);
/* if a message notification was received, read the message; otherwise we timed out */
if (!ret_val)
ret_val = mbx->ops.read(hw, msg, size, mbx_id);
out:
return ret_val;
}
/**
* ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @mbx_id: id of mailbox to write
*
* returns SUCCESS if it successfully copied message into the buffer and
* received an ack to that message within delay * timeout period
**/
static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_ERR_MBX;
/* exit if either we can't write or there isn't a defined timeout */
if (!mbx->ops.write || !mbx->timeout)
goto out;
/* send msg */
ret_val = mbx->ops.write(hw, msg, size, mbx_id);
/* if msg sent wait until we receive an ack */
if (!ret_val)
ret_val = ixgbe_poll_for_ack(hw, mbx_id);
out:
return ret_val;
}
static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
{
u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
s32 ret_val = IXGBE_ERR_MBX;
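/* a set bit means the event is pending; writing the mask back clears it and success is returned */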
if (mbvficr & mask) {
ret_val = 0;
IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
}
return ret_val;
}
/**
* ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
* @hw: pointer to the HW structure
* @vf_number: the VF index
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
{
s32 ret_val = IXGBE_ERR_MBX;
s32 index = IXGBE_MBVFICR_INDEX(vf_number);
u32 vf_bit = vf_number % 16;
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
index)) {
ret_val = 0;
hw->mbx.stats.reqs++;
}
return ret_val;
}
/**
* ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
* @hw: pointer to the HW structure
* @vf_number: the VF index
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
{
s32 ret_val = IXGBE_ERR_MBX;
s32 index = IXGBE_MBVFICR_INDEX(vf_number);
u32 vf_bit = vf_number % 16;
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
index)) {
ret_val = 0;
hw->mbx.stats.acks++;
}
return ret_val;
}
/**
* ixgbe_check_for_rst_pf - checks to see if the VF has reset
* @hw: pointer to the HW structure
* @vf_number: the VF index
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
{
u32 reg_offset = (vf_number < 32) ? 0 : 1;
u32 vf_shift = vf_number % 32;
u32 vflre = 0;
s32 ret_val = IXGBE_ERR_MBX;
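/* the VFLR event register location differs between 82599 (VFLRE) and X540 (VFLREC) */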
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
break;
case ixgbe_mac_X540:
vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
break;
default:
break;
}
if (vflre & (1 << vf_shift)) {
ret_val = 0;
IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
hw->mbx.stats.rsts++;
}
return ret_val;
}
/**
* ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
* @hw: pointer to the HW structure
* @vf_number: the VF index
*
* return SUCCESS if we obtained the mailbox lock
**/
static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
{
s32 ret_val = IXGBE_ERR_MBX;
u32 p2v_mailbox;
/* Take ownership of the buffer */
IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
/* reserve mailbox for vf use */
p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
ret_val = 0;
return ret_val;
}
/**
* ixgbe_write_mbx_pf - Places a message in the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @vf_number: the VF index
*
* returns SUCCESS if it successfully copied message into the buffer
**/
static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 vf_number)
{
s32 ret_val;
u16 i;
/* lock the mailbox to prevent pf/vf race condition */
ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
if (ret_val)
goto out_no_write;
/* flush msg and acks as we are overwriting the message buffer */
ixgbe_check_for_msg_pf(hw, vf_number);
ixgbe_check_for_ack_pf(hw, vf_number);
/* copy the caller specified message to the mailbox memory buffer */
for (i = 0; i < size; i++)
IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
/* Interrupt VF to tell it a message has been sent and release buffer*/
IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
/* update stats */
hw->mbx.stats.msgs_tx++;
out_no_write:
return ret_val;
}
/**
* ixgbe_read_mbx_pf - Read a message from the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @vf_number: the VF index
*
* This function copies a message from the mailbox buffer to the caller's
* memory buffer. The presumption is that the caller knows that there was
* a message due to a VF request so no polling for message is needed.
**/
static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 vf_number)
{
s32 ret_val;
u16 i;
/* lock the mailbox to prevent pf/vf race condition */
ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
if (ret_val)
goto out_no_read;
/* copy the message to the mailbox memory buffer */
for (i = 0; i < size; i++)
msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
/* Acknowledge the message and release buffer */
IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
/* update stats */
hw->mbx.stats.msgs_rx++;
out_no_read:
return ret_val;
}
#ifdef CONFIG_PCI_IOV
/**
* ixgbe_init_mbx_params_pf - set initial values for pf mailbox
* @hw: pointer to the HW structure
*
* Initializes the hw->mbx struct to correct values for pf mailbox
*/
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
if (hw->mac.type != ixgbe_mac_82599EB &&
hw->mac.type != ixgbe_mac_X540)
return;
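/* with timeout and usec_delay left at zero, the posted (polling) mailbox paths exit immediately on the PF side */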
mbx->timeout = 0;
mbx->usec_delay = 0;
mbx->stats.msgs_tx = 0;
mbx->stats.msgs_rx = 0;
mbx->stats.reqs = 0;
mbx->stats.acks = 0;
mbx->stats.rsts = 0;
mbx->size = IXGBE_VFMAILBOX_SIZE;
}
#endif /* CONFIG_PCI_IOV */
struct ixgbe_mbx_operations mbx_ops_generic = {
.read = ixgbe_read_mbx_pf,
.write = ixgbe_write_mbx_pf,
.read_posted = ixgbe_read_posted_mbx,
.write_posted = ixgbe_write_posted_mbx,
.check_for_msg = ixgbe_check_for_msg_pf,
.check_for_ack = ixgbe_check_for_ack_pf,
.check_for_rst = ixgbe_check_for_rst_pf,
};

View File

@ -0,0 +1,93 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _IXGBE_MBX_H_
#define _IXGBE_MBX_H_
#include "ixgbe_type.h"
#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
#define IXGBE_ERR_MBX -100
#define IXGBE_VFMAILBOX 0x002FC
#define IXGBE_VFMBMEM 0x00200
#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
/* If it's an IXGBE_VF_* msg then it originates in the VF and is sent to the
* PF. The reverse is true if it is IXGBE_PF_*.
* Message ACK's are the value or'd with 0xF0000000
*/
#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
* this are the ACK */
#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
* this are the NACK */
#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
clear to send requests */
#define IXGBE_VT_MSGINFO_SHIFT 16
/* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
#define IXGBE_VF_RESET 0x01 /* VF requests reset */
#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
#define IXGBE_VF_MC_TYPE_WORD 3
#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
#ifdef CONFIG_PCI_IOV
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
#endif /* CONFIG_PCI_IOV */
extern struct ixgbe_mbx_operations mbx_ops_generic;
#endif /* _IXGBE_MBX_H_ */

File diff suppressed because it is too large

View File

@ -0,0 +1,131 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _IXGBE_PHY_H_
#define _IXGBE_PHY_H_
#include "ixgbe_type.h"
#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
/* EEPROM byte offsets */
#define IXGBE_SFF_IDENTIFIER 0x0
#define IXGBE_SFF_IDENTIFIER_SFP 0x3
#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
#define IXGBE_SFF_1GBE_COMP_CODES 0x6
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
/* Bitmasks */
#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
#define IXGBE_I2C_EEPROM_READ_MASK 0x100
#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
/* Flow control defines */
#define IXGBE_TAF_SYM_PAUSE 0x400
#define IXGBE_TAF_ASM_PAUSE 0x800
/* Bit-shift macros */
#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24
#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16
#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8
/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600
#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500
#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00
#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100
/* I2C SDA and SCL timing parameters for standard mode */
#define IXGBE_I2C_T_HD_STA 4
#define IXGBE_I2C_T_LOW 5
#define IXGBE_I2C_T_HIGH 4
#define IXGBE_I2C_T_SU_STA 5
#define IXGBE_I2C_T_HD_DATA 5
#define IXGBE_I2C_T_SU_DATA 1
#define IXGBE_I2C_T_RISE 1
#define IXGBE_I2C_T_FALL 1
#define IXGBE_I2C_T_SU_STO 4
#define IXGBE_I2C_T_BUF 5
#define IXGBE_TN_LASI_STATUS_REG 0x9005
#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data);
s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data);
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete);
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg);
/* PHY specific */
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up);
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
u16 *firmware_version);
s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
u16 *firmware_version);
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u16 *data_offset);
s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 eeprom_data);
#endif /* _IXGBE_PHY_H_ */

View File

@ -0,0 +1,926 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#endif
#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_sriov.h"
#ifdef CONFIG_PCI_IOV
static int ixgbe_find_enabled_vfs(struct ixgbe_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct pci_dev *pvfdev;
u16 vf_devfn = 0;
int device_id;
int vfs_found = 0;
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
device_id = IXGBE_DEV_ID_82599_VF;
break;
case ixgbe_mac_X540:
device_id = IXGBE_DEV_ID_X540_VF;
break;
default:
device_id = 0;
break;
}
vf_devfn = pdev->devfn + 0x80;
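/* candidate VF devfns start 0x80 past the PF's devfn and step by two; count how many VF devices are already present */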
pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
while (pvfdev) {
if (pvfdev->devfn == vf_devfn)
vfs_found++;
vf_devfn += 2;
pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
device_id, pvfdev);
}
return vfs_found;
}
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
const struct ixgbe_info *ii)
{
struct ixgbe_hw *hw = &adapter->hw;
int err = 0;
int num_vf_macvlans, i;
struct vf_macvlans *mv_list;
int pre_existing_vfs = 0;
pre_existing_vfs = ixgbe_find_enabled_vfs(adapter);
if (!pre_existing_vfs && !adapter->num_vfs)
return;
/* If there are pre-existing VFs then we have to force
* use of that many because they were not deleted the last
* time someone removed the PF driver. That would have
* been because they were allocated to guest VMs and can't
* be removed. Go ahead and just re-enable the old amount.
* If the user wants to change the number of VFs they can
* use ethtool while making sure no VFs are allocated to
* guest VMs... i.e. the right way.
*/
if (pre_existing_vfs) {
adapter->num_vfs = pre_existing_vfs;
dev_warn(&adapter->pdev->dev, "Virtual Functions already "
"enabled for this device - Please reload all "
"VF drivers to avoid spoofed packet errors\n");
} else {
err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
}
if (err) {
e_err(probe, "Failed to enable PCI sriov: %d\n", err);
goto err_novfs;
}
adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs);
num_vf_macvlans = hw->mac.num_rar_entries -
(IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);
adapter->mv_list = mv_list = kcalloc(num_vf_macvlans,
sizeof(struct vf_macvlans),
GFP_KERNEL);
if (mv_list) {
/* Initialize list of VF macvlans */
INIT_LIST_HEAD(&adapter->vf_mvs.l);
for (i = 0; i < num_vf_macvlans; i++) {
mv_list->vf = -1;
mv_list->free = true;
mv_list->rar_entry = hw->mac.num_rar_entries -
(i + adapter->num_vfs + 1);
list_add(&mv_list->l, &adapter->vf_mvs.l);
mv_list++;
}
}
/* If call to enable VFs succeeded then allocate memory
* for per VF control structures.
*/
adapter->vfinfo =
kcalloc(adapter->num_vfs,
sizeof(struct vf_data_storage), GFP_KERNEL);
if (adapter->vfinfo) {
/* Now that we're sure SR-IOV is enabled
* and memory allocated set up the mailbox parameters
*/
ixgbe_init_mbx_params_pf(hw);
memcpy(&hw->mbx.ops, ii->mbx_ops,
sizeof(hw->mbx.ops));
/* Disable RSC when in SR-IOV mode */
adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
IXGBE_FLAG2_RSC_ENABLED);
for (i = 0; i < adapter->num_vfs; i++)
adapter->vfinfo[i].spoofchk_enabled = true;
return;
}
/* Oh oh */
e_err(probe, "Unable to allocate memory for VF Data Storage - "
"SRIOV disabled\n");
pci_disable_sriov(adapter->pdev);
err_novfs:
adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
adapter->num_vfs = 0;
}
#endif /* #ifdef CONFIG_PCI_IOV */
void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 gcr;
u32 gpie;
u32 vmdctl;
int i;
#ifdef CONFIG_PCI_IOV
/* disable iov and allow time for transactions to clear */
pci_disable_sriov(adapter->pdev);
#endif
/* turn off device IOV mode */
gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
gcr &= ~(IXGBE_GCR_EXT_SRIOV);
IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
gpie &= ~IXGBE_GPIE_VTMODE_MASK;
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
/* set default pool back to 0 */
vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
IXGBE_WRITE_FLUSH(hw);
/* take a breather then clean up driver data */
msleep(100);
/* Release reference to VF devices */
for (i = 0; i < adapter->num_vfs; i++) {
if (adapter->vfinfo[i].vfdev)
pci_dev_put(adapter->vfinfo[i].vfdev);
}
kfree(adapter->vfinfo);
kfree(adapter->mv_list);
adapter->vfinfo = NULL;
adapter->num_vfs = 0;
adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
}
static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
int entries, u16 *hash_list, u32 vf)
{
struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
struct ixgbe_hw *hw = &adapter->hw;
int i;
u32 vector_bit;
u32 vector_reg;
u32 mta_reg;
/* only so many hash values supported */
entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
/*
* salt away the number of multi cast addresses assigned
* to this VF for later use to restore when the PF multi cast
* list changes
*/
vfinfo->num_vf_mc_hashes = entries;
/*
* VFs are limited to using the MTA hash table for their multicast
* addresses
*/
for (i = 0; i < entries; i++) {
vfinfo->vf_mc_hashes[i] = hash_list[i];
}
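/* set the bit for each hash value in the Multicast Table Array: vector_reg selects the 32-bit register, vector_bit the bit within it */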
for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
mta_reg |= (1 << vector_bit);
IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
}
return 0;
}
static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct list_head *pos;
struct vf_macvlans *entry;
list_for_each(pos, &adapter->vf_mvs.l) {
entry = list_entry(pos, struct vf_macvlans, l);
if (entry->free == false)
hw->mac.ops.set_rar(hw, entry->rar_entry,
entry->vf_macvlan,
entry->vf, IXGBE_RAH_AV);
}
}
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct vf_data_storage *vfinfo;
int i, j;
u32 vector_bit;
u32 vector_reg;
u32 mta_reg;
for (i = 0; i < adapter->num_vfs; i++) {
vfinfo = &adapter->vfinfo[i];
for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
hw->addr_ctrl.mta_in_use++;
vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
mta_reg |= (1 << vector_bit);
IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
}
}
/* Restore any VF macvlans */
ixgbe_restore_vf_macvlans(adapter);
}
static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
u32 vf)
{
return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
}
static void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
{
struct ixgbe_hw *hw = &adapter->hw;
int new_mtu = msgbuf[1];
u32 max_frs;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
/* Only X540 supports jumbo frames in IOV mode */
if (adapter->hw.mac.type != ixgbe_mac_X540)
return;
/* MTU < 68 is an error and causes problems on some kernels */
if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) {
e_err(drv, "VF mtu %d out of range\n", new_mtu);
return;
}
max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
if (max_frs < new_mtu) {
max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
}
e_info(hw, "VF requests change max MTU to %d\n", new_mtu);
}
static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
{
u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
vmolr |= (IXGBE_VMOLR_ROMPE |
IXGBE_VMOLR_BAM);
if (aupe)
vmolr |= IXGBE_VMOLR_AUPE;
else
vmolr &= ~IXGBE_VMOLR_AUPE;
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
}
static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
if (vid)
IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf),
(vid | IXGBE_VMVIR_VLANA_DEFAULT));
else
IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
}
static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
int rar_entry = hw->mac.num_rar_entries - (vf + 1);
/* reset offloads to defaults */
if (adapter->vfinfo[vf].pf_vlan) {
ixgbe_set_vf_vlan(adapter, true,
adapter->vfinfo[vf].pf_vlan, vf);
ixgbe_set_vmvir(adapter,
(adapter->vfinfo[vf].pf_vlan |
(adapter->vfinfo[vf].pf_qos <<
VLAN_PRIO_SHIFT)), vf);
ixgbe_set_vmolr(hw, vf, false);
} else {
ixgbe_set_vmvir(adapter, 0, vf);
ixgbe_set_vmolr(hw, vf, true);
}
/* reset multicast table array for vf */
adapter->vfinfo[vf].num_vf_mc_hashes = 0;
/* Flush and reset the mta with the new values */
ixgbe_set_rx_mode(adapter->netdev);
hw->mac.ops.clear_rar(hw, rar_entry);
}
static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
int vf, unsigned char *mac_addr)
{
struct ixgbe_hw *hw = &adapter->hw;
int rar_entry = hw->mac.num_rar_entries - (vf + 1);
memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
return 0;
}
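/*
 * Note on the RAR layout used above: VF unicast addresses are stored from
 * the top of the receive address register table downwards, i.e.
 * rar_entry = num_rar_entries - (vf + 1).  With a 128-entry table (as on
 * X540), VF 0 owns RAR 127, VF 1 owns RAR 126, and so on, leaving the low
 * entries free for the PF's own filters.
 */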
static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
int vf, int index, unsigned char *mac_addr)
{
struct ixgbe_hw *hw = &adapter->hw;
struct list_head *pos;
struct vf_macvlans *entry;
if (index <= 1) {
list_for_each(pos, &adapter->vf_mvs.l) {
entry = list_entry(pos, struct vf_macvlans, l);
if (entry->vf == vf) {
entry->vf = -1;
entry->free = true;
entry->is_macvlan = false;
hw->mac.ops.clear_rar(hw, entry->rar_entry);
}
}
}
/*
* If index was zero then we were asked to clear the uc list
* for the VF. We're done.
*/
if (!index)
return 0;
entry = NULL;
list_for_each(pos, &adapter->vf_mvs.l) {
entry = list_entry(pos, struct vf_macvlans, l);
if (entry->free)
break;
}
/*
* If we traversed the entire list and didn't find a free entry
* then we're out of space on the RAR table. Also entry may
* be NULL because the original memory allocation for the list
* failed, which is not fatal but does mean we can't support
* VF requests for MACVLAN because we couldn't allocate
* memory for the list management required.
*/
if (!entry || !entry->free)
return -ENOSPC;
entry->free = false;
entry->is_macvlan = true;
entry->vf = vf;
memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV);
return 0;
}
int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter)
{
int i;
for (i = 0; i < adapter->num_vfs; i++) {
if (adapter->vfinfo[i].vfdev->dev_flags &
PCI_DEV_FLAGS_ASSIGNED)
return true;
}
return false;
}
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
{
unsigned char vf_mac_addr[6];
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
unsigned int vfn = (event_mask & 0x3f);
struct pci_dev *pvfdev;
unsigned int device_id;
u16 thisvf_devfn = (pdev->devfn + 0x80 + (vfn << 1)) |
(pdev->devfn & 1);
bool enable = ((event_mask & 0x10000000U) != 0);
if (enable) {
random_ether_addr(vf_mac_addr);
e_info(probe, "IOV: VF %d is enabled MAC %pM\n",
vfn, vf_mac_addr);
/*
* Store away the VF "permanent" MAC address; it will ask
* for it later.
*/
memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
device_id = IXGBE_DEV_ID_82599_VF;
break;
case ixgbe_mac_X540:
device_id = IXGBE_DEV_ID_X540_VF;
break;
default:
device_id = 0;
break;
}
pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
while (pvfdev) {
if (pvfdev->devfn == thisvf_devfn)
break;
pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
device_id, pvfdev);
}
if (pvfdev)
adapter->vfinfo[vfn].vfdev = pvfdev;
else
e_err(drv, "Couldn't find pci dev ptr for VF %4.4x\n",
thisvf_devfn);
}
return 0;
}
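/*
 * Illustration of the devfn math above, assuming the PF sits at devfn 0
 * and the usual 82599/X540 VF offset and stride: VF 3 is expected at
 * devfn (0 + 0x80 + (3 << 1)) | 0 = 0x86, i.e. PCI device 0x10 function 6
 * on the same bus.  The matching pci_dev is then found by walking the
 * devices that carry the VF device ID chosen from the MAC type.
 */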
static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 reg;
u32 reg_offset, vf_shift;
vf_shift = vf % 32;
reg_offset = vf / 32;
/* enable transmit and receive for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
reg |= (1 << vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
reg |= (1 << vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
/* Enable counting of spoofed packets in the SSVPC register */
reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
reg |= (1 << vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
ixgbe_vf_reset_event(adapter, vf);
}
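/*
 * The VFTE/VFRE enable bits set above are spread across two 32-bit
 * registers (reg_offset = vf / 32, vf_shift = vf % 32), covering up to
 * 64 VFs.  For example VF 40 is controlled by bit 8 of IXGBE_VFTE(1) and
 * IXGBE_VFRE(1); ixgbe_disable_tx_rx() below clears both registers of
 * each pair to stop all VFs at once.
 */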
static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
struct ixgbe_hw *hw = &adapter->hw;
s32 retval;
int entries;
u16 *hash_list;
int add, vid, index;
u8 *new_mac;
retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
if (retval)
pr_err("Error receiving message from VF\n");
/* this is a message we already processed, do nothing */
if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
return retval;
/*
* until the vf completes a virtual function reset it should not be
* allowed to start any configuration.
*/
if (msgbuf[0] == IXGBE_VF_RESET) {
unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
new_mac = (u8 *)(&msgbuf[1]);
e_info(probe, "VF Reset msg received from vf %d\n", vf);
adapter->vfinfo[vf].clear_to_send = false;
ixgbe_vf_reset_msg(adapter, vf);
adapter->vfinfo[vf].clear_to_send = true;
if (is_valid_ether_addr(new_mac) &&
!adapter->vfinfo[vf].pf_set_mac)
ixgbe_set_vf_mac(adapter, vf, vf_mac);
else
ixgbe_set_vf_mac(adapter,
vf, adapter->vfinfo[vf].vf_mac_addresses);
/* reply to reset with ack and vf mac address */
msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
memcpy(new_mac, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
/*
* Piggyback the multicast filter type so VF can compute the
* correct vectors
*/
msgbuf[3] = hw->mac.mc_filter_type;
ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
return retval;
}
if (!adapter->vfinfo[vf].clear_to_send) {
msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
ixgbe_write_mbx(hw, msgbuf, 1, vf);
return retval;
}
switch ((msgbuf[0] & 0xFFFF)) {
case IXGBE_VF_SET_MAC_ADDR:
new_mac = ((u8 *)(&msgbuf[1]));
if (is_valid_ether_addr(new_mac) &&
!adapter->vfinfo[vf].pf_set_mac) {
ixgbe_set_vf_mac(adapter, vf, new_mac);
} else if (memcmp(adapter->vfinfo[vf].vf_mac_addresses,
new_mac, ETH_ALEN)) {
e_warn(drv, "VF %d attempted to override "
"administratively set MAC address\nReload "
"the VF driver to resume operations\n", vf);
retval = -1;
}
break;
case IXGBE_VF_SET_MULTICAST:
entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
>> IXGBE_VT_MSGINFO_SHIFT;
hash_list = (u16 *)&msgbuf[1];
retval = ixgbe_set_vf_multicasts(adapter, entries,
hash_list, vf);
break;
case IXGBE_VF_SET_LPE:
ixgbe_set_vf_lpe(adapter, msgbuf);
break;
case IXGBE_VF_SET_VLAN:
add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
>> IXGBE_VT_MSGINFO_SHIFT;
vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
if (adapter->vfinfo[vf].pf_vlan) {
e_warn(drv, "VF %d attempted to override "
"administratively set VLAN configuration\n"
"Reload the VF driver to resume operations\n",
vf);
retval = -1;
} else {
if (add)
adapter->vfinfo[vf].vlan_count++;
else if (adapter->vfinfo[vf].vlan_count)
adapter->vfinfo[vf].vlan_count--;
retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
if (!retval && adapter->vfinfo[vf].spoofchk_enabled)
hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
}
break;
case IXGBE_VF_SET_MACVLAN:
index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
IXGBE_VT_MSGINFO_SHIFT;
/*
* If the VF is allowed to set MAC filters then turn off
* anti-spoofing to avoid false positives. An index
* greater than 0 will indicate the VF is setting a
* macvlan MAC filter.
*/
if (index > 0 && adapter->vfinfo[vf].spoofchk_enabled)
ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
retval = ixgbe_set_vf_macvlan(adapter, vf, index,
(unsigned char *)(&msgbuf[1]));
break;
default:
e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
retval = IXGBE_ERR_MBX;
break;
}
/* notify the VF of the results of what it sent us */
if (retval)
msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
else
msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
ixgbe_write_mbx(hw, msgbuf, 1, vf);
return retval;
}
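/*
 * Shape of the mailbox messages handled above: msgbuf[0] carries the
 * command in its low 16 bits (IXGBE_VF_SET_MAC_ADDR, IXGBE_VF_SET_VLAN,
 * ...), extra per-command info in the IXGBE_VT_MSGINFO field, and the
 * ACK/NACK/CTS flags that the PF ORs in before replying.  msgbuf[1..]
 * holds the payload: the MAC address bytes, the multicast hash list,
 * the VLAN id or the requested MTU, depending on the command.
 */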
static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 msg = IXGBE_VT_MSGTYPE_NACK;
/* if device isn't clear to send it shouldn't be reading either */
if (!adapter->vfinfo[vf].clear_to_send)
ixgbe_write_mbx(hw, &msg, 1, vf);
}
void ixgbe_msg_task(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 vf;
for (vf = 0; vf < adapter->num_vfs; vf++) {
/* process any reset requests */
if (!ixgbe_check_for_rst(hw, vf))
ixgbe_vf_reset_event(adapter, vf);
/* process any messages pending */
if (!ixgbe_check_for_msg(hw, vf))
ixgbe_rcv_msg_from_vf(adapter, vf);
/* process any acks */
if (!ixgbe_check_for_ack(hw, vf))
ixgbe_rcv_ack_from_vf(adapter, vf);
}
}
void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
/* disable transmit and receive for all vfs */
IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
}
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 ping;
int i;
for (i = 0 ; i < adapter->num_vfs; i++) {
ping = IXGBE_PF_CONTROL_MSG;
if (adapter->vfinfo[i].clear_to_send)
ping |= IXGBE_VT_MSGTYPE_CTS;
ixgbe_write_mbx(hw, &ping, 1, i);
}
}
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
return -EINVAL;
adapter->vfinfo[vf].pf_set_mac = true;
dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
" change effective.");
if (test_bit(__IXGBE_DOWN, &adapter->state)) {
dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
" but the PF device is not up.\n");
dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
" attempting to use the VF device.\n");
}
return ixgbe_set_vf_mac(adapter, vf, mac);
}
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
int err = 0;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
return -EINVAL;
if (vlan || qos) {
err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
if (err)
goto out;
ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
ixgbe_set_vmolr(hw, vf, false);
if (adapter->vfinfo[vf].spoofchk_enabled)
hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
adapter->vfinfo[vf].vlan_count++;
adapter->vfinfo[vf].pf_vlan = vlan;
adapter->vfinfo[vf].pf_qos = qos;
dev_info(&adapter->pdev->dev,
"Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
if (test_bit(__IXGBE_DOWN, &adapter->state)) {
dev_warn(&adapter->pdev->dev,
"The VF VLAN has been set,"
" but the PF device is not up.\n");
dev_warn(&adapter->pdev->dev,
"Bring the PF device up before"
" attempting to use the VF device.\n");
}
} else {
err = ixgbe_set_vf_vlan(adapter, false,
adapter->vfinfo[vf].pf_vlan, vf);
ixgbe_set_vmvir(adapter, vlan, vf);
ixgbe_set_vmolr(hw, vf, true);
hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
if (adapter->vfinfo[vf].vlan_count)
adapter->vfinfo[vf].vlan_count--;
adapter->vfinfo[vf].pf_vlan = 0;
adapter->vfinfo[vf].pf_qos = 0;
}
out:
return err;
}
static int ixgbe_link_mbps(int internal_link_speed)
{
switch (internal_link_speed) {
case IXGBE_LINK_SPEED_100_FULL:
return 100;
case IXGBE_LINK_SPEED_1GB_FULL:
return 1000;
case IXGBE_LINK_SPEED_10GB_FULL:
return 10000;
default:
return 0;
}
}
static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
int link_speed)
{
int rf_dec, rf_int;
u32 bcnrc_val;
if (tx_rate != 0) {
/* Calculate the rate factor values to set */
rf_int = link_speed / tx_rate;
rf_dec = (link_speed - (rf_int * tx_rate));
rf_dec = (rf_dec * (1<<IXGBE_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
bcnrc_val |= ((rf_int<<IXGBE_RTTBCNRC_RF_INT_SHIFT) &
IXGBE_RTTBCNRC_RF_INT_MASK);
bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
} else {
bcnrc_val = 0;
}
IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */
/*
* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
* register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
* and 0x004 otherwise.
*/
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4);
break;
case ixgbe_mac_X540:
IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14);
break;
default:
break;
}
IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
}
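/*
 * Worked example of the rate-factor math above, assuming the usual
 * 14-bit fractional shift for IXGBE_RTTBCNRC_RF_INT_SHIFT: limiting a VF
 * to 300 Mb/s on a 10000 Mb/s link gives
 *
 *     rf_int = 10000 / 300                       = 33
 *     rf_dec = ((10000 - 33 * 300) << 14) / 300  = 5461
 *
 * i.e. a rate factor of about 33.33, the ratio of link speed to the
 * requested transmit rate.
 */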
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
{
int actual_link_speed, i;
bool reset_rate = false;
/* VF Tx rate limit was not set */
if (adapter->vf_rate_link_speed == 0)
return;
actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
if (actual_link_speed != adapter->vf_rate_link_speed) {
reset_rate = true;
adapter->vf_rate_link_speed = 0;
dev_info(&adapter->pdev->dev,
"Link speed has been changed. VF Transmit rate "
"is disabled\n");
}
for (i = 0; i < adapter->num_vfs; i++) {
if (reset_rate)
adapter->vfinfo[i].tx_rate = 0;
ixgbe_set_vf_rate_limit(&adapter->hw, i,
adapter->vfinfo[i].tx_rate,
actual_link_speed);
}
}
int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
int actual_link_speed;
actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
if ((vf >= adapter->num_vfs) || (!adapter->link_up) ||
(tx_rate > actual_link_speed) || (actual_link_speed != 10000) ||
((tx_rate != 0) && (tx_rate <= 10)))
/* rate limit cannot be set to 10Mb or less in 10Gb adapters */
return -EINVAL;
adapter->vf_rate_link_speed = actual_link_speed;
adapter->vfinfo[vf].tx_rate = (u16)tx_rate;
ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
return 0;
}
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int vf_target_reg = vf >> 3;
int vf_target_shift = vf % 8;
struct ixgbe_hw *hw = &adapter->hw;
u32 regval;
adapter->vfinfo[vf].spoofchk_enabled = setting;
regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
regval &= ~(1 << vf_target_shift);
regval |= (setting << vf_target_shift);
IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
if (adapter->vfinfo[vf].vlan_count) {
vf_target_shift += IXGBE_SPOOF_VLANAS_SHIFT;
regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
regval &= ~(1 << vf_target_shift);
regval |= (setting << vf_target_shift);
IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
}
return 0;
}
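/*
 * PFVFSPOOF layout relied on above: each 32-bit register covers eight
 * VFs (vf_target_reg = vf >> 3, vf_target_shift = vf % 8), with the MAC
 * anti-spoof enable bits in the low byte and the matching VLAN
 * anti-spoof bits IXGBE_SPOOF_VLANAS_SHIFT positions higher.  For VF 10,
 * for instance, the MAC bit is bit 2 of IXGBE_PFVFSPOOF(1) and the VLAN
 * bit sits IXGBE_SPOOF_VLANAS_SHIFT above it.
 */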
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
if (vf >= adapter->num_vfs)
return -EINVAL;
ivi->vf = vf;
memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
ivi->tx_rate = adapter->vfinfo[vf].tx_rate;
ivi->vlan = adapter->vfinfo[vf].pf_vlan;
ivi->qos = adapter->vfinfo[vf].pf_qos;
ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
return 0;
}

View File

@ -0,0 +1,52 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _IXGBE_SRIOV_H_
#define _IXGBE_SRIOV_H_
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
void ixgbe_msg_task(struct ixgbe_adapter *adapter);
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
u8 qos);
int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi);
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
void ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
const struct ixgbe_info *ii);
int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter);
#endif /* _IXGBE_SRIOV_H_ */

File diff suppressed because it is too large

View File

@ -0,0 +1,883 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include "ixgbe.h"
#include "ixgbe_phy.h"
#define IXGBE_X540_MAX_TX_QUEUES 128
#define IXGBE_X540_MAX_RX_QUEUES 128
#define IXGBE_X540_RAR_ENTRIES 128
#define IXGBE_X540_MC_TBL_SIZE 128
#define IXGBE_X540_VFT_TBL_SIZE 128
#define IXGBE_X540_RX_PB_SIZE 384
static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
{
return ixgbe_media_type_copper;
}
static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
/* Call PHY identify routine to get the phy type */
ixgbe_identify_phy_generic(hw);
mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
return 0;
}
/**
* ixgbe_setup_mac_link_X540 - Set the auto advertised capabilities
* @hw: pointer to hardware structure
* @speed: new link speed
* @autoneg: true if autonegotiation enabled
* @autoneg_wait_to_complete: true when waiting for completion is needed
**/
static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
ixgbe_link_speed speed, bool autoneg,
bool autoneg_wait_to_complete)
{
return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
autoneg_wait_to_complete);
}
/**
* ixgbe_reset_hw_X540 - Perform hardware reset
* @hw: pointer to hardware structure
*
* Resets the hardware by resetting the transmit and receive units, masks
* and clears all interrupts, perform a PHY reset, and perform a link (MAC)
* reset.
**/
static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
s32 status;
u32 ctrl, i;
/* Call adapter stop to disable tx/rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
if (status != 0)
goto reset_hw_out;
/* flush pending Tx transactions */
ixgbe_clear_tx_pending(hw);
mac_reset_top:
ctrl = IXGBE_CTRL_RST;
ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
IXGBE_WRITE_FLUSH(hw);
/* Poll for reset bit to self-clear indicating reset is complete */
for (i = 0; i < 10; i++) {
udelay(1);
ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
if (!(ctrl & IXGBE_CTRL_RST_MASK))
break;
}
if (ctrl & IXGBE_CTRL_RST_MASK) {
status = IXGBE_ERR_RESET_FAILED;
hw_dbg(hw, "Reset polling failed to complete.\n");
}
msleep(100);
/*
* Double resets are required for recovery from certain error
* conditions. Between resets, it is necessary to stall to allow time
* for any pending HW events to complete.
*/
if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
goto mac_reset_top;
}
/* Set the Rx packet buffer size. */
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
/* Store the permanent mac address */
hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
/*
* Store MAC address from RAR0, clear receive address registers, and
* clear the multicast table. Also reset num_rar_entries to 128,
* since we modify this value when programming the SAN MAC address.
*/
hw->mac.num_rar_entries = IXGBE_X540_RAR_ENTRIES;
hw->mac.ops.init_rx_addrs(hw);
/* Store the permanent SAN mac address */
hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
/* Add the SAN MAC address to the RAR only if it's a valid address */
if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
hw->mac.san_addr, 0, IXGBE_RAH_AV);
/* Reserve the last RAR for the SAN MAC address */
hw->mac.num_rar_entries--;
}
/* Store the alternative WWNN/WWPN prefix */
hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
&hw->mac.wwpn_prefix);
reset_hw_out:
return status;
}
/**
* ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx
* @hw: pointer to hardware structure
*
* Starts the hardware using the generic start_hw function
* and the gen2 start_hw function.
* Then performs revision-specific operations, if any.
**/
static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
{
s32 ret_val = 0;
ret_val = ixgbe_start_hw_generic(hw);
if (ret_val != 0)
goto out;
ret_val = ixgbe_start_hw_gen2(hw);
hw->mac.rx_pb_size = IXGBE_X540_RX_PB_SIZE;
out:
return ret_val;
}
/**
* ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
* @hw: pointer to hardware structure
*
* Determines physical layer capabilities of the current configuration.
**/
static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
{
u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
u16 ext_ability = 0;
hw->phy.ops.identify(hw);
hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
&ext_ability);
if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
return physical_layer;
}
/**
* ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
* @hw: pointer to hardware structure
*
* Initializes the EEPROM parameters ixgbe_eeprom_info within the
* ixgbe_hw struct in order to set up EEPROM access.
**/
static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
u32 eec;
u16 eeprom_size;
if (eeprom->type == ixgbe_eeprom_uninitialized) {
eeprom->semaphore_delay = 10;
eeprom->type = ixgbe_flash;
eec = IXGBE_READ_REG(hw, IXGBE_EEC);
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
IXGBE_EEC_SIZE_SHIFT);
eeprom->word_size = 1 << (eeprom_size +
IXGBE_EEPROM_WORD_SIZE_SHIFT);
hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
eeprom->type, eeprom->word_size);
}
return 0;
}
/**
* ixgbe_read_eerd_X540 - Read EEPROM word using EERD
* @hw: pointer to hardware structure
* @offset: offset of word in the EEPROM to read
* @data: word read from the EEPROM
*
* Reads a 16 bit word from the EEPROM using the EERD register.
**/
static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
s32 status = 0;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
0)
status = ixgbe_read_eerd_generic(hw, offset, data);
else
status = IXGBE_ERR_SWFW_SYNC;
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
return status;
}
/**
* ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD
* @hw: pointer to hardware structure
* @offset: offset of word in the EEPROM to read
* @words: number of words
* @data: word(s) read from the EEPROM
*
* Reads 16 bit word(s) from the EEPROM using the EERD register.
**/
static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data)
{
s32 status = 0;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
0)
status = ixgbe_read_eerd_buffer_generic(hw, offset,
words, data);
else
status = IXGBE_ERR_SWFW_SYNC;
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
return status;
}
/**
* ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
* @hw: pointer to hardware structure
* @offset: offset of word in the EEPROM to write
* @data: word to write to the EEPROM
*
* Write a 16 bit word to the EEPROM using the EEWR register.
**/
static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
{
s32 status = 0;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
status = ixgbe_write_eewr_generic(hw, offset, data);
else
status = IXGBE_ERR_SWFW_SYNC;
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
return status;
}
/**
* ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR
* @hw: pointer to hardware structure
* @offset: offset of word in the EEPROM to write
* @words: number of words
* @data: word(s) to write to the EEPROM
*
* Writes 16 bit word(s) to the EEPROM using the EEWR register.
**/
static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data)
{
s32 status = 0;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
0)
status = ixgbe_write_eewr_buffer_generic(hw, offset,
words, data);
else
status = IXGBE_ERR_SWFW_SYNC;
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
return status;
}
/**
* ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
* @hw: pointer to hardware structure
*
* This function does not use synchronization for EERD and EEWR. It can
* be used internally by functions which utilize ixgbe_acquire_swfw_sync_X540.
**/
static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
u16 i;
u16 j;
u16 checksum = 0;
u16 length = 0;
u16 pointer = 0;
u16 word = 0;
/*
* Do not use hw->eeprom.ops.read because we do not want to take
* the synchronization semaphores here. Instead use
* ixgbe_read_eerd_generic
*/
/* Include 0x0-0x3F in the checksum */
for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
if (ixgbe_read_eerd_generic(hw, i, &word) != 0) {
hw_dbg(hw, "EEPROM read failed\n");
break;
}
checksum += word;
}
/*
* Include all data from pointers 0x3, 0x6-0xE. This excludes the
* FW, PHY module, and PCIe Expansion/Option ROM pointers.
*/
for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
continue;
if (ixgbe_read_eerd_generic(hw, i, &pointer) != 0) {
hw_dbg(hw, "EEPROM read failed\n");
break;
}
/* Skip pointer section if the pointer is invalid. */
if (pointer == 0xFFFF || pointer == 0 ||
pointer >= hw->eeprom.word_size)
continue;
if (ixgbe_read_eerd_generic(hw, pointer, &length) != 0) {
hw_dbg(hw, "EEPROM read failed\n");
break;
}
/* Skip pointer section if length is invalid. */
if (length == 0xFFFF || length == 0 ||
(pointer + length) >= hw->eeprom.word_size)
continue;
for (j = pointer+1; j <= pointer+length; j++) {
if (ixgbe_read_eerd_generic(hw, j, &word) != 0) {
hw_dbg(hw, "EEPROM read failed\n");
break;
}
checksum += word;
}
}
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
return checksum;
}
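/*
 * The checksum convention used above: the words covered by the loops are
 * summed (mod 2^16) and the stored checksum is chosen so that
 * covered sum + checksum == IXGBE_EEPROM_SUM.  For example, if the
 * covered words add up to 0x1234 and IXGBE_EEPROM_SUM has its usual
 * value of 0xBABA, the value written at the checksum word offset
 * (IXGBE_EEPROM_CHECKSUM) is 0xBABA - 0x1234 = 0xA886; validation simply
 * recomputes the same value and compares it with what is stored there.
 */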
/**
* ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum
* @hw: pointer to hardware structure
* @checksum_val: calculated checksum
*
* Performs checksum calculation and validates the EEPROM checksum. If the
* caller does not need checksum_val, the value can be NULL.
**/
static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
u16 *checksum_val)
{
s32 status;
u16 checksum;
u16 read_checksum = 0;
/*
* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
* EEPROM read fails
*/
status = hw->eeprom.ops.read(hw, 0, &checksum);
if (status != 0) {
hw_dbg(hw, "EEPROM read failed\n");
goto out;
}
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
checksum = hw->eeprom.ops.calc_checksum(hw);
/*
* Do not use hw->eeprom.ops.read because we do not want to take
* the synchronization semaphores twice here.
*/
ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
&read_checksum);
/*
* Verify read checksum from EEPROM is the same as
* calculated checksum
*/
if (read_checksum != checksum)
status = IXGBE_ERR_EEPROM_CHECKSUM;
/* If the user cares, return the calculated checksum */
if (checksum_val)
*checksum_val = checksum;
} else {
status = IXGBE_ERR_SWFW_SYNC;
}
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
out:
return status;
}
/**
* ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
* @hw: pointer to hardware structure
*
* After writing EEPROM to shadow RAM using EEWR register, software calculates
* checksum and updates the EEPROM and instructs the hardware to update
* the flash.
**/
static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
s32 status;
u16 checksum;
/*
* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
* EEPROM read fails
*/
status = hw->eeprom.ops.read(hw, 0, &checksum);
if (status != 0)
hw_dbg(hw, "EEPROM read failed\n");
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
checksum = hw->eeprom.ops.calc_checksum(hw);
/*
* Do not use hw->eeprom.ops.write because we do not want to
* take the synchronization semaphores twice here.
*/
status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM,
checksum);
if (status == 0)
status = ixgbe_update_flash_X540(hw);
else
status = IXGBE_ERR_SWFW_SYNC;
}
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
return status;
}
/**
* ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
* @hw: pointer to hardware structure
*
* Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
* EEPROM from shadow RAM to the flash device.
**/
static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
{
u32 flup;
s32 status = IXGBE_ERR_EEPROM;
status = ixgbe_poll_flash_update_done_X540(hw);
if (status == IXGBE_ERR_EEPROM) {
hw_dbg(hw, "Flash update time out\n");
goto out;
}
flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
status = ixgbe_poll_flash_update_done_X540(hw);
if (status == 0)
hw_dbg(hw, "Flash update complete\n");
else
hw_dbg(hw, "Flash update time out\n");
if (hw->revision_id == 0) {
flup = IXGBE_READ_REG(hw, IXGBE_EEC);
if (flup & IXGBE_EEC_SEC1VAL) {
flup |= IXGBE_EEC_FLUP;
IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
}
status = ixgbe_poll_flash_update_done_X540(hw);
if (status == 0)
hw_dbg(hw, "Flash update complete\n");
else
hw_dbg(hw, "Flash update time out\n");
}
out:
return status;
}
/**
* ixgbe_poll_flash_update_done_X540 - Poll flash update status
* @hw: pointer to hardware structure
*
* Polls the FLUDONE (bit 26) of the EEC Register to determine when the
* flash update is done.
**/
static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
{
u32 i;
u32 reg;
s32 status = IXGBE_ERR_EEPROM;
for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
reg = IXGBE_READ_REG(hw, IXGBE_EEC);
if (reg & IXGBE_EEC_FLUDONE) {
status = 0;
break;
}
udelay(5);
}
return status;
}
/**
* ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
* @hw: pointer to hardware structure
* @mask: Mask to specify which semaphore to acquire
*
* Acquires the SWFW semaphore through the SW_FW_SYNC register for
* the specified function (CSR, PHY0, PHY1, NVM, Flash)
**/
static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
{
u32 swfw_sync;
u32 swmask = mask;
u32 fwmask = mask << 5;
u32 hwmask = 0;
u32 timeout = 200;
u32 i;
if (swmask == IXGBE_GSSR_EEP_SM)
hwmask = IXGBE_GSSR_FLASH_SM;
for (i = 0; i < timeout; i++) {
/*
* SW NVM semaphore bit is used for access to all
* SW_FW_SYNC bits (not just NVM)
*/
if (ixgbe_get_swfw_sync_semaphore(hw))
return IXGBE_ERR_SWFW_SYNC;
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
if (!(swfw_sync & (fwmask | swmask | hwmask))) {
swfw_sync |= swmask;
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
break;
} else {
/*
* Firmware currently using resource (fwmask),
* hardware currently using resource (hwmask),
* or other software thread currently using
* resource (swmask)
*/
ixgbe_release_swfw_sync_semaphore(hw);
usleep_range(5000, 10000);
}
}
/*
* If the resource is not released by the FW/HW the SW can assume that
* the FW/HW is malfunctioning. In that case the SW should set the
* SW bit(s) of the requested resource(s) while ignoring the
* corresponding FW/HW bits in the SW_FW_SYNC register.
*/
if (i >= timeout) {
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
if (swfw_sync & (fwmask | hwmask)) {
if (ixgbe_get_swfw_sync_semaphore(hw))
return IXGBE_ERR_SWFW_SYNC;
swfw_sync |= swmask;
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
}
}
usleep_range(5000, 10000);
return 0;
}
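/*
 * SW_FW_SYNC bit layout relied on above: the SW ownership bits live in
 * the low five bits of the register and the matching FW bits sit five
 * positions higher (fwmask = mask << 5).  Taking the EEPROM semaphore,
 * with IXGBE_GSSR_EEP_SM assumed to be bit 0, therefore checks bit 0
 * (SW), bit 5 (FW) and the IXGBE_GSSR_FLASH_SM hardware bit before
 * claiming the resource.
 */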
/**
* ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
* @hw: pointer to hardware structure
* @mask: Mask to specify which semaphore to release
*
* Releases the SWFW semaphore through the SW_FW_SYNC register
* for the specified function (CSR, PHY0, PHY1, NVM, Flash)
**/
static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
{
u32 swfw_sync;
u32 swmask = mask;
ixgbe_get_swfw_sync_semaphore(hw);
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
swfw_sync &= ~swmask;
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
usleep_range(5000, 10000);
}
/**
* ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
* @hw: pointer to hardware structure
*
* Sets the hardware semaphores so SW/FW can gain control of shared resources
**/
static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
{
s32 status = IXGBE_ERR_EEPROM;
u32 timeout = 2000;
u32 i;
u32 swsm;
/* Get SMBI software semaphore between device drivers first */
for (i = 0; i < timeout; i++) {
/*
* If the SMBI bit is 0 when we read it, then the bit will be
* set and we have the semaphore
*/
swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
if (!(swsm & IXGBE_SWSM_SMBI)) {
status = 0;
break;
}
udelay(50);
}
/* Now get the semaphore between SW/FW through the REGSMP bit */
if (!status) {
for (i = 0; i < timeout; i++) {
swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
if (!(swsm & IXGBE_SWFW_REGSMP))
break;
udelay(50);
}
} else {
hw_dbg(hw, "Software semaphore SMBI between device drivers "
"not granted.\n");
}
return status;
}
/**
* ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
* @hw: pointer to hardware structure
*
* This function clears hardware semaphore bits.
**/
static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
{
u32 swsm;
/* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
swsm &= ~IXGBE_SWSM_SMBI;
IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
swsm &= ~IXGBE_SWFW_REGSMP;
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
IXGBE_WRITE_FLUSH(hw);
}
/**
* ixgbe_blink_led_start_X540 - Blink LED based on index.
* @hw: pointer to hardware structure
* @index: led number to blink
*
* Devices that implement the version 2 interface:
* X540
**/
static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
{
u32 macc_reg;
u32 ledctl_reg;
/*
* In order for the blink bit in the LED control register
* to work, link and speed must be forced in the MAC. We
* will reverse this when we stop the blinking.
*/
macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
/* Set the LED to LINK_UP + BLINK. */
ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
ledctl_reg |= IXGBE_LED_BLINK(index);
IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
IXGBE_WRITE_FLUSH(hw);
return 0;
}
/**
* ixgbe_blink_led_stop_X540 - Stop blinking LED based on index.
* @hw: pointer to hardware structure
* @index: led number to stop blinking
*
* Devices that implement the version 2 interface:
* X540
**/
static s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
{
u32 macc_reg;
u32 ledctl_reg;
/* Restore the LED to its default value. */
ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
ledctl_reg &= ~IXGBE_LED_BLINK(index);
IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
/* Unforce link and speed in the MAC. */
macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS);
IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
IXGBE_WRITE_FLUSH(hw);
return 0;
}
static struct ixgbe_mac_operations mac_ops_X540 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_X540,
.start_hw = &ixgbe_start_hw_X540,
.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
.get_media_type = &ixgbe_get_media_type_X540,
.get_supported_physical_layer =
&ixgbe_get_supported_physical_layer_X540,
.enable_rx_dma = &ixgbe_enable_rx_dma_generic,
.get_mac_addr = &ixgbe_get_mac_addr_generic,
.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
.get_device_caps = &ixgbe_get_device_caps_generic,
.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
.stop_adapter = &ixgbe_stop_adapter_generic,
.get_bus_info = &ixgbe_get_bus_info_generic,
.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
.read_analog_reg8 = NULL,
.write_analog_reg8 = NULL,
.setup_link = &ixgbe_setup_mac_link_X540,
.set_rxpba = &ixgbe_set_rxpba_generic,
.check_link = &ixgbe_check_mac_link_generic,
.get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
.led_on = &ixgbe_led_on_generic,
.led_off = &ixgbe_led_off_generic,
.blink_led_start = &ixgbe_blink_led_start_X540,
.blink_led_stop = &ixgbe_blink_led_stop_X540,
.set_rar = &ixgbe_set_rar_generic,
.clear_rar = &ixgbe_clear_rar_generic,
.set_vmdq = &ixgbe_set_vmdq_generic,
.clear_vmdq = &ixgbe_clear_vmdq_generic,
.init_rx_addrs = &ixgbe_init_rx_addrs_generic,
.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
.enable_mc = &ixgbe_enable_mc_generic,
.disable_mc = &ixgbe_disable_mc_generic,
.clear_vfta = &ixgbe_clear_vfta_generic,
.set_vfta = &ixgbe_set_vfta_generic,
.fc_enable = &ixgbe_fc_enable_generic,
.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic,
.init_uta_tables = &ixgbe_init_uta_tables_generic,
.setup_sfp = NULL,
.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,
.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
.release_swfw_sync = &ixgbe_release_swfw_sync_X540,
};
static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
.init_params = &ixgbe_init_eeprom_params_X540,
.read = &ixgbe_read_eerd_X540,
.read_buffer = &ixgbe_read_eerd_buffer_X540,
.write = &ixgbe_write_eewr_X540,
.write_buffer = &ixgbe_write_eewr_buffer_X540,
.calc_checksum = &ixgbe_calc_eeprom_checksum_X540,
.validate_checksum = &ixgbe_validate_eeprom_checksum_X540,
.update_checksum = &ixgbe_update_eeprom_checksum_X540,
};
static struct ixgbe_phy_operations phy_ops_X540 = {
.identify = &ixgbe_identify_phy_generic,
.identify_sfp = &ixgbe_identify_sfp_module_generic,
.init = NULL,
.reset = NULL,
.read_reg = &ixgbe_read_phy_reg_generic,
.write_reg = &ixgbe_write_phy_reg_generic,
.setup_link = &ixgbe_setup_phy_link_generic,
.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
.read_i2c_byte = &ixgbe_read_i2c_byte_generic,
.write_i2c_byte = &ixgbe_write_i2c_byte_generic,
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
.check_overtemp = &ixgbe_tn_check_overtemp,
.get_firmware_version = &ixgbe_get_phy_firmware_version_generic,
};
struct ixgbe_info ixgbe_X540_info = {
.mac = ixgbe_mac_X540,
.get_invariants = &ixgbe_get_invariants_X540,
.mac_ops = &mac_ops_X540,
.eeprom_ops = &eeprom_ops_X540,
.phy_ops = &phy_ops_X540,
.mbx_ops = &mbx_ops_generic,
};

View File

@ -0,0 +1,38 @@
################################################################################
#
# Intel 82599 Virtual Function driver
# Copyright(c) 1999 - 2010 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
#
# Contact Information:
# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
#
################################################################################
#
# Makefile for the Intel(R) 82599 VF ethernet driver
#
obj-$(CONFIG_IXGBEVF) += ixgbevf.o
ixgbevf-objs := vf.o \
mbx.o \
ethtool.o \
ixgbevf_main.o

View File

@ -0,0 +1,297 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _IXGBEVF_DEFINES_H_
#define _IXGBEVF_DEFINES_H_
/* Device IDs */
#define IXGBE_DEV_ID_82599_VF 0x10ED
#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 1
#define IXGBE_VF_MAX_RX_QUEUES 1
#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
/* Link speed */
typedef u32 ixgbe_link_speed;
#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
#define IXGBE_LINKS_UP 0x40000000
#define IXGBE_LINKS_SPEED_82599 0x30000000
#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8
#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024
/* Interrupt Vector Allocation Registers */
#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
/* Receive Config masks */
#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */
#define IXGBE_RXDCTL_RLPML_EN 0x00008000
/* DCA Control */
#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
/* PSRTYPE bit definitions */
#define IXGBE_PSRTYPE_TCPHDR 0x00000010
#define IXGBE_PSRTYPE_UDPHDR 0x00000020
#define IXGBE_PSRTYPE_IPV4HDR 0x00000100
#define IXGBE_PSRTYPE_IPV6HDR 0x00000200
#define IXGBE_PSRTYPE_L2HDR 0x00001000
/* SRRCTL bit definitions */
#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* packet buffer size is in 1 KB units */
#define IXGBE_SRRCTL_RDMTS_SHIFT 22
#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000
#define IXGBE_SRRCTL_DROP_EN 0x10000000
#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
/* Receive Descriptor bit definitions */
#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */
#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */
#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004
#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */
#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */
#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */
#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */
#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */
#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */
#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */
#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
#define IXGBE_RXDADV_ERR_MASK 0xFFF00000 /* RDESC.ERRORS mask */
#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */
#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */
#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */
#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */
#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */
#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
#define IXGBE_RXD_PRI_SHIFT 13
#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
#define IXGBE_RXD_CFI_SHIFT 12
#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */
#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */
#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */
#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */
#define IXGBE_RXDADV_STAT_MASK 0x000FFFFF /* Stat/NEXTP: bit 0-19 */
#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */
#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */
#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F
#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
#define IXGBE_RXDADV_RSCCNT_SHIFT 17
#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5
#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
#define IXGBE_RXDADV_SPH 0x8000
#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
IXGBE_RXD_ERR_CE | \
IXGBE_RXD_ERR_LE | \
IXGBE_RXD_ERR_PE | \
IXGBE_RXD_ERR_OSE | \
IXGBE_RXD_ERR_USE)
#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
IXGBE_RXDADV_ERR_CE | \
IXGBE_RXDADV_ERR_LE | \
IXGBE_RXDADV_ERR_PE | \
IXGBE_RXDADV_ERR_OSE | \
IXGBE_RXDADV_ERR_USE)
#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */
#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */
#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */
#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
/* Transmit Descriptor - Advanced */
union ixgbe_adv_tx_desc {
struct {
__le64 buffer_addr; /* Address of descriptor's data buf */
__le32 cmd_type_len;
__le32 olinfo_status;
} read;
struct {
__le64 rsvd; /* Reserved */
__le32 nxtseq_seed;
__le32 status;
} wb;
};
/* Receive Descriptor - Advanced */
union ixgbe_adv_rx_desc {
struct {
__le64 pkt_addr; /* Packet buffer address */
__le64 hdr_addr; /* Header buffer address */
} read;
struct {
struct {
union {
__le32 data;
struct {
__le16 pkt_info; /* RSS, Pkt type */
__le16 hdr_info; /* Splithdr, hdrlen */
} hs_rss;
} lo_dword;
union {
__le32 rss; /* RSS Hash */
struct {
__le16 ip_id; /* IP id */
__le16 csum; /* Packet Checksum */
} csum_ip;
} hi_dword;
} lower;
struct {
__le32 status_error; /* ext status/error */
__le16 length; /* Packet length */
__le16 vlan; /* VLAN tag */
} upper;
} wb; /* writeback */
};
/* Context descriptors */
struct ixgbe_adv_tx_context_desc {
__le32 vlan_macip_lens;
__le32 seqnum_seed;
__le32 type_tucmd_mlhl;
__le32 mss_l4len_idx;
};
/* Adv Transmit Descriptor Config Masks */
#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
IXGBE_ADVTXD_POPTS_SHIFT)
#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
IXGBE_ADVTXD_POPTS_SHIFT)
#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
/* Interrupt register bitmasks */
/* Extended Interrupt Cause Read */
#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
/* Extended Interrupt Cause Set */
#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
/* Extended Interrupt Mask Set */
#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
/* Extended Interrupt Mask Clear */
#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
#define IXGBE_EIMS_ENABLE_MASK ( \
IXGBE_EIMS_RTX_QUEUE | \
IXGBE_EIMS_MAILBOX | \
IXGBE_EIMS_OTHER)
#define IXGBE_EITR_CNT_WDIS 0x80000000
/* Error Codes */
#define IXGBE_ERR_INVALID_MAC_ADDR -1
#define IXGBE_ERR_RESET_FAILED -2
#endif /* _IXGBEVF_DEFINES_H_ */

View File

@ -0,0 +1,692 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
Copyright(c) 1999 - 2009 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
/* ethtool support for ixgbevf */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include "ixgbevf.h"
#define IXGBE_ALL_RAR_ENTRIES 16
#ifdef ETHTOOL_GSTATS
struct ixgbe_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
int stat_offset;
int base_stat_offset;
int saved_reset_offset;
};
#define IXGBEVF_STAT(m, b, r) sizeof(((struct ixgbevf_adapter *)0)->m), \
offsetof(struct ixgbevf_adapter, m), \
offsetof(struct ixgbevf_adapter, b), \
offsetof(struct ixgbevf_adapter, r)
static struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
stats.saved_reset_vfgprc)},
{"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc,
stats.saved_reset_vfgptc)},
{"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc,
stats.saved_reset_vfgorc)},
{"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
stats.saved_reset_vfgotc)},
{"tx_busy", IXGBEVF_STAT(tx_busy, zero_base, zero_base)},
{"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
stats.saved_reset_vfmprc)},
{"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base,
zero_base)},
{"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base,
zero_base)},
{"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base,
zero_base)},
{"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base, zero_base)},
};
#define IXGBE_QUEUE_STATS_LEN 0
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
#endif /* ETHTOOL_GSTATS */
#ifdef ETHTOOL_TEST
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)",
"Link test (on/offline)"
};
#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
#endif /* ETHTOOL_TEST */
static int ixgbevf_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 link_speed = 0;
bool link_up;
ecmd->supported = SUPPORTED_10000baseT_Full;
ecmd->autoneg = AUTONEG_DISABLE;
ecmd->transceiver = XCVR_DUMMY1;
ecmd->port = -1;
hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
if (link_up) {
ethtool_cmd_speed_set(
ecmd,
(link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
SPEED_10000 : SPEED_1000);
ecmd->duplex = DUPLEX_FULL;
} else {
ethtool_cmd_speed_set(ecmd, -1);
ecmd->duplex = -1;
}
return 0;
}
static u32 ixgbevf_get_msglevel(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
return adapter->msg_enable;
}
static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
adapter->msg_enable = data;
}
#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
static char *ixgbevf_reg_names[] = {
"IXGBE_VFCTRL",
"IXGBE_VFSTATUS",
"IXGBE_VFLINKS",
"IXGBE_VFRXMEMWRAP",
"IXGBE_VFFRTIMER",
"IXGBE_VTEICR",
"IXGBE_VTEICS",
"IXGBE_VTEIMS",
"IXGBE_VTEIMC",
"IXGBE_VTEIAC",
"IXGBE_VTEIAM",
"IXGBE_VTEITR",
"IXGBE_VTIVAR",
"IXGBE_VTIVAR_MISC",
"IXGBE_VFRDBAL0",
"IXGBE_VFRDBAL1",
"IXGBE_VFRDBAH0",
"IXGBE_VFRDBAH1",
"IXGBE_VFRDLEN0",
"IXGBE_VFRDLEN1",
"IXGBE_VFRDH0",
"IXGBE_VFRDH1",
"IXGBE_VFRDT0",
"IXGBE_VFRDT1",
"IXGBE_VFRXDCTL0",
"IXGBE_VFRXDCTL1",
"IXGBE_VFSRRCTL0",
"IXGBE_VFSRRCTL1",
"IXGBE_VFPSRTYPE",
"IXGBE_VFTDBAL0",
"IXGBE_VFTDBAL1",
"IXGBE_VFTDBAH0",
"IXGBE_VFTDBAH1",
"IXGBE_VFTDLEN0",
"IXGBE_VFTDLEN1",
"IXGBE_VFTDH0",
"IXGBE_VFTDH1",
"IXGBE_VFTDT0",
"IXGBE_VFTDT1",
"IXGBE_VFTXDCTL0",
"IXGBE_VFTXDCTL1",
"IXGBE_VFTDWBAL0",
"IXGBE_VFTDWBAL1",
"IXGBE_VFTDWBAH0",
"IXGBE_VFTDWBAH1"
};
static int ixgbevf_get_regs_len(struct net_device *netdev)
{
return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32);
}
static void ixgbevf_get_regs(struct net_device *netdev,
struct ethtool_regs *regs,
void *p)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 *regs_buff = p;
u32 regs_len = ixgbevf_get_regs_len(netdev);
u8 i;
memset(p, 0, regs_len);
regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
/* General Registers */
regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER);
/* Interrupt */
/* don't read EICR because it can clear interrupt causes, instead
* read EICS which is a shadow but doesn't clear EICR */
regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
/* Receive DMA */
for (i = 0; i < 2; i++)
regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
for (i = 0; i < 2; i++)
regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
for (i = 0; i < 2; i++)
regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
for (i = 0; i < 2; i++)
regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
for (i = 0; i < 2; i++)
regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
for (i = 0; i < 2; i++)
regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
for (i = 0; i < 2; i++)
regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
/* Receive */
regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);
/* Transmit */
for (i = 0; i < 2; i++)
regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
for (i = 0; i < 2; i++)
regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
for (i = 0; i < 2; i++)
regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
for (i = 0; i < 2; i++)
regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
for (i = 0; i < 2; i++)
regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
for (i = 0; i < 2; i++)
regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
for (i = 0; i < 2; i++)
regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
for (i = 0; i < 2; i++)
regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++)
hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]);
}
static void ixgbevf_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, ixgbevf_driver_name, 32);
strlcpy(drvinfo->version, ixgbevf_driver_version, 32);
strlcpy(drvinfo->fw_version, "N/A", 4);
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
}
static void ixgbevf_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbevf_ring *tx_ring = adapter->tx_ring;
struct ixgbevf_ring *rx_ring = adapter->rx_ring;
ring->rx_max_pending = IXGBEVF_MAX_RXD;
ring->tx_max_pending = IXGBEVF_MAX_TXD;
ring->rx_pending = rx_ring->count;
ring->tx_pending = tx_ring->count;
}
static int ixgbevf_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
int i, err = 0;
u32 new_rx_count, new_tx_count;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD);
new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD);
new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD);
new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
if ((new_tx_count == adapter->tx_ring->count) &&
(new_rx_count == adapter->rx_ring->count)) {
/* nothing to do */
return 0;
}
while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
msleep(1);
/*
* If the adapter isn't up and running then just set the
* new parameters and scurry for the exits.
*/
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
adapter->tx_ring[i].count = new_tx_count;
for (i = 0; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i].count = new_rx_count;
adapter->tx_ring_count = new_tx_count;
adapter->rx_ring_count = new_rx_count;
goto clear_reset;
}
tx_ring = kcalloc(adapter->num_tx_queues,
sizeof(struct ixgbevf_ring), GFP_KERNEL);
if (!tx_ring) {
err = -ENOMEM;
goto clear_reset;
}
rx_ring = kcalloc(adapter->num_rx_queues,
sizeof(struct ixgbevf_ring), GFP_KERNEL);
if (!rx_ring) {
err = -ENOMEM;
goto err_rx_setup;
}
ixgbevf_down(adapter);
memcpy(tx_ring, adapter->tx_ring,
adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
for (i = 0; i < adapter->num_tx_queues; i++) {
tx_ring[i].count = new_tx_count;
err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
if (err) {
while (i) {
i--;
ixgbevf_free_tx_resources(adapter,
&tx_ring[i]);
}
goto err_tx_ring_setup;
}
tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
}
memcpy(rx_ring, adapter->rx_ring,
adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
for (i = 0; i < adapter->num_rx_queues; i++) {
rx_ring[i].count = new_rx_count;
err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
if (err) {
while (i) {
i--;
ixgbevf_free_rx_resources(adapter,
&rx_ring[i]);
}
goto err_rx_ring_setup;
}
rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
}
/*
* Only switch to new rings if all the prior allocations
* and ring setups have succeeded.
*/
kfree(adapter->tx_ring);
adapter->tx_ring = tx_ring;
adapter->tx_ring_count = new_tx_count;
kfree(adapter->rx_ring);
adapter->rx_ring = rx_ring;
adapter->rx_ring_count = new_rx_count;
/* success! */
ixgbevf_up(adapter);
goto clear_reset;
err_rx_ring_setup:
for (i = 0; i < adapter->num_tx_queues; i++)
ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
err_tx_ring_setup:
kfree(rx_ring);
err_rx_setup:
kfree(tx_ring);
clear_reset:
clear_bit(__IXGBEVF_RESETTING, &adapter->state);
return err;
}
static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
{
switch (stringset) {
case ETH_SS_TEST:
return IXGBE_TEST_LEN;
case ETH_SS_STATS:
return IXGBE_GLOBAL_STATS_LEN;
default:
return -EINVAL;
}
}
static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
int i;
ixgbevf_update_stats(adapter);
for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
char *p = (char *)adapter +
ixgbe_gstrings_stats[i].stat_offset;
char *b = (char *)adapter +
ixgbe_gstrings_stats[i].base_stat_offset;
char *r = (char *)adapter +
ixgbe_gstrings_stats[i].saved_reset_offset;
data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p) -
((ixgbe_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)b : *(u32 *)b) +
((ixgbe_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)r : *(u32 *)r);
}
}
static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
char *p = (char *)data;
int i;
switch (stringset) {
case ETH_SS_TEST:
memcpy(data, *ixgbe_gstrings_test,
IXGBE_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
memcpy(p, ixgbe_gstrings_stats[i].stat_string,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
break;
}
}
static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
{
struct ixgbe_hw *hw = &adapter->hw;
bool link_up;
u32 link_speed = 0;
*data = 0;
hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
if (!link_up)
*data = 1;
return *data;
}
/* ethtool register test data */
struct ixgbevf_reg_test {
u16 reg;
u8 array_len;
u8 test_type;
u32 mask;
u32 write;
};
/* In the hardware, registers are laid out either singly, in arrays
* spaced 0x40 bytes apart, or in contiguous tables. We assume
* most tests take place on arrays or single registers (handled
* as a single-element array) and special-case the tables.
* Table tests are always pattern tests.
*
* We also make provision for some required setup steps by specifying
* registers to be written without any read-back testing.
*/
#define PATTERN_TEST 1
#define SET_READ_TEST 2
#define WRITE_NO_TEST 3
#define TABLE32_TEST 4
#define TABLE64_TEST_LO 5
#define TABLE64_TEST_HI 6
/* default VF register test */
static const struct ixgbevf_reg_test reg_test_vf[] = {
{ IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
{ IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
{ IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
{ IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
{ IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
{ 0, 0, 0, 0 }
};
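/* Editor's illustration (not part of the original source): the first entry
* above, { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
* makes ixgbevf_reg_test() below run the pattern writes against
* IXGBE_VFRDBAL(0) and IXGBE_VFRDBAL(0) + 0x40, i.e. IXGBE_VFRDBAL(1),
* matching the 0x40-byte register spacing described earlier. */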
static const u32 register_test_patterns[] = {
0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
};
#define REG_PATTERN_TEST(R, M, W) \
{ \
u32 pat, val, before; \
for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \
before = readl(adapter->hw.hw_addr + R); \
writel((register_test_patterns[pat] & W), \
(adapter->hw.hw_addr + R)); \
val = readl(adapter->hw.hw_addr + R); \
if (val != (register_test_patterns[pat] & W & M)) { \
hw_dbg(&adapter->hw, \
"pattern test reg %04X failed: got " \
"0x%08X expected 0x%08X\n", \
R, val, (register_test_patterns[pat] & W & M)); \
*data = R; \
writel(before, adapter->hw.hw_addr + R); \
return 1; \
} \
writel(before, adapter->hw.hw_addr + R); \
} \
}
#define REG_SET_AND_CHECK(R, M, W) \
{ \
u32 val, before; \
before = readl(adapter->hw.hw_addr + R); \
writel((W & M), (adapter->hw.hw_addr + R)); \
val = readl(adapter->hw.hw_addr + R); \
if ((W & M) != (val & M)) { \
printk(KERN_ERR "set/check reg %04X test failed: got 0x%08X " \
"expected 0x%08X\n", R, (val & M), (W & M)); \
*data = R; \
writel(before, (adapter->hw.hw_addr + R)); \
return 1; \
} \
writel(before, (adapter->hw.hw_addr + R)); \
}
static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
{
const struct ixgbevf_reg_test *test;
u32 i;
test = reg_test_vf;
/*
* Perform the register test, looping through the test table
* until we either fail or reach the null entry.
*/
while (test->reg) {
for (i = 0; i < test->array_len; i++) {
switch (test->test_type) {
case PATTERN_TEST:
REG_PATTERN_TEST(test->reg + (i * 0x40),
test->mask,
test->write);
break;
case SET_READ_TEST:
REG_SET_AND_CHECK(test->reg + (i * 0x40),
test->mask,
test->write);
break;
case WRITE_NO_TEST:
writel(test->write,
(adapter->hw.hw_addr + test->reg)
+ (i * 0x40));
break;
case TABLE32_TEST:
REG_PATTERN_TEST(test->reg + (i * 4),
test->mask,
test->write);
break;
case TABLE64_TEST_LO:
REG_PATTERN_TEST(test->reg + (i * 8),
test->mask,
test->write);
break;
case TABLE64_TEST_HI:
REG_PATTERN_TEST((test->reg + 4) + (i * 8),
test->mask,
test->write);
break;
}
}
test++;
}
*data = 0;
return *data;
}
static void ixgbevf_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, u64 *data)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
bool if_running = netif_running(netdev);
set_bit(__IXGBEVF_TESTING, &adapter->state);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
hw_dbg(&adapter->hw, "offline testing starting\n");
/* Link test performed before hardware reset so autoneg doesn't
* interfere with test result */
if (ixgbevf_link_test(adapter, &data[1]))
eth_test->flags |= ETH_TEST_FL_FAILED;
if (if_running)
/* indicate we're in test mode */
dev_close(netdev);
else
ixgbevf_reset(adapter);
hw_dbg(&adapter->hw, "register testing starting\n");
if (ixgbevf_reg_test(adapter, &data[0]))
eth_test->flags |= ETH_TEST_FL_FAILED;
ixgbevf_reset(adapter);
clear_bit(__IXGBEVF_TESTING, &adapter->state);
if (if_running)
dev_open(netdev);
} else {
hw_dbg(&adapter->hw, "online testing starting\n");
/* Online tests */
if (ixgbevf_link_test(adapter, &data[1]))
eth_test->flags |= ETH_TEST_FL_FAILED;
/* Online tests aren't run; pass by default */
data[0] = 0;
clear_bit(__IXGBEVF_TESTING, &adapter->state);
}
msleep_interruptible(4 * 1000);
}
static int ixgbevf_nway_reset(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
if (netif_running(netdev)) {
if (!adapter->dev_closed)
ixgbevf_reinit_locked(adapter);
}
return 0;
}
static struct ethtool_ops ixgbevf_ethtool_ops = {
.get_settings = ixgbevf_get_settings,
.get_drvinfo = ixgbevf_get_drvinfo,
.get_regs_len = ixgbevf_get_regs_len,
.get_regs = ixgbevf_get_regs,
.nway_reset = ixgbevf_nway_reset,
.get_link = ethtool_op_get_link,
.get_ringparam = ixgbevf_get_ringparam,
.set_ringparam = ixgbevf_set_ringparam,
.get_msglevel = ixgbevf_get_msglevel,
.set_msglevel = ixgbevf_set_msglevel,
.self_test = ixgbevf_diag_test,
.get_sset_count = ixgbevf_get_sset_count,
.get_strings = ixgbevf_get_strings,
.get_ethtool_stats = ixgbevf_get_ethtool_stats,
};
void ixgbevf_set_ethtool_ops(struct net_device *netdev)
{
SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops);
}

View File

@ -0,0 +1,320 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _IXGBEVF_H_
#define _IXGBEVF_H_
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/u64_stats_sync.h>
#include "vf.h"
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct ixgbevf_tx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
unsigned long time_stamp;
u16 length;
u16 next_to_watch;
u16 mapped_as_page;
};
struct ixgbevf_rx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
struct page *page;
dma_addr_t page_dma;
unsigned int page_offset;
};
struct ixgbevf_ring {
struct ixgbevf_adapter *adapter; /* backlink */
void *desc; /* descriptor ring memory */
dma_addr_t dma; /* phys. address of descriptor ring */
unsigned int size; /* length in bytes */
unsigned int count; /* amount of descriptors */
unsigned int next_to_use;
unsigned int next_to_clean;
int queue_index; /* needed for multiqueue queue management */
union {
struct ixgbevf_tx_buffer *tx_buffer_info;
struct ixgbevf_rx_buffer *rx_buffer_info;
};
u64 total_bytes;
u64 total_packets;
struct u64_stats_sync syncp;
u16 head;
u16 tail;
u16 reg_idx; /* holds the special value that gets the hardware register
* offset associated with this ring, which is different
* for DCB and RSS modes */
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
/* cpu for tx queue */
int cpu;
#endif
u64 v_idx; /* maps directly to the index for this ring in the hardware
* vector array, can also be used for finding the bit in EICR
* and friends that represents the vector for this ring */
u16 work_limit; /* max work per interrupt */
u16 rx_buf_len;
};
enum ixgbevf_ring_f_enum {
RING_F_NONE = 0,
RING_F_ARRAY_SIZE /* must be last in enum set */
};
struct ixgbevf_ring_feature {
int indices;
int mask;
};
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
#define MAX_RX_QUEUES 1
#define MAX_TX_QUEUES 1
#define IXGBEVF_DEFAULT_TXD 1024
#define IXGBEVF_DEFAULT_RXD 512
#define IXGBEVF_MAX_TXD 4096
#define IXGBEVF_MIN_TXD 64
#define IXGBEVF_MAX_RXD 4096
#define IXGBEVF_MIN_RXD 64
/* Supported Rx Buffer Sizes */
#define IXGBEVF_RXBUFFER_64 64 /* Used for packet split */
#define IXGBEVF_RXBUFFER_128 128 /* Used for packet split */
#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
#define IXGBEVF_RXBUFFER_2048 2048
#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
#define IXGBE_TX_FLAGS_CSUM (u32)(1)
#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1)
#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 4)
#define IXGBE_TX_FLAGS_FSO (u32)(1 << 5)
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
/* MAX_MSIX_Q_VECTORS of these are allocated,
* but we only use one per queue-specific vector.
*/
struct ixgbevf_q_vector {
struct ixgbevf_adapter *adapter;
struct napi_struct napi;
DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
u8 rxr_count; /* Rx ring count assigned to this vector */
u8 txr_count; /* Tx ring count assigned to this vector */
u8 tx_itr;
u8 rx_itr;
u32 eitr;
int v_idx; /* vector index in list */
};
/* Helper macros to switch between ints/sec and what the register uses.
* And yes, it's the same math going both ways. The lowest value
* supported by all of the ixgbe hardware is 8.
*/
#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
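/* Worked example (editor's addition, not in the original source): a request
* for 8000 interrupts/sec maps to 1000000000 / (8000 * 256) = 488 in the
* register, and running 488 back through the same formula gives
* 1000000000 / (488 * 256) = 8004, which is why a single macro serves both
* directions; an argument of 0 falls back to the hardware minimum of 8. */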
#define IXGBE_DESC_UNUSED(R) \
((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
#define IXGBE_RX_DESC_ADV(R, i) \
(&(((union ixgbe_adv_rx_desc *)((R).desc))[i]))
#define IXGBE_TX_DESC_ADV(R, i) \
(&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
#define IXGBE_TX_CTXTDESC_ADV(R, i) \
(&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
#define OTHER_VECTOR 1
#define NON_Q_VECTORS (OTHER_VECTOR)
#define MAX_MSIX_Q_VECTORS 2
#define MAX_MSIX_COUNT 2
#define MIN_MSIX_Q_VECTORS 2
#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
/* board specific private data structure */
struct ixgbevf_adapter {
struct timer_list watchdog_timer;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u16 bd_number;
struct work_struct reset_task;
struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
/* Interrupt Throttle Rate */
u32 itr_setting;
u16 eitr_low;
u16 eitr_high;
/* TX */
struct ixgbevf_ring *tx_ring; /* One per active queue */
int num_tx_queues;
u64 restart_queue;
u64 hw_csum_tx_good;
u64 lsc_int;
u64 hw_tso_ctxt;
u64 hw_tso6_ctxt;
u32 tx_timeout_count;
/* RX */
struct ixgbevf_ring *rx_ring; /* One per active queue */
int num_rx_queues;
int num_rx_pools; /* == num_rx_queues in 82598 */
int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
u64 hw_csum_rx_good;
u64 non_eop_descs;
int num_msix_vectors;
int max_msix_q_vectors; /* true count of q_vectors for device */
struct ixgbevf_ring_feature ring_feature[RING_F_ARRAY_SIZE];
struct msix_entry *msix_entries;
u64 rx_hdr_split;
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
/* Some features need tri-state capability,
* thus the additional *_CAPABLE flags.
*/
u32 flags;
#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3)
#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4)
#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5)
#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 6)
#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 8)
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
/* structs defined in ixgbe_vf.h */
struct ixgbe_hw hw;
u16 msg_enable;
struct ixgbevf_hw_stats stats;
u64 zero_base;
/* Interrupt Throttle Rate */
u32 eitr_param;
unsigned long state;
u32 *config_space;
u64 tx_busy;
unsigned int tx_ring_count;
unsigned int rx_ring_count;
u32 link_speed;
bool link_up;
unsigned long link_check_timeout;
struct work_struct watchdog_task;
bool netdev_registered;
bool dev_closed;
};
enum ixbgevf_state_t {
__IXGBEVF_TESTING,
__IXGBEVF_RESETTING,
__IXGBEVF_DOWN
};
enum ixgbevf_boards {
board_82599_vf,
board_X540_vf,
};
extern struct ixgbevf_info ixgbevf_82599_vf_info;
extern struct ixgbevf_info ixgbevf_X540_vf_info;
extern struct ixgbe_mbx_operations ixgbevf_mbx_ops;
/* needed by ethtool.c */
extern char ixgbevf_driver_name[];
extern const char ixgbevf_driver_version[];
extern int ixgbevf_up(struct ixgbevf_adapter *adapter);
extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
extern void ixgbevf_set_ethtool_ops(struct net_device *netdev);
extern int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *,
struct ixgbevf_ring *);
extern int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *,
struct ixgbevf_ring *);
extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
struct ixgbevf_ring *);
extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
struct ixgbevf_ring *);
extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
#ifdef ETHTOOL_OPS_COMPAT
extern int ethtool_ioctl(struct ifreq *ifr);
#endif
extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
#ifdef DEBUG
extern char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
#define hw_dbg(hw, format, arg...) \
printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
#else
#define hw_dbg(hw, format, arg...) do {} while (0)
#endif
#endif /* _IXGBEVF_H_ */

File diff suppressed because it is too large

View File

@ -0,0 +1,341 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "mbx.h"
/**
* ixgbevf_poll_for_msg - Wait for message notification
* @hw: pointer to the HW structure
*
* returns 0 if it successfully received a message notification
**/
static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
while (countdown && mbx->ops.check_for_msg(hw)) {
countdown--;
udelay(mbx->udelay);
}
/* if we failed, all future posted messages fail until reset */
if (!countdown)
mbx->timeout = 0;
return countdown ? 0 : IXGBE_ERR_MBX;
}
/**
* ixgbevf_poll_for_ack - Wait for message acknowledgement
* @hw: pointer to the HW structure
*
* returns 0 if it successfully received a message acknowledgement
**/
static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
while (countdown && mbx->ops.check_for_ack(hw)) {
countdown--;
udelay(mbx->udelay);
}
/* if we failed, all future posted messages fail until reset */
if (!countdown)
mbx->timeout = 0;
return countdown ? 0 : IXGBE_ERR_MBX;
}
/**
* ixgbevf_read_posted_mbx - Wait for message notification and receive message
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
*
* returns 0 if it successfully received a message notification and
* copied it into the receive buffer.
**/
static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_ERR_MBX;
ret_val = ixgbevf_poll_for_msg(hw);
/* if ack received read message, otherwise we timed out */
if (!ret_val)
ret_val = mbx->ops.read(hw, msg, size);
return ret_val;
}
/**
* ixgbevf_write_posted_mbx - Write a message to the mailbox, wait for ack
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
*
* returns 0 if it successfully copied message into the buffer and
* received an ack to that message within delay * timeout period
**/
static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val;
/* send msg */
ret_val = mbx->ops.write(hw, msg, size);
/* if msg sent wait until we receive an ack */
if (!ret_val)
ret_val = ixgbevf_poll_for_ack(hw);
return ret_val;
}
/**
* ixgbevf_read_v2p_mailbox - read v2p mailbox
* @hw: pointer to the HW structure
*
* This function is used to read the v2p mailbox without losing the read to
* clear status bits.
**/
static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw)
{
u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
v2p_mailbox |= hw->mbx.v2p_mailbox;
hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
return v2p_mailbox;
}
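/* Editor's note (not part of the original source): the PFSTS/PFACK/RSTD
* bits are cleared by the hardware on read, so the value just read is also
* cached in hw->mbx.v2p_mailbox; ixgbevf_check_for_bit_vf() below can then
* still observe those bits on a later call and clears the cached copy once
* a bit has been consumed, so each event is reported exactly once. */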
/**
* ixgbevf_check_for_bit_vf - Determine if a status bit was set
* @hw: pointer to the HW structure
* @mask: bitmask for bits to be tested and cleared
*
* This function is used to check for the read to clear bits within
* the V2P mailbox.
**/
static s32 ixgbevf_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
{
u32 v2p_mailbox = ixgbevf_read_v2p_mailbox(hw);
s32 ret_val = IXGBE_ERR_MBX;
if (v2p_mailbox & mask)
ret_val = 0;
hw->mbx.v2p_mailbox &= ~mask;
return ret_val;
}
/**
* ixgbevf_check_for_msg_vf - checks to see if the PF has sent mail
* @hw: pointer to the HW structure
*
* returns 0 if the PF has set the Status bit or else ERR_MBX
**/
static s32 ixgbevf_check_for_msg_vf(struct ixgbe_hw *hw)
{
s32 ret_val = IXGBE_ERR_MBX;
if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
ret_val = 0;
hw->mbx.stats.reqs++;
}
return ret_val;
}
/**
* ixgbevf_check_for_ack_vf - checks to see if the PF has ACK'd
* @hw: pointer to the HW structure
*
* returns 0 if the PF has set the ACK bit or else ERR_MBX
**/
static s32 ixgbevf_check_for_ack_vf(struct ixgbe_hw *hw)
{
s32 ret_val = IXGBE_ERR_MBX;
if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
ret_val = 0;
hw->mbx.stats.acks++;
}
return ret_val;
}
/**
* ixgbevf_check_for_rst_vf - checks to see if the PF has reset
* @hw: pointer to the HW structure
*
* returns 0 if the PF has set the reset done/indication bit or else IXGBE_ERR_MBX
**/
static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
{
s32 ret_val = IXGBE_ERR_MBX;
if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
IXGBE_VFMAILBOX_RSTI))) {
ret_val = 0;
hw->mbx.stats.rsts++;
}
return ret_val;
}
/**
* ixgbevf_obtain_mbx_lock_vf - obtain mailbox lock
* @hw: pointer to the HW structure
*
* return 0 if we obtained the mailbox lock
**/
static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
{
s32 ret_val = IXGBE_ERR_MBX;
/* Take ownership of the buffer */
IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
/* reserve mailbox for vf use */
if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
ret_val = 0;
return ret_val;
}
/**
* ixgbevf_write_mbx_vf - Write a message to the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
*
* returns 0 if it successfully copied message into the buffer
**/
static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
s32 ret_val;
u16 i;
/* lock the mailbox to prevent pf/vf race condition */
ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
if (ret_val)
goto out_no_write;
/* flush msg and acks as we are overwriting the message buffer */
ixgbevf_check_for_msg_vf(hw);
ixgbevf_check_for_ack_vf(hw);
/* copy the caller specified message to the mailbox memory buffer */
for (i = 0; i < size; i++)
IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
/* update stats */
hw->mbx.stats.msgs_tx++;
/* Drop VFU and interrupt the PF to tell it a message has been sent */
IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
out_no_write:
return ret_val;
}
/**
* ixgbevf_read_mbx_vf - Reads a message from the inbox intended for vf
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
*
* returns 0 if it successfully read message from buffer
**/
static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
s32 ret_val = 0;
u16 i;
/* lock the mailbox to prevent pf/vf race condition */
ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
if (ret_val)
goto out_no_read;
/* copy the message from the mailbox memory buffer */
for (i = 0; i < size; i++)
msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
/* Acknowledge receipt and release mailbox, then we're done */
IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);
/* update stats */
hw->mbx.stats.msgs_rx++;
out_no_read:
return ret_val;
}
/**
* ixgbevf_init_mbx_params_vf - set initial values for vf mailbox
* @hw: pointer to the HW structure
*
* Initializes the hw->mbx struct to correct values for vf mailbox
*/
static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
/* start mailbox as timed out and let the reset_hw call set the timeout
* value to begin communications */
mbx->timeout = 0;
mbx->udelay = IXGBE_VF_MBX_INIT_DELAY;
mbx->size = IXGBE_VFMAILBOX_SIZE;
mbx->stats.msgs_tx = 0;
mbx->stats.msgs_rx = 0;
mbx->stats.reqs = 0;
mbx->stats.acks = 0;
mbx->stats.rsts = 0;
return 0;
}
struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
.init_params = ixgbevf_init_mbx_params_vf,
.read = ixgbevf_read_mbx_vf,
.write = ixgbevf_write_mbx_vf,
.read_posted = ixgbevf_read_posted_mbx,
.write_posted = ixgbevf_write_posted_mbx,
.check_for_msg = ixgbevf_check_for_msg_vf,
.check_for_ack = ixgbevf_check_for_ack_vf,
.check_for_rst = ixgbevf_check_for_rst_vf,
};

View File

@ -0,0 +1,99 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _IXGBE_MBX_H_
#define _IXGBE_MBX_H_
#include "vf.h"
#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
#define IXGBE_ERR_MBX -100
#define IXGBE_VFMAILBOX 0x002FC
#define IXGBE_VFMBMEM 0x00200
/* Define mailbox register bits */
#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */
#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */
#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
/* If it's an IXGBE_VF_* msg then it originates in the VF and is sent to the
* PF. The reverse is true if it is IXGBE_PF_*.
* Message results are the original value or'd with the IXGBE_VT_MSGTYPE_*
* bits below (the top nibble, 0xF0000000)
*/
#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
* this are the ACK */
#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
* this are the NACK */
#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
* clear to send requests */
#define IXGBE_VT_MSGINFO_SHIFT 16
/* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
#define IXGBE_VF_RESET 0x01 /* VF requests reset */
#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
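/* Editor's illustration (not part of the original header): a request is a
* single 32-bit command word, optionally carrying extra info in the MSGINFO
* field, e.g. vf.c sends IXGBE_VF_SET_VLAN with the "add" flag shifted by
* IXGBE_VT_MSGINFO_SHIFT, and the PF replies with the same command or'd
* with IXGBE_VT_MSGTYPE_ACK or IXGBE_VT_MSGTYPE_NACK. */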
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
#define IXGBE_VF_MC_TYPE_WORD 3
#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
/* forward declaration of the HW struct */
struct ixgbe_hw;
#endif /* _IXGBE_MBX_H_ */

View File

@ -0,0 +1,85 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _IXGBEVF_REGS_H_
#define _IXGBEVF_REGS_H_
#define IXGBE_VFCTRL 0x00000
#define IXGBE_VFSTATUS 0x00008
#define IXGBE_VFLINKS 0x00010
#define IXGBE_VFFRTIMER 0x00048
#define IXGBE_VFRXMEMWRAP 0x03190
#define IXGBE_VTEICR 0x00100
#define IXGBE_VTEICS 0x00104
#define IXGBE_VTEIMS 0x00108
#define IXGBE_VTEIMC 0x0010C
#define IXGBE_VTEIAC 0x00110
#define IXGBE_VTEIAM 0x00114
#define IXGBE_VTEITR(x) (0x00820 + (4 * x))
#define IXGBE_VTIVAR(x) (0x00120 + (4 * x))
#define IXGBE_VTIVAR_MISC 0x00140
#define IXGBE_VTRSCINT(x) (0x00180 + (4 * x))
#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * x))
#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * x))
#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * x))
#define IXGBE_VFRDH(x) (0x01010 + (0x40 * x))
#define IXGBE_VFRDT(x) (0x01018 + (0x40 * x))
#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * x))
#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * x))
#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * x))
#define IXGBE_VFPSRTYPE 0x00300
#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * x))
#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * x))
#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * x))
#define IXGBE_VFTDH(x) (0x02010 + (0x40 * x))
#define IXGBE_VFTDT(x) (0x02018 + (0x40 * x))
#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * x))
#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * x))
#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * x))
#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * x))
#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * x))
#define IXGBE_VFGPRC 0x0101C
#define IXGBE_VFGPTC 0x0201C
#define IXGBE_VFGORC_LSB 0x01020
#define IXGBE_VFGORC_MSB 0x01024
#define IXGBE_VFGOTC_LSB 0x02020
#define IXGBE_VFGOTC_MSB 0x02024
#define IXGBE_VFMPRC 0x01034
#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \
writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \
readl((a)->hw_addr + (reg) + ((offset) << 2)))
#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
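/* Editor's note (not part of the original header): the _ARRAY accessors
* index by 32-bit words, hence the (offset) << 2 conversion to a byte
* offset, and IXGBE_WRITE_FLUSH forces posted writes out by issuing a
* harmless read of the status register. */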
#endif /* _IXGBEVF_REGS_H_ */

View File

@ -0,0 +1,426 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "vf.h"
/**
* ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
* @hw: pointer to hardware structure
*
* Starts the hardware by filling the bus info structure and media type, clears
* all on chip counters, initializes receive address registers, multicast
* table, VLAN filter table, calls routine to set up link and flow control
* settings, and leaves transmit and receive units disabled and uninitialized
**/
static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
{
/* Clear adapter stopped flag */
hw->adapter_stopped = false;
return 0;
}
/**
* ixgbevf_init_hw_vf - virtual function hardware initialization
* @hw: pointer to hardware structure
*
* Initialize the hardware by resetting it and then starting it
**/
static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
{
s32 status = hw->mac.ops.start_hw(hw);
hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
return status;
}
/**
* ixgbevf_reset_hw_vf - Performs hardware reset
* @hw: pointer to hardware structure
*
* Resets the hardware by resetting the transmit and receive units, masks and
* clears all interrupts.
**/
static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 timeout = IXGBE_VF_INIT_TIMEOUT;
s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
u8 *addr = (u8 *)(&msgbuf[1]);
/* Call adapter stop to disable tx/rx and clear interrupts */
hw->mac.ops.stop_adapter(hw);
IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
IXGBE_WRITE_FLUSH(hw);
/* we cannot reset while the RSTI / RSTD bits are asserted */
while (!mbx->ops.check_for_rst(hw) && timeout) {
timeout--;
udelay(5);
}
if (!timeout)
return IXGBE_ERR_RESET_FAILED;
/* mailbox timeout can now become active */
mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
msgbuf[0] = IXGBE_VF_RESET;
mbx->ops.write_posted(hw, msgbuf, 1);
msleep(10);
/* set our "perm_addr" based on info provided by PF */
/* also set up the mc_filter_type which is piggy backed
* on the mac address in word 3 */
ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
if (ret_val)
return ret_val;
if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
return IXGBE_ERR_INVALID_MAC_ADDR;
memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
return 0;
}
/**
* ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
* @hw: pointer to hardware structure
*
* Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
* disables transmit and receive units. The adapter_stopped flag is used by
* the shared code and drivers to determine if the adapter is in a stopped
* state and should not touch the hardware.
**/
static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
{
u32 number_of_queues;
u32 reg_val;
u16 i;
/*
* Set the adapter_stopped flag so other driver functions stop touching
* the hardware
*/
hw->adapter_stopped = true;
/* Disable the receive unit by stopping each queue */
number_of_queues = hw->mac.max_rx_queues;
for (i = 0; i < number_of_queues; i++) {
reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
if (reg_val & IXGBE_RXDCTL_ENABLE) {
reg_val &= ~IXGBE_RXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
}
}
IXGBE_WRITE_FLUSH(hw);
/* Clear interrupt mask to stop interrupts from being generated */
IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
/* Clear any pending interrupts */
IXGBE_READ_REG(hw, IXGBE_VTEICR);
/* Disable the transmit unit. Each queue must be disabled. */
number_of_queues = hw->mac.max_tx_queues;
for (i = 0; i < number_of_queues; i++) {
reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
if (reg_val & IXGBE_TXDCTL_ENABLE) {
reg_val &= ~IXGBE_TXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
}
}
return 0;
}
/**
* ixgbevf_mta_vector - Determines bit-vector in multicast table to set
* @hw: pointer to hardware structure
* @mc_addr: the multicast address
*
* Extracts the 12 bits, from a multicast address, to determine which
* bit-vector to set in the multicast table. The hardware uses 12 bits, from
* incoming rx multicast addresses, to determine the bit-vector to check in
* the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
* by the MO field of the MCSTCTRL. The MO field is set during initialization
* to mc_filter_type.
**/
static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
u32 vector = 0;
switch (hw->mac.mc_filter_type) {
case 0: /* use bits [47:36] of the address */
vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
break;
case 1: /* use bits [46:35] of the address */
vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
break;
case 2: /* use bits [45:34] of the address */
vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
break;
case 3: /* use bits [43:32] of the address */
vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
break;
default: /* Invalid mc_filter_type */
break;
}
/* vector can only be 12-bits or boundary will be exceeded */
vector &= 0xFFF;
return vector;
}
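/* Worked example (editor's addition, not in the original source): with
* filter type 0 and the mDNS group address 01:00:5e:00:00:fb,
* mc_addr[4] = 0x00 and mc_addr[5] = 0xfb, so the function returns
* (0x00 >> 4) | (0xfb << 4) = 0xfb0; that 12-bit vector is what
* ixgbevf_update_mc_addr_list_vf() below hands to the PF to set in the MTA. */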
/**
* ixgbevf_get_mac_addr_vf - Read device MAC address
* @hw: pointer to the HW structure
* @mac_addr: pointer to storage for retrieved MAC address
**/
static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
{
memcpy(mac_addr, hw->mac.perm_addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
return 0;
}
static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[3];
u8 *msg_addr = (u8 *)(&msgbuf[1]);
s32 ret_val;
memset(msgbuf, 0, sizeof(msgbuf));
/*
* If index is one then this is the start of a new list and needs
* indication to the PF so it can do its own list management.
* If it is zero then that tells the PF to just clear all of
* this VF's macvlans and there is no new list.
*/
msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
if (addr)
memcpy(msg_addr, addr, 6);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
if (!ret_val)
ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
if (!ret_val)
if (msgbuf[0] ==
(IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK))
ret_val = -ENOMEM;
return ret_val;
}
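/* Editor's note (not part of the original source): per the comment above,
* a caller programs a fresh unicast list with set_uc_addr(hw, 1, addr0),
* set_uc_addr(hw, 2, addr1), ... (index 1 marks the start of a new list),
* while set_uc_addr(hw, 0, NULL) on its own asks the PF to clear every
* macvlan filter belonging to this VF. */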
/**
* ixgbevf_set_rar_vf - set device MAC address
* @hw: pointer to hardware structure
* @index: Receive address register to write
* @addr: Address to put into receive address register
* @vmdq: Unused in this implementation
**/
static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
u32 vmdq)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[3];
u8 *msg_addr = (u8 *)(&msgbuf[1]);
s32 ret_val;
memset(msgbuf, 0, sizeof(msgbuf));
msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
memcpy(msg_addr, addr, 6);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
if (!ret_val)
ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* if nacked the address was rejected, use "perm_addr" */
if (!ret_val &&
(msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
return ret_val;
}
/**
* ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
* @hw: pointer to the HW structure
* @netdev: pointer to net device structure
*
* Updates the Multicast Table Array.
**/
static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
struct net_device *netdev)
{
struct netdev_hw_addr *ha;
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
u16 *vector_list = (u16 *)&msgbuf[1];
u32 cnt, i;
/* Each entry in the list uses 1 16 bit word. We have 30
* 16 bit words available in our HW msg buffer (minus 1 for the
* msg type). That's 30 hash values if we pack 'em right. If
* there are more than 30 MC addresses to add then punt the
* extras for now and then add code to handle more than 30 later.
* It would be unusual for a server to request that many multi-cast
* addresses except for in large enterprise network environments.
*/
cnt = netdev_mc_count(netdev);
if (cnt > 30)
cnt = 30;
msgbuf[0] = IXGBE_VF_SET_MULTICAST;
msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
i = 0;
netdev_for_each_mc_addr(ha, netdev) {
if (i == cnt)
break;
vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
}
mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
return 0;
}
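/* Editor's illustration (not part of the original source): with two
* multicast addresses the message written above is
*	msgbuf[0]         = IXGBE_VF_SET_MULTICAST | (2 << IXGBE_VT_MSGINFO_SHIFT)
*	vector_list[0..1] = the two 12-bit MTA vectors
* and the remaining words of the fixed IXGBE_VFMAILBOX_SIZE write carry no
* additional entries. */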
/**
* ixgbevf_set_vfta_vf - Set/Unset vlan filter table address
* @hw: pointer to the HW structure
* @vlan: 12 bit VLAN ID
* @vind: unused by VF drivers
* @vlan_on: if true then set bit, else clear bit
**/
static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[2];
msgbuf[0] = IXGBE_VF_SET_VLAN;
msgbuf[1] = vlan;
/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
return mbx->ops.write_posted(hw, msgbuf, 2);
}
/**
* ixgbevf_setup_mac_link_vf - Setup MAC link settings
* @hw: pointer to hardware structure
* @speed: Unused in this implementation
* @autoneg: Unused in this implementation
* @autoneg_wait_to_complete: Unused in this implementation
*
* Do nothing and return success. VF drivers are not allowed to change
* global settings. Maintained for driver compatibility.
**/
static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
ixgbe_link_speed speed, bool autoneg,
bool autoneg_wait_to_complete)
{
return 0;
}
/**
* ixgbevf_check_mac_link_vf - Get link/speed status
* @hw: pointer to hardware structure
* @speed: pointer to link speed
* @link_up: true if link is up, false otherwise
* @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Reads the links register to determine if link is up and the current speed
**/
static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up,
bool autoneg_wait_to_complete)
{
u32 links_reg;
if (!(hw->mbx.ops.check_for_rst(hw))) {
*link_up = false;
*speed = 0;
return -1;
}
links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
if (links_reg & IXGBE_LINKS_UP)
*link_up = true;
else
*link_up = false;
if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
IXGBE_LINKS_SPEED_10G_82599)
*speed = IXGBE_LINK_SPEED_10GB_FULL;
else
*speed = IXGBE_LINK_SPEED_1GB_FULL;
return 0;
}
static struct ixgbe_mac_operations ixgbevf_mac_ops = {
.init_hw = ixgbevf_init_hw_vf,
.reset_hw = ixgbevf_reset_hw_vf,
.start_hw = ixgbevf_start_hw_vf,
.get_mac_addr = ixgbevf_get_mac_addr_vf,
.stop_adapter = ixgbevf_stop_hw_vf,
.setup_link = ixgbevf_setup_mac_link_vf,
.check_link = ixgbevf_check_mac_link_vf,
.set_rar = ixgbevf_set_rar_vf,
.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
.set_uc_addr = ixgbevf_set_uc_addr_vf,
.set_vfta = ixgbevf_set_vfta_vf,
};
struct ixgbevf_info ixgbevf_82599_vf_info = {
.mac = ixgbe_mac_82599_vf,
.mac_ops = &ixgbevf_mac_ops,
};
struct ixgbevf_info ixgbevf_X540_vf_info = {
.mac = ixgbe_mac_X540_vf,
.mac_ops = &ixgbevf_mac_ops,
};

View File

@ -0,0 +1,174 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef __IXGBE_VF_H__
#define __IXGBE_VF_H__
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include "defines.h"
#include "regs.h"
#include "mbx.h"
struct ixgbe_hw;
/* iterator type for walking multicast address lists */
typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
u32 *vmdq);
struct ixgbe_mac_operations {
s32 (*init_hw)(struct ixgbe_hw *);
s32 (*reset_hw)(struct ixgbe_hw *);
s32 (*start_hw)(struct ixgbe_hw *);
s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
s32 (*stop_adapter)(struct ixgbe_hw *);
s32 (*get_bus_info)(struct ixgbe_hw *);
/* Link */
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
bool *);
/* RAR, Multicast, VLAN */
s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32);
s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
s32 (*enable_mc)(struct ixgbe_hw *);
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
};
enum ixgbe_mac_type {
ixgbe_mac_unknown = 0,
ixgbe_mac_82599_vf,
ixgbe_mac_X540_vf,
ixgbe_num_macs
};
struct ixgbe_mac_info {
struct ixgbe_mac_operations ops;
u8 addr[6];
u8 perm_addr[6];
enum ixgbe_mac_type type;
s32 mc_filter_type;
bool get_link_status;
u32 max_tx_queues;
u32 max_rx_queues;
u32 max_msix_vectors;
};
struct ixgbe_mbx_operations {
s32 (*init_params)(struct ixgbe_hw *hw);
s32 (*read)(struct ixgbe_hw *, u32 *, u16);
s32 (*write)(struct ixgbe_hw *, u32 *, u16);
s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16);
s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16);
s32 (*check_for_msg)(struct ixgbe_hw *);
s32 (*check_for_ack)(struct ixgbe_hw *);
s32 (*check_for_rst)(struct ixgbe_hw *);
};
struct ixgbe_mbx_stats {
u32 msgs_tx;
u32 msgs_rx;
u32 acks;
u32 reqs;
u32 rsts;
};
struct ixgbe_mbx_info {
struct ixgbe_mbx_operations ops;
struct ixgbe_mbx_stats stats;
u32 timeout;
u32 udelay;
u32 v2p_mailbox;
u16 size;
};
struct ixgbe_hw {
void *back;
u8 __iomem *hw_addr;
struct ixgbe_mac_info mac;
struct ixgbe_mbx_info mbx;
u16 device_id;
u16 subsystem_vendor_id;
u16 subsystem_device_id;
u16 vendor_id;
u8 revision_id;
bool adapter_stopped;
};
struct ixgbevf_hw_stats {
u64 base_vfgprc;
u64 base_vfgptc;
u64 base_vfgorc;
u64 base_vfgotc;
u64 base_vfmprc;
u64 last_vfgprc;
u64 last_vfgptc;
u64 last_vfgorc;
u64 last_vfgotc;
u64 last_vfmprc;
u64 vfgprc;
u64 vfgptc;
u64 vfgorc;
u64 vfgotc;
u64 vfmprc;
u64 saved_reset_vfgprc;
u64 saved_reset_vfgptc;
u64 saved_reset_vfgorc;
u64 saved_reset_vfgotc;
u64 saved_reset_vfmprc;
};
struct ixgbevf_info {
enum ixgbe_mac_type mac;
struct ixgbe_mac_operations *mac_ops;
};
#endif /* __IXGBE_VF_H__ */