@@ -848,8 +848,8 @@ static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
 {
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
 
-	a->rx_ring[r_idx].next = q_vector->rx.ring;
-	q_vector->rx.ring = &a->rx_ring[r_idx];
+	a->rx_ring[r_idx]->next = q_vector->rx.ring;
+	q_vector->rx.ring = a->rx_ring[r_idx];
	q_vector->rx.count++;
 }
 
@@ -858,8 +858,8 @@ static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
 {
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
 
-	a->tx_ring[t_idx].next = q_vector->tx.ring;
-	q_vector->tx.ring = &a->tx_ring[t_idx];
+	a->tx_ring[t_idx]->next = q_vector->tx.ring;
+	q_vector->tx.ring = a->tx_ring[t_idx];
	q_vector->tx.count++;
 }
 
@@ -1086,6 +1086,70 @@ static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
 }
 
+/**
+ * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
+ * @adapter: board private structure
+ * @ring: structure containing ring specific data
+ *
+ * Configure the Tx descriptor ring after a reset.
+ **/
+static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
+				      struct ixgbevf_ring *ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u64 tdba = ring->dma;
+	int wait_loop = 10;
+	u32 txdctl = IXGBE_TXDCTL_ENABLE;
+	u8 reg_idx = ring->reg_idx;
+
+	/* disable queue to avoid issues while updating state */
+	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+	IXGBE_WRITE_FLUSH(hw);
+
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
+			ring->count * sizeof(union ixgbe_adv_tx_desc));
+
+	/* disable head writeback */
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
+
+	/* enable relaxed ordering */
+	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
+			(IXGBE_DCA_TXCTRL_DESC_RRO_EN |
+			 IXGBE_DCA_TXCTRL_DATA_RRO_EN));
+
+	/* reset head and tail pointers */
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
+	ring->tail = hw->hw_addr + IXGBE_VFTDT(reg_idx);
+
+	/* reset ntu and ntc to place SW in sync with hardware */
+	ring->next_to_clean = 0;
+	ring->next_to_use = 0;
+
+	/* In order to avoid issues, WTHRESH + PTHRESH should always be equal
+	 * to or less than the number of on-chip descriptors, which is
+	 * currently 40.
+	 */
+	txdctl |= (8 << 16);    /* WTHRESH = 8 */
+
+	/* Setting PTHRESH to 32 both improves performance and avoids a Tx hang with DMA enabled */
+	txdctl |= (1 << 8) |    /* HTHRESH = 1 */
+		   32;          /* PTHRESH = 32 */
+
+	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
+
+	/* poll to verify queue is enabled */
+	do {
+		usleep_range(1000, 2000);
+		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
+	} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
+	if (!wait_loop)
+		pr_err("Could not enable Tx Queue %d\n", reg_idx);
+}
+
 /**
  * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
  * @adapter: board private structure
@@ -1094,32 +1158,11 @@ static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
  **/
 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
 {
-	u64 tdba;
-	struct ixgbe_hw *hw = &adapter->hw;
-	u32 i, j, tdlen, txctrl;
+	u32 i;
 
	/* Setup the HW Tx Head and Tail descriptor pointers */
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
-		j = ring->reg_idx;
-		tdba = ring->dma;
-		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
-		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
-				(tdba & DMA_BIT_MASK(32)));
-		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
-		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
-		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
-		ring->tail = hw->hw_addr + IXGBE_VFTDT(j);
-		ring->next_to_clean = 0;
-		ring->next_to_use = 0;
-		/* Disable Tx Head Writeback RO bit, since this hoses
-		 * bookkeeping if things aren't delivered in order.
-		 */
-		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
-		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
-		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
-	}
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
 }
 
 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2
@@ -1130,7 +1173,7 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;
 
-	rx_ring = &adapter->rx_ring[index];
+	rx_ring = adapter->rx_ring[index];
 
	srrctl = IXGBE_SRRCTL_DROP_EN;
 
@@ -1188,7 +1231,93 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
		rx_buf_len = IXGBEVF_RXBUFFER_10K;
 
	for (i = 0; i < adapter->num_rx_queues; i++)
-		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+		adapter->rx_ring[i]->rx_buf_len = rx_buf_len;
 }
 
+#define IXGBEVF_MAX_RX_DESC_POLL 10
+static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
+				     struct ixgbevf_ring *ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+	u32 rxdctl;
+	u8 reg_idx = ring->reg_idx;
+
+	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+	rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+	/* write value back with RXDCTL.ENABLE bit cleared */
+	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+	/* the hardware may take up to 100us to really disable the rx queue */
+	do {
+		udelay(10);
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+	if (!wait_loop)
+		pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
+		       reg_idx);
+}
+
+static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+					 struct ixgbevf_ring *ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+	u32 rxdctl;
+	u8 reg_idx = ring->reg_idx;
+
+	do {
+		usleep_range(1000, 2000);
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+
+	if (!wait_loop)
+		pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
+		       reg_idx);
+}
+
+static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
+				      struct ixgbevf_ring *ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u64 rdba = ring->dma;
+	u32 rxdctl;
+	u8 reg_idx = ring->reg_idx;
+
+	/* disable queue to avoid issues while updating state */
+	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+	ixgbevf_disable_rx_queue(adapter, ring);
+
+	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
+	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
+	IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
+			ring->count * sizeof(union ixgbe_adv_rx_desc));
+
+	/* enable relaxed ordering */
+	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
+			IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+
+	/* reset head and tail pointers */
+	IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
+	ring->tail = hw->hw_addr + IXGBE_VFRDT(reg_idx);
+
+	/* reset ntu and ntc to place SW in sync with hardware */
+	ring->next_to_clean = 0;
+	ring->next_to_use = 0;
+
+	ixgbevf_configure_srrctl(adapter, reg_idx);
+
+	/* prevent DMA from exceeding buffer space available */
+	rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
+	rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN;
+	rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
+	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+	ixgbevf_rx_desc_queue_enable(adapter, ring);
+	ixgbevf_alloc_rx_buffers(adapter, ring, ixgbevf_desc_unused(ring));
+}
+
 /**
@@ -1199,10 +1328,7 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
  **/
 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
 {
-	u64 rdba;
-	struct ixgbe_hw *hw = &adapter->hw;
-	int i, j;
-	u32 rdlen;
+	int i;
 
	ixgbevf_setup_psrtype(adapter);
 
@@ -1211,23 +1337,8 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
 
-	/* Setup the HW Rx Head and Tail Descriptor Pointers and
-	 * the Base and Length of the Rx Descriptor Ring */
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
-		rdba = ring->dma;
-		j = ring->reg_idx;
-		rdlen = ring->count * sizeof(union ixgbe_adv_rx_desc);
-		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
-				(rdba & DMA_BIT_MASK(32)));
-		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
-		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
-		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
-		ring->tail = hw->hw_addr + IXGBE_VFRDT(j);
-		ring->next_to_clean = 0;
-		ring->next_to_use = 0;
-
-		ixgbevf_configure_srrctl(adapter, j);
-	}
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
 }
 
 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
@@ -1389,7 +1500,7 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
 
	if (num_tcs > 1) {
		/* update default Tx ring register index */
-		adapter->tx_ring[0].reg_idx = def_q;
+		adapter->tx_ring[0]->reg_idx = def_q;
 
		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
@@ -1409,69 +1520,14 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
 
 static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
 {
-	struct net_device *netdev = adapter->netdev;
-	int i;
-
	ixgbevf_configure_dcb(adapter);
 
-	ixgbevf_set_rx_mode(netdev);
+	ixgbevf_set_rx_mode(adapter->netdev);
 
	ixgbevf_restore_vlan(adapter);
 
	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
-		ixgbevf_alloc_rx_buffers(adapter, ring,
-					 ixgbevf_desc_unused(ring));
-	}
 }
 
-#define IXGBEVF_MAX_RX_DESC_POLL 10
-static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
-					 int rxr)
-{
-	struct ixgbe_hw *hw = &adapter->hw;
-	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
-	u32 rxdctl;
-	int j = adapter->rx_ring[rxr].reg_idx;
-
-	do {
-		usleep_range(1000, 2000);
-		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
-	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
-
-	if (!wait_loop)
-		hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
-		       rxr);
-
-	ixgbevf_release_rx_desc(&adapter->rx_ring[rxr],
-				(adapter->rx_ring[rxr].count - 1));
-}
-
-static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
-				     struct ixgbevf_ring *ring)
-{
-	struct ixgbe_hw *hw = &adapter->hw;
-	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
-	u32 rxdctl;
-	u8 reg_idx = ring->reg_idx;
-
-	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
-	rxdctl &= ~IXGBE_RXDCTL_ENABLE;
-
-	/* write value back with RXDCTL.ENABLE bit cleared */
-	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
-
-	/* the hardware may take up to 100us to really disable the rx queue */
-	do {
-		udelay(10);
-		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
-	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
-
-	if (!wait_loop)
-		hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n",
-		       reg_idx);
-}
-
 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@ -1536,37 +1592,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 {
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
-	int i, j = 0;
-	int num_rx_rings = adapter->num_rx_queues;
-	u32 txdctl, rxdctl;
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		j = adapter->tx_ring[i].reg_idx;
-		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
-		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
-		txdctl |= (8 << 16);
-		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
-	}
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		j = adapter->tx_ring[i].reg_idx;
-		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
-		txdctl |= IXGBE_TXDCTL_ENABLE;
-		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
-	}
-
-	for (i = 0; i < num_rx_rings; i++) {
-		j = adapter->rx_ring[i].reg_idx;
-		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
-		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
-		if (hw->mac.type == ixgbe_mac_X540_vf) {
-			rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
-			rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
-				   IXGBE_RXDCTL_RLPML_EN);
-		}
-		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
-		ixgbevf_rx_desc_queue_enable(adapter, i);
-	}
 
	ixgbevf_configure_msix(adapter);
 
@@ -1686,7 +1711,7 @@ static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
	int i;
 
	for (i = 0; i < adapter->num_rx_queues; i++)
-		ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+		ixgbevf_clean_rx_ring(adapter, adapter->rx_ring[i]);
 }
 
 /**
@@ -1698,22 +1723,21 @@ static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
	int i;
 
	for (i = 0; i < adapter->num_tx_queues; i++)
-		ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+		ixgbevf_clean_tx_ring(adapter, adapter->tx_ring[i]);
 }
 
 void ixgbevf_down(struct ixgbevf_adapter *adapter)
 {
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
-	u32 txdctl;
-	int i, j;
+	int i;
 
	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBEVF_DOWN, &adapter->state);
 
	/* disable all enabled rx queues */
	for (i = 0; i < adapter->num_rx_queues; i++)
-		ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);
+		ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
 
	netif_tx_disable(netdev);
 
@@ -1734,10 +1758,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
 
	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
-		j = adapter->tx_ring[i].reg_idx;
-		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
-		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
-				(txdctl & ~IXGBE_TXDCTL_ENABLE));
+		u8 reg_idx = adapter->tx_ring[i]->reg_idx;
+
+		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
+				IXGBE_TXDCTL_SWFLSH);
	}
 
	netif_carrier_off(netdev);
@@ -1875,40 +1899,50 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
  **/
 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
 {
-	int i;
+	struct ixgbevf_ring *ring;
+	int rx = 0, tx = 0;
 
-	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
-				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
-	if (!adapter->tx_ring)
-		goto err_tx_ring_allocation;
+	for (; tx < adapter->num_tx_queues; tx++) {
+		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+		if (!ring)
+			goto err_allocation;
 
-	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
-				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
-	if (!adapter->rx_ring)
-		goto err_rx_ring_allocation;
+		ring->dev = &adapter->pdev->dev;
+		ring->netdev = adapter->netdev;
+		ring->count = adapter->tx_ring_count;
+		ring->queue_index = tx;
+		ring->reg_idx = tx;
 
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		adapter->tx_ring[i].count = adapter->tx_ring_count;
-		adapter->tx_ring[i].queue_index = i;
-		/* reg_idx may be remapped later by DCB config */
-		adapter->tx_ring[i].reg_idx = i;
-		adapter->tx_ring[i].dev = &adapter->pdev->dev;
-		adapter->tx_ring[i].netdev = adapter->netdev;
+		adapter->tx_ring[tx] = ring;
	}
 
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		adapter->rx_ring[i].count = adapter->rx_ring_count;
-		adapter->rx_ring[i].queue_index = i;
-		adapter->rx_ring[i].reg_idx = i;
-		adapter->rx_ring[i].dev = &adapter->pdev->dev;
-		adapter->rx_ring[i].netdev = adapter->netdev;
+	for (; rx < adapter->num_rx_queues; rx++) {
+		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+		if (!ring)
+			goto err_allocation;
+
+		ring->dev = &adapter->pdev->dev;
+		ring->netdev = adapter->netdev;
+
+		ring->count = adapter->rx_ring_count;
+		ring->queue_index = rx;
+		ring->reg_idx = rx;
+
+		adapter->rx_ring[rx] = ring;
	}
 
	return 0;
 
-err_rx_ring_allocation:
-	kfree(adapter->tx_ring);
-err_tx_ring_allocation:
+err_allocation:
+	while (tx) {
+		kfree(adapter->tx_ring[--tx]);
+		adapter->tx_ring[tx] = NULL;
+	}
+
+	while (rx) {
+		kfree(adapter->rx_ring[--rx]);
+		adapter->rx_ring[rx] = NULL;
+	}
	return -ENOMEM;
 }
 
@@ -2099,6 +2133,17 @@ err_set_interrupt:
  **/
 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
 {
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		kfree(adapter->tx_ring[i]);
+		adapter->tx_ring[i] = NULL;
+	}
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		kfree(adapter->rx_ring[i]);
+		adapter->rx_ring[i] = NULL;
+	}
+
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
 
@@ -2229,11 +2274,11 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
 
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->hw_csum_rx_error +=
-			adapter->rx_ring[i].hw_csum_rx_error;
+			adapter->rx_ring[i]->hw_csum_rx_error;
		adapter->hw_csum_rx_good +=
-			adapter->rx_ring[i].hw_csum_rx_good;
-		adapter->rx_ring[i].hw_csum_rx_error = 0;
-		adapter->rx_ring[i].hw_csum_rx_good = 0;
+			adapter->rx_ring[i]->hw_csum_rx_good;
+		adapter->rx_ring[i]->hw_csum_rx_error = 0;
+		adapter->rx_ring[i]->hw_csum_rx_good = 0;
	}
 }
 
@@ -2396,6 +2441,10 @@ void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
 
+	/* if not set, then don't free */
+	if (!tx_ring->desc)
+		return;
+
	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);
 
@@ -2413,10 +2462,8 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
	int i;
 
	for (i = 0; i < adapter->num_tx_queues; i++)
-		if (adapter->tx_ring[i].desc)
-			ixgbevf_free_tx_resources(adapter,
-						  &adapter->tx_ring[i]);
-
+		if (adapter->tx_ring[i]->desc)
+			ixgbevf_free_tx_resources(adapter, adapter->tx_ring[i]);
 }
 
 /**
@@ -2471,7 +2518,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
	int i, err = 0;
 
	for (i = 0; i < adapter->num_tx_queues; i++) {
-		err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+		err = ixgbevf_setup_tx_resources(adapter, adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
@@ -2533,7 +2580,7 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
	int i, err = 0;
 
	for (i = 0; i < adapter->num_rx_queues; i++) {
-		err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+		err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
@@ -2577,9 +2624,8 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
	int i;
 
	for (i = 0; i < adapter->num_rx_queues; i++)
-		if (adapter->rx_ring[i].desc)
-			ixgbevf_free_rx_resources(adapter,
-						  &adapter->rx_ring[i]);
+		if (adapter->rx_ring[i]->desc)
+			ixgbevf_free_rx_resources(adapter, adapter->rx_ring[i]);
 }
 
 /**
@@ -3069,7 +3115,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
		return NETDEV_TX_OK;
	}
 
-	tx_ring = &adapter->tx_ring[r_idx];
+	tx_ring = adapter->tx_ring[r_idx];
 
	/*
	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
@@ -3222,8 +3268,8 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
 #ifdef CONFIG_PM
 static int ixgbevf_resume(struct pci_dev *pdev)
 {
-	struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev);
-	struct net_device *netdev = adapter->netdev;
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	u32 err;
 
	pci_set_power_state(pdev, PCI_D0);
@@ -3282,7 +3328,7 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
 
	for (i = 0; i < adapter->num_rx_queues; i++) {
-		ring = &adapter->rx_ring[i];
+		ring = adapter->rx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			bytes = ring->total_bytes;
@@ -3293,7 +3339,7 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
	}
 
	for (i = 0; i < adapter->num_tx_queues; i++) {
-		ring = &adapter->tx_ring[i];
+		ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			bytes = ring->total_bytes;
@@ -3528,9 +3574,6 @@ static void ixgbevf_remove(struct pci_dev *pdev)
 
	hw_dbg(&adapter->hw, "Remove complete\n");
 
-	kfree(adapter->tx_ring);
-	kfree(adapter->rx_ring);
-
	free_netdev(netdev);
 
	pci_disable_device(pdev);