ixgbe: split Tx/Rx ring clearing for ethtool loopback test
Commit: fed21bcee7a5 ("ixgbe: Don't bother clearing buffer memory for descriptor rings") exposed some issues with the logic in the current implementation of ixgbe_clean_test_rings() that are being addressed in this patch: - Split the clearing of the Tx and Rx rings into separate loops. Previously both Tx and Rx rings were cleared in a rx_desc->wb.upper.length based loop, which could lead to issues if for whatever reason packets were received outside of the frames transmitted for the loopback test. - Add a check for IXGBE_TXD_STAT_DD to avoid clearing the rings if the transmits have not completed by the time we enter ixgbe_clean_test_rings(). - Exit early on ixgbe_check_lbtest_frame() failure. This change fixes a crash during ethtool diagnostic (ethtool -t). Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com> Tested-by: Andrew Bowers <andrewx.bowers@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
This commit is contained in:
parent
c69be946d6
commit
761c2a48c7
@ -1916,8 +1916,6 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
|
||||
unsigned int size)
|
||||
{
|
||||
union ixgbe_adv_rx_desc *rx_desc;
|
||||
struct ixgbe_rx_buffer *rx_buffer;
|
||||
struct ixgbe_tx_buffer *tx_buffer;
|
||||
u16 rx_ntc, tx_ntc, count = 0;
|
||||
|
||||
/* initialize next to clean and descriptor values */
|
||||
@ -1925,25 +1923,15 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
|
||||
tx_ntc = tx_ring->next_to_clean;
|
||||
rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
|
||||
|
||||
while (rx_desc->wb.upper.length) {
|
||||
/* check Rx buffer */
|
||||
rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
|
||||
while (tx_ntc != tx_ring->next_to_use) {
|
||||
union ixgbe_adv_tx_desc *tx_desc;
|
||||
struct ixgbe_tx_buffer *tx_buffer;
|
||||
|
||||
/* sync Rx buffer for CPU read */
|
||||
dma_sync_single_for_cpu(rx_ring->dev,
|
||||
rx_buffer->dma,
|
||||
ixgbe_rx_bufsz(rx_ring),
|
||||
DMA_FROM_DEVICE);
|
||||
tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc);
|
||||
|
||||
/* verify contents of skb */
|
||||
if (ixgbe_check_lbtest_frame(rx_buffer, size))
|
||||
count++;
|
||||
|
||||
/* sync Rx buffer for device write */
|
||||
dma_sync_single_for_device(rx_ring->dev,
|
||||
rx_buffer->dma,
|
||||
ixgbe_rx_bufsz(rx_ring),
|
||||
DMA_FROM_DEVICE);
|
||||
/* if DD is not set transmit has not completed */
|
||||
if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
|
||||
return count;
|
||||
|
||||
/* unmap buffer on Tx side */
|
||||
tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
|
||||
@ -1958,13 +1946,40 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
|
||||
DMA_TO_DEVICE);
|
||||
dma_unmap_len_set(tx_buffer, len, 0);
|
||||
|
||||
/* increment Rx/Tx next to clean counters */
|
||||
rx_ntc++;
|
||||
if (rx_ntc == rx_ring->count)
|
||||
rx_ntc = 0;
|
||||
/* increment Tx next to clean counter */
|
||||
tx_ntc++;
|
||||
if (tx_ntc == tx_ring->count)
|
||||
tx_ntc = 0;
|
||||
}
|
||||
|
||||
while (rx_desc->wb.upper.length) {
|
||||
struct ixgbe_rx_buffer *rx_buffer;
|
||||
|
||||
/* check Rx buffer */
|
||||
rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
|
||||
|
||||
/* sync Rx buffer for CPU read */
|
||||
dma_sync_single_for_cpu(rx_ring->dev,
|
||||
rx_buffer->dma,
|
||||
ixgbe_rx_bufsz(rx_ring),
|
||||
DMA_FROM_DEVICE);
|
||||
|
||||
/* verify contents of skb */
|
||||
if (ixgbe_check_lbtest_frame(rx_buffer, size))
|
||||
count++;
|
||||
else
|
||||
break;
|
||||
|
||||
/* sync Rx buffer for device write */
|
||||
dma_sync_single_for_device(rx_ring->dev,
|
||||
rx_buffer->dma,
|
||||
ixgbe_rx_bufsz(rx_ring),
|
||||
DMA_FROM_DEVICE);
|
||||
|
||||
/* increment Rx next to clean counter */
|
||||
rx_ntc++;
|
||||
if (rx_ntc == rx_ring->count)
|
||||
rx_ntc = 0;
|
||||
|
||||
/* fetch next descriptor */
|
||||
rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
|
||||
|
Loading…
x
Reference in New Issue
Block a user