Staging: et131x: tidy up names for the TX structures
Signed-off-by: Alan Cox <alan@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
commit b711b2e0fa
parent fb70ed6710
@@ -97,10 +97,10 @@
 static void et131x_update_tcb_list(struct et131x_adapter *etdev);
 static void et131x_check_send_wait_list(struct et131x_adapter *etdev);
 static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
-					   PMP_TCB pMpTcb);
+					   struct tcb *tcb);
 static int et131x_send_packet(struct sk_buff *skb,
 			      struct et131x_adapter *etdev);
-static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb);
+static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb);

 /**
  * et131x_tx_dma_memory_alloc
@@ -117,12 +117,12 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb);
 int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
 {
 	int desc_size = 0;
-	TX_RING_t *tx_ring = &adapter->TxRing;
+	struct tx_ring *tx_ring = &adapter->tx_ring;

 	/* Allocate memory for the TCB's (Transmit Control Block) */
-	adapter->TxRing.MpTcbMem = (MP_TCB *)kcalloc(NUM_TCB, sizeof(MP_TCB),
-						     GFP_ATOMIC | GFP_DMA);
-	if (!adapter->TxRing.MpTcbMem) {
+	adapter->tx_ring.MpTcbMem = (struct tcb *)
+		kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
+	if (!adapter->tx_ring.MpTcbMem) {
 		dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
 		return -ENOMEM;
 	}
@@ -130,11 +130,11 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
 	/* Allocate enough memory for the Tx descriptor ring, and allocate
 	 * some extra so that the ring can be aligned on a 4k boundary.
 	 */
-	desc_size = (sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;
-	tx_ring->pTxDescRingVa =
-	    (PTX_DESC_ENTRY_t) pci_alloc_consistent(adapter->pdev, desc_size,
-						    &tx_ring->pTxDescRingPa);
-	if (!adapter->TxRing.pTxDescRingVa) {
+	desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
+	tx_ring->tx_desc_ring =
+	    (struct tx_desc *) pci_alloc_consistent(adapter->pdev, desc_size,
+						    &tx_ring->tx_desc_ring_pa);
+	if (!adapter->tx_ring.tx_desc_ring) {
 		dev_err(&adapter->pdev->dev, "Cannot alloc memory for Tx Ring\n");
 		return -ENOMEM;
 	}
@@ -146,20 +146,20 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
 	 * are ever returned, make sure the high part is retrieved here before
 	 * storing the adjusted address.
 	 */
-	tx_ring->pTxDescRingAdjustedPa = tx_ring->pTxDescRingPa;
+	tx_ring->pTxDescRingAdjustedPa = tx_ring->tx_desc_ring_pa;

 	/* Align Tx Descriptor Ring on a 4k (0x1000) byte boundary */
 	et131x_align_allocated_memory(adapter,
 				      &tx_ring->pTxDescRingAdjustedPa,
 				      &tx_ring->TxDescOffset, 0x0FFF);

-	tx_ring->pTxDescRingVa += tx_ring->TxDescOffset;
+	tx_ring->tx_desc_ring += tx_ring->TxDescOffset;

 	/* Allocate memory for the Tx status block */
 	tx_ring->pTxStatusVa = pci_alloc_consistent(adapter->pdev,
 						    sizeof(TX_STATUS_BLOCK_t),
 						    &tx_ring->pTxStatusPa);
-	if (!adapter->TxRing.pTxStatusPa) {
+	if (!adapter->tx_ring.pTxStatusPa) {
 		dev_err(&adapter->pdev->dev,
 			"Cannot alloc memory for Tx status block\n");
 		return -ENOMEM;
@@ -169,7 +169,7 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
 	tx_ring->pTxDummyBlkVa = pci_alloc_consistent(adapter->pdev,
 						      NIC_MIN_PACKET_SIZE,
 						      &tx_ring->pTxDummyBlkPa);
-	if (!adapter->TxRing.pTxDummyBlkPa) {
+	if (!adapter->tx_ring.pTxDummyBlkPa) {
 		dev_err(&adapter->pdev->dev,
 			"Cannot alloc memory for Tx dummy buffer\n");
 		return -ENOMEM;
@@ -188,43 +188,43 @@ void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
 {
 	int desc_size = 0;

-	if (adapter->TxRing.pTxDescRingVa) {
+	if (adapter->tx_ring.tx_desc_ring) {
 		/* Free memory relating to Tx rings here */
-		adapter->TxRing.pTxDescRingVa -= adapter->TxRing.TxDescOffset;
+		adapter->tx_ring.tx_desc_ring -= adapter->tx_ring.TxDescOffset;

-		desc_size =
-		    (sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;
+		desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
+							+ 4096 - 1;

 		pci_free_consistent(adapter->pdev,
 				    desc_size,
-				    adapter->TxRing.pTxDescRingVa,
-				    adapter->TxRing.pTxDescRingPa);
+				    adapter->tx_ring.tx_desc_ring,
+				    adapter->tx_ring.tx_desc_ring_pa);

-		adapter->TxRing.pTxDescRingVa = NULL;
+		adapter->tx_ring.tx_desc_ring = NULL;
 	}

 	/* Free memory for the Tx status block */
-	if (adapter->TxRing.pTxStatusVa) {
+	if (adapter->tx_ring.pTxStatusVa) {
 		pci_free_consistent(adapter->pdev,
 				    sizeof(TX_STATUS_BLOCK_t),
-				    adapter->TxRing.pTxStatusVa,
-				    adapter->TxRing.pTxStatusPa);
+				    adapter->tx_ring.pTxStatusVa,
+				    adapter->tx_ring.pTxStatusPa);

-		adapter->TxRing.pTxStatusVa = NULL;
+		adapter->tx_ring.pTxStatusVa = NULL;
 	}

 	/* Free memory for the dummy buffer */
-	if (adapter->TxRing.pTxDummyBlkVa) {
+	if (adapter->tx_ring.pTxDummyBlkVa) {
 		pci_free_consistent(adapter->pdev,
 				    NIC_MIN_PACKET_SIZE,
-				    adapter->TxRing.pTxDummyBlkVa,
-				    adapter->TxRing.pTxDummyBlkPa);
+				    adapter->tx_ring.pTxDummyBlkVa,
+				    adapter->tx_ring.pTxDummyBlkPa);

-		adapter->TxRing.pTxDummyBlkVa = NULL;
+		adapter->tx_ring.pTxDummyBlkVa = NULL;
 	}

-	/* Free the memory for MP_TCB structures */
-	kfree(adapter->TxRing.MpTcbMem);
+	/* Free the memory for the tcb structures */
+	kfree(adapter->tx_ring.MpTcbMem);
 }

 /**
@@ -236,9 +236,9 @@ void ConfigTxDmaRegs(struct et131x_adapter *etdev)
 	struct _TXDMA_t __iomem *txdma = &etdev->regs->txdma;

 	/* Load the hardware with the start of the transmit descriptor ring. */
-	writel((uint32_t) (etdev->TxRing.pTxDescRingAdjustedPa >> 32),
+	writel((u32) (etdev->tx_ring.pTxDescRingAdjustedPa >> 32),
 	       &txdma->pr_base_hi);
-	writel((uint32_t) etdev->TxRing.pTxDescRingAdjustedPa,
+	writel((u32) etdev->tx_ring.pTxDescRingAdjustedPa,
 	       &txdma->pr_base_lo);

 	/* Initialise the transmit DMA engine */
@@ -252,12 +252,12 @@ void ConfigTxDmaRegs(struct et131x_adapter *etdev)
 	 * storing the adjusted address.
 	 */
 	writel(0, &txdma->dma_wb_base_hi);
-	writel(etdev->TxRing.pTxStatusPa, &txdma->dma_wb_base_lo);
+	writel(etdev->tx_ring.pTxStatusPa, &txdma->dma_wb_base_lo);

-	memset(etdev->TxRing.pTxStatusVa, 0, sizeof(TX_STATUS_BLOCK_t));
+	memset(etdev->tx_ring.pTxStatusVa, 0, sizeof(TX_STATUS_BLOCK_t));

 	writel(0, &txdma->service_request);
-	etdev->TxRing.txDmaReadyToSend = 0;
+	etdev->tx_ring.txDmaReadyToSend = 0;
 }

 /**
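ConfigTxDmaRegs above programs the 64-bit adjusted ring base as two 32-bit
register writes, pr_base_hi taking the top half and pr_base_lo the bottom.
A minimal sketch of that split; write_dma_base is an illustrative name,
not a helper the driver defines:

	#include <linux/io.h>

	/* Hypothetical helper: split a 64-bit bus address across the
	 * hi/lo register pair the TXDMA block expects. */
	static void write_dma_base(u64 pa, void __iomem *hi, void __iomem *lo)
	{
		writel((u32)(pa >> 32), hi);	/* upper 32 bits */
		writel((u32)pa, lo);		/* lower 32 bits */
	}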
@@ -292,39 +292,39 @@ void et131x_tx_dma_enable(struct et131x_adapter *etdev)
  */
 void et131x_init_send(struct et131x_adapter *adapter)
 {
-	PMP_TCB pMpTcb;
-	uint32_t TcbCount;
-	TX_RING_t *tx_ring;
+	struct tcb *tcb;
+	u32 count;
+	struct tx_ring *tx_ring;

 	/* Setup some convenience pointers */
-	tx_ring = &adapter->TxRing;
-	pMpTcb = adapter->TxRing.MpTcbMem;
+	tx_ring = &adapter->tx_ring;
+	tcb = adapter->tx_ring.MpTcbMem;

-	tx_ring->TCBReadyQueueHead = pMpTcb;
+	tx_ring->TCBReadyQueueHead = tcb;

 	/* Go through and set up each TCB */
-	for (TcbCount = 0; TcbCount < NUM_TCB; TcbCount++) {
-		memset(pMpTcb, 0, sizeof(MP_TCB));
+	for (count = 0; count < NUM_TCB; count++) {
+		memset(tcb, 0, sizeof(struct tcb));

 		/* Set the link pointer in HW TCB to the next TCB in the
 		 * chain. If this is the last TCB in the chain, also set the
 		 * tail pointer.
 		 */
-		if (TcbCount < NUM_TCB - 1) {
-			pMpTcb->Next = pMpTcb + 1;
+		if (count < NUM_TCB - 1) {
+			tcb->Next = tcb + 1;
 		} else {
-			tx_ring->TCBReadyQueueTail = pMpTcb;
-			pMpTcb->Next = (PMP_TCB) NULL;
+			tx_ring->TCBReadyQueueTail = tcb;
+			tcb->Next = NULL;
 		}

-		pMpTcb++;
+		tcb++;
 	}

 	/* Curr send queue should now be empty */
-	tx_ring->CurrSendHead = (PMP_TCB) NULL;
-	tx_ring->CurrSendTail = (PMP_TCB) NULL;
+	tx_ring->CurrSendHead = NULL;
+	tx_ring->CurrSendTail = NULL;

-	INIT_LIST_HEAD(&adapter->TxRing.SendWaitQueue);
+	INIT_LIST_HEAD(&adapter->tx_ring.SendWaitQueue);
 }

 /**
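et131x_init_send above threads the contiguous TCB array into a singly
linked ready list with separate head and tail pointers. The same idiom in
isolation, with generic names rather than the driver's:

	struct node {
		struct node *next;
		/* payload fields would follow */
	};

	/* Link n contiguous elements into a head/tail free list. */
	static void init_free_list(struct node *mem, int n,
				   struct node **head, struct node **tail)
	{
		int i;

		*head = mem;
		for (i = 0; i < n; i++)
			mem[i].next = (i < n - 1) ? &mem[i + 1] : NULL;
		*tail = &mem[n - 1];
	}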
@@ -348,7 +348,7 @@ int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
 	 */

 	/* Queue is not empty or TCB is not available */
-	if (!list_empty(&etdev->TxRing.SendWaitQueue) ||
+	if (!list_empty(&etdev->tx_ring.SendWaitQueue) ||
 	    MP_TCB_RESOURCES_NOT_AVAILABLE(etdev)) {
 		/* NOTE: If there's an error on send, no need to queue the
 		 * packet under Linux; if we just send an error up to the
@@ -404,86 +404,85 @@ static int et131x_send_packet(struct sk_buff *skb,
 			      struct et131x_adapter *etdev)
 {
 	int status = 0;
-	PMP_TCB pMpTcb = NULL;
+	struct tcb *tcb = NULL;
 	uint16_t *shbufva;
 	unsigned long flags;

 	/* All packets must have at least a MAC address and a protocol type */
-	if (skb->len < ETH_HLEN) {
+	if (skb->len < ETH_HLEN)
 		return -EIO;
-	}

 	/* Get a TCB for this packet */
 	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

-	pMpTcb = etdev->TxRing.TCBReadyQueueHead;
+	tcb = etdev->tx_ring.TCBReadyQueueHead;

-	if (pMpTcb == NULL) {
+	if (tcb == NULL) {
 		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
 		return -ENOMEM;
 	}

-	etdev->TxRing.TCBReadyQueueHead = pMpTcb->Next;
+	etdev->tx_ring.TCBReadyQueueHead = tcb->Next;

-	if (etdev->TxRing.TCBReadyQueueHead == NULL)
-		etdev->TxRing.TCBReadyQueueTail = NULL;
+	if (etdev->tx_ring.TCBReadyQueueHead == NULL)
+		etdev->tx_ring.TCBReadyQueueTail = NULL;

 	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);

-	pMpTcb->PacketLength = skb->len;
-	pMpTcb->Packet = skb;
+	tcb->PacketLength = skb->len;
+	tcb->Packet = skb;

 	if ((skb->data != NULL) && ((skb->len - skb->data_len) >= 6)) {
 		shbufva = (uint16_t *) skb->data;

 		if ((shbufva[0] == 0xffff) &&
 		    (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
-			pMpTcb->Flags |= fMP_DEST_BROAD;
+			tcb->Flags |= fMP_DEST_BROAD;
 		} else if ((shbufva[0] & 0x3) == 0x0001) {
-			pMpTcb->Flags |= fMP_DEST_MULTI;
+			tcb->Flags |= fMP_DEST_MULTI;
 		}
 	}

-	pMpTcb->Next = NULL;
+	tcb->Next = NULL;

 	/* Call the NIC specific send handler. */
 	if (status == 0)
-		status = nic_send_packet(etdev, pMpTcb);
+		status = nic_send_packet(etdev, tcb);

 	if (status != 0) {
 		spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

-		if (etdev->TxRing.TCBReadyQueueTail) {
-			etdev->TxRing.TCBReadyQueueTail->Next = pMpTcb;
+		if (etdev->tx_ring.TCBReadyQueueTail) {
+			etdev->tx_ring.TCBReadyQueueTail->Next = tcb;
 		} else {
 			/* Apparently ready Q is empty. */
-			etdev->TxRing.TCBReadyQueueHead = pMpTcb;
+			etdev->tx_ring.TCBReadyQueueHead = tcb;
 		}

-		etdev->TxRing.TCBReadyQueueTail = pMpTcb;
+		etdev->tx_ring.TCBReadyQueueTail = tcb;
 		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
 		return status;
 	}
-	WARN_ON(etdev->TxRing.nBusySend > NUM_TCB);
+	WARN_ON(etdev->tx_ring.nBusySend > NUM_TCB);
 	return 0;
 }

 /**
  * nic_send_packet - NIC specific send handler for version B silicon.
  * @etdev: pointer to our adapter
- * @pMpTcb: pointer to MP_TCB
+ * @tcb: pointer to struct tcb
  *
  * Returns 0 or errno.
  */
-static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
+static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
 {
-	uint32_t loopIndex;
-	TX_DESC_ENTRY_t CurDesc[24];
-	uint32_t FragmentNumber = 0;
-	uint32_t thiscopy, remainder;
-	struct sk_buff *pPacket = pMpTcb->Packet;
-	uint32_t FragListCount = skb_shinfo(pPacket)->nr_frags + 1;
-	struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];
+	u32 i;
+	struct tx_desc desc[24];	/* 24 x 16 byte */
+	u32 frag = 0;
+	u32 thiscopy, remainder;
+	struct sk_buff *skb = tcb->Packet;
+	u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
+	struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
 	unsigned long flags;

 	/* Part of the optimizations of this send routine restrict us to
@@ -494,17 +493,16 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
 	 * number of fragments. If needed, we can call this function,
 	 * although it is less efficient.
 	 */
-	if (FragListCount > 23) {
+	if (nr_frags > 23)
 		return -EIO;
-	}

-	memset(CurDesc, 0, sizeof(TX_DESC_ENTRY_t) * (FragListCount + 1));
+	memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));

-	for (loopIndex = 0; loopIndex < FragListCount; loopIndex++) {
+	for (i = 0; i < nr_frags; i++) {
 		/* If there is something in this element, lets get a
 		 * descriptor from the ring and get the necessary data
 		 */
-		if (loopIndex == 0) {
+		if (i == 0) {
 			/* If the fragments are smaller than a standard MTU,
 			 * then map them to a single descriptor in the Tx
 			 * Desc ring. However, if they're larger, as is
@@ -514,165 +512,165 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
 			 * This will work until we determine why the hardware
 			 * doesn't seem to like large fragments.
 			 */
-			if ((pPacket->len - pPacket->data_len) <= 1514) {
-				CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
+			if ((skb->len - skb->data_len) <= 1514) {
+				desc[frag].addr_hi = 0;
 				/* Low 16bits are length, high is vlan and
 				   unused currently so zero */
-				CurDesc[FragmentNumber].word2 =
-				    pPacket->len - pPacket->data_len;
+				desc[frag].len_vlan =
+					skb->len - skb->data_len;

 				/* NOTE: Here, the dma_addr_t returned from
 				 * pci_map_single() is implicitly cast as a
-				 * uint32_t. Although dma_addr_t can be
+				 * u32. Although dma_addr_t can be
 				 * 64-bit, the address returned by
 				 * pci_map_single() is always 32-bit
 				 * addressable (as defined by the pci/dma
 				 * subsystem)
 				 */
-				CurDesc[FragmentNumber++].DataBufferPtrLow =
+				desc[frag++].addr_lo =
 				    pci_map_single(etdev->pdev,
-						   pPacket->data,
-						   pPacket->len -
-						   pPacket->data_len,
+						   skb->data,
+						   skb->len -
+						   skb->data_len,
 						   PCI_DMA_TODEVICE);
 			} else {
-				CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
-				CurDesc[FragmentNumber].word2 =
-				    (pPacket->len - pPacket->data_len) / 2;
+				desc[frag].addr_hi = 0;
+				desc[frag].len_vlan =
+					(skb->len - skb->data_len) / 2;

 				/* NOTE: Here, the dma_addr_t returned from
 				 * pci_map_single() is implicitly cast as a
-				 * uint32_t. Although dma_addr_t can be
+				 * u32. Although dma_addr_t can be
 				 * 64-bit, the address returned by
 				 * pci_map_single() is always 32-bit
 				 * addressable (as defined by the pci/dma
 				 * subsystem)
 				 */
-				CurDesc[FragmentNumber++].DataBufferPtrLow =
+				desc[frag++].addr_lo =
 				    pci_map_single(etdev->pdev,
-						   pPacket->data,
-						   ((pPacket->len -
-						     pPacket->data_len) / 2),
+						   skb->data,
+						   ((skb->len -
+						     skb->data_len) / 2),
 						   PCI_DMA_TODEVICE);
-				CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
+				desc[frag].addr_hi = 0;

-				CurDesc[FragmentNumber].word2 =
-				    (pPacket->len - pPacket->data_len) / 2;
+				desc[frag].len_vlan =
+					(skb->len - skb->data_len) / 2;

 				/* NOTE: Here, the dma_addr_t returned from
 				 * pci_map_single() is implicitly cast as a
-				 * uint32_t. Although dma_addr_t can be
+				 * u32. Although dma_addr_t can be
 				 * 64-bit, the address returned by
 				 * pci_map_single() is always 32-bit
 				 * addressable (as defined by the pci/dma
 				 * subsystem)
 				 */
-				CurDesc[FragmentNumber++].DataBufferPtrLow =
+				desc[frag++].addr_lo =
 				    pci_map_single(etdev->pdev,
-						   pPacket->data +
-						   ((pPacket->len -
-						     pPacket->data_len) / 2),
-						   ((pPacket->len -
-						     pPacket->data_len) / 2),
+						   skb->data +
+						   ((skb->len -
+						     skb->data_len) / 2),
+						   ((skb->len -
+						     skb->data_len) / 2),
 						   PCI_DMA_TODEVICE);
 			}
 		} else {
-			CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
-			CurDesc[FragmentNumber].word2 =
-			    pFragList[loopIndex - 1].size;
+			desc[frag].addr_hi = 0;
+			desc[frag].len_vlan =
+				frags[i - 1].size;

 			/* NOTE: Here, the dma_addr_t returned from
-			 * pci_map_page() is implicitly cast as a uint32_t.
+			 * pci_map_page() is implicitly cast as a u32.
 			 * Although dma_addr_t can be 64-bit, the address
 			 * returned by pci_map_page() is always 32-bit
 			 * addressable (as defined by the pci/dma subsystem)
 			 */
-			CurDesc[FragmentNumber++].DataBufferPtrLow =
+			desc[frag++].addr_lo =
 			    pci_map_page(etdev->pdev,
-					 pFragList[loopIndex - 1].page,
-					 pFragList[loopIndex - 1].page_offset,
-					 pFragList[loopIndex - 1].size,
+					 frags[i - 1].page,
+					 frags[i - 1].page_offset,
+					 frags[i - 1].size,
 					 PCI_DMA_TODEVICE);
 		}
 	}

-	if (FragmentNumber == 0)
+	if (frag == 0)
 		return -EIO;

 	if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
-		if (++etdev->TxRing.TxPacketsSinceLastinterrupt ==
+		if (++etdev->tx_ring.TxPacketsSinceLastinterrupt ==
 		    PARM_TX_NUM_BUFS_DEF) {
 			/* Last element & Interrupt flag */
-			CurDesc[FragmentNumber - 1].word3 = 0x5;
-			etdev->TxRing.TxPacketsSinceLastinterrupt = 0;
+			desc[frag - 1].flags = 0x5;
+			etdev->tx_ring.TxPacketsSinceLastinterrupt = 0;
 		} else { /* Last element */
-			CurDesc[FragmentNumber - 1].word3 = 0x1;
+			desc[frag - 1].flags = 0x1;
 		}
 	} else {
-		CurDesc[FragmentNumber - 1].word3 = 0x5;
+		desc[frag - 1].flags = 0x5;
 	}
-	CurDesc[0].word3 |= 2;	/* First element flag */
+	desc[0].flags |= 2;	/* First element flag */

-	pMpTcb->WrIndexStart = etdev->TxRing.txDmaReadyToSend;
-	pMpTcb->PacketStaleCount = 0;
+	tcb->WrIndexStart = etdev->tx_ring.txDmaReadyToSend;
+	tcb->PacketStaleCount = 0;

 	spin_lock_irqsave(&etdev->SendHWLock, flags);

 	thiscopy = NUM_DESC_PER_RING_TX -
-	    INDEX10(etdev->TxRing.txDmaReadyToSend);
+	    INDEX10(etdev->tx_ring.txDmaReadyToSend);

-	if (thiscopy >= FragmentNumber) {
+	if (thiscopy >= frag) {
 		remainder = 0;
-		thiscopy = FragmentNumber;
+		thiscopy = frag;
 	} else {
-		remainder = FragmentNumber - thiscopy;
+		remainder = frag - thiscopy;
 	}

-	memcpy(etdev->TxRing.pTxDescRingVa +
-	       INDEX10(etdev->TxRing.txDmaReadyToSend), CurDesc,
-	       sizeof(TX_DESC_ENTRY_t) * thiscopy);
+	memcpy(etdev->tx_ring.tx_desc_ring +
+	       INDEX10(etdev->tx_ring.txDmaReadyToSend), desc,
+	       sizeof(struct tx_desc) * thiscopy);

-	add_10bit(&etdev->TxRing.txDmaReadyToSend, thiscopy);
+	add_10bit(&etdev->tx_ring.txDmaReadyToSend, thiscopy);

-	if (INDEX10(etdev->TxRing.txDmaReadyToSend)== 0 ||
-	    INDEX10(etdev->TxRing.txDmaReadyToSend) == NUM_DESC_PER_RING_TX) {
-		etdev->TxRing.txDmaReadyToSend &= ~ET_DMA10_MASK;
-		etdev->TxRing.txDmaReadyToSend ^= ET_DMA10_WRAP;
+	if (INDEX10(etdev->tx_ring.txDmaReadyToSend)== 0 ||
+	    INDEX10(etdev->tx_ring.txDmaReadyToSend) == NUM_DESC_PER_RING_TX) {
+		etdev->tx_ring.txDmaReadyToSend &= ~ET_DMA10_MASK;
+		etdev->tx_ring.txDmaReadyToSend ^= ET_DMA10_WRAP;
 	}

 	if (remainder) {
-		memcpy(etdev->TxRing.pTxDescRingVa,
-		       CurDesc + thiscopy,
-		       sizeof(TX_DESC_ENTRY_t) * remainder);
+		memcpy(etdev->tx_ring.tx_desc_ring,
+		       desc + thiscopy,
+		       sizeof(struct tx_desc) * remainder);

-		add_10bit(&etdev->TxRing.txDmaReadyToSend, remainder);
+		add_10bit(&etdev->tx_ring.txDmaReadyToSend, remainder);
 	}

-	if (INDEX10(etdev->TxRing.txDmaReadyToSend) == 0) {
-		if (etdev->TxRing.txDmaReadyToSend)
-			pMpTcb->WrIndex = NUM_DESC_PER_RING_TX - 1;
+	if (INDEX10(etdev->tx_ring.txDmaReadyToSend) == 0) {
+		if (etdev->tx_ring.txDmaReadyToSend)
+			tcb->WrIndex = NUM_DESC_PER_RING_TX - 1;
 		else
-			pMpTcb->WrIndex= ET_DMA10_WRAP | (NUM_DESC_PER_RING_TX - 1);
+			tcb->WrIndex= ET_DMA10_WRAP | (NUM_DESC_PER_RING_TX - 1);
 	} else
-		pMpTcb->WrIndex = etdev->TxRing.txDmaReadyToSend - 1;
+		tcb->WrIndex = etdev->tx_ring.txDmaReadyToSend - 1;

 	spin_lock(&etdev->TCBSendQLock);

-	if (etdev->TxRing.CurrSendTail)
-		etdev->TxRing.CurrSendTail->Next = pMpTcb;
+	if (etdev->tx_ring.CurrSendTail)
+		etdev->tx_ring.CurrSendTail->Next = tcb;
 	else
-		etdev->TxRing.CurrSendHead = pMpTcb;
+		etdev->tx_ring.CurrSendHead = tcb;

-	etdev->TxRing.CurrSendTail = pMpTcb;
+	etdev->tx_ring.CurrSendTail = tcb;

-	WARN_ON(pMpTcb->Next != NULL);
+	WARN_ON(tcb->Next != NULL);

-	etdev->TxRing.nBusySend++;
+	etdev->tx_ring.nBusySend++;

 	spin_unlock(&etdev->TCBSendQLock);

 	/* Write the new write pointer back to the device. */
-	writel(etdev->TxRing.txDmaReadyToSend,
+	writel(etdev->tx_ring.txDmaReadyToSend,
 	       &etdev->regs->txdma.service_request);

 	/* For Gig only, we use Tx Interrupt coalescing. Enable the software
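The index arithmetic above treats txDmaReadyToSend and WrIndex as a 10-bit
descriptor index with bit 10 serving as a wrap flag, so a full ring can be
told apart from an empty one. A sketch of the helpers, assuming the 0x3FF
mask / 0x400 wrap layout that the masking in this function implies; the
driver's headers hold the authoritative definitions:

	#define ET_DMA10_MASK	0x3FF	/* 10-bit descriptor index */
	#define ET_DMA10_WRAP	0x400	/* toggled on each pass around the ring */
	#define INDEX10(x)	((x) & ET_DMA10_MASK)

	/* Advance the index while preserving the current wrap state. */
	static inline void add_10bit(u32 *v, int n)
	{
		*v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
	}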
@@ -689,72 +687,72 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)


 /**
- * et131x_free_send_packet - Recycle a MP_TCB, complete the packet if necessary
+ * et131x_free_send_packet - Recycle a struct tcb
  * @etdev: pointer to our adapter
- * @pMpTcb: pointer to MP_TCB
+ * @tcb: pointer to struct tcb
  *
+ * Complete the packet if necessary
  * Assumption - Send spinlock has been acquired
  */
 inline void et131x_free_send_packet(struct et131x_adapter *etdev,
-				    PMP_TCB pMpTcb)
+				    struct tcb *tcb)
 {
 	unsigned long flags;
-	TX_DESC_ENTRY_t *desc = NULL;
+	struct tx_desc *desc = NULL;
 	struct net_device_stats *stats = &etdev->net_stats;

-	if (pMpTcb->Flags & fMP_DEST_BROAD)
+	if (tcb->Flags & fMP_DEST_BROAD)
 		atomic_inc(&etdev->Stats.brdcstxmt);
-	else if (pMpTcb->Flags & fMP_DEST_MULTI)
+	else if (tcb->Flags & fMP_DEST_MULTI)
 		atomic_inc(&etdev->Stats.multixmt);
 	else
 		atomic_inc(&etdev->Stats.unixmt);

-	if (pMpTcb->Packet) {
-		stats->tx_bytes += pMpTcb->Packet->len;
+	if (tcb->Packet) {
+		stats->tx_bytes += tcb->Packet->len;

 		/* Iterate through the TX descriptors on the ring
 		 * corresponding to this packet and umap the fragments
 		 * they point to
 		 */
 		do {
-			desc =
-			    (TX_DESC_ENTRY_t *) (etdev->TxRing.pTxDescRingVa +
-						 INDEX10(pMpTcb->WrIndexStart));
+			desc =(struct tx_desc *) (etdev->tx_ring.tx_desc_ring +
+						INDEX10(tcb->WrIndexStart));

 			pci_unmap_single(etdev->pdev,
-					 desc->DataBufferPtrLow,
-					 desc->word2, PCI_DMA_TODEVICE);
+					 desc->addr_lo,
+					 desc->len_vlan, PCI_DMA_TODEVICE);

-			add_10bit(&pMpTcb->WrIndexStart, 1);
-			if (INDEX10(pMpTcb->WrIndexStart) >=
+			add_10bit(&tcb->WrIndexStart, 1);
+			if (INDEX10(tcb->WrIndexStart) >=
 			    NUM_DESC_PER_RING_TX) {
-				pMpTcb->WrIndexStart &= ~ET_DMA10_MASK;
-				pMpTcb->WrIndexStart ^= ET_DMA10_WRAP;
+				tcb->WrIndexStart &= ~ET_DMA10_MASK;
+				tcb->WrIndexStart ^= ET_DMA10_WRAP;
 			}
-		} while (desc != (etdev->TxRing.pTxDescRingVa +
-				  INDEX10(pMpTcb->WrIndex)));
+		} while (desc != (etdev->tx_ring.tx_desc_ring +
+				  INDEX10(tcb->WrIndex)));

-		dev_kfree_skb_any(pMpTcb->Packet);
+		dev_kfree_skb_any(tcb->Packet);
 	}

-	memset(pMpTcb, 0, sizeof(MP_TCB));
+	memset(tcb, 0, sizeof(struct tcb));

 	/* Add the TCB to the Ready Q */
 	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

 	etdev->Stats.opackets++;

-	if (etdev->TxRing.TCBReadyQueueTail) {
-		etdev->TxRing.TCBReadyQueueTail->Next = pMpTcb;
+	if (etdev->tx_ring.TCBReadyQueueTail) {
+		etdev->tx_ring.TCBReadyQueueTail->Next = tcb;
 	} else {
 		/* Apparently ready Q is empty. */
-		etdev->TxRing.TCBReadyQueueHead = pMpTcb;
+		etdev->tx_ring.TCBReadyQueueHead = tcb;
 	}

-	etdev->TxRing.TCBReadyQueueTail = pMpTcb;
+	etdev->tx_ring.TCBReadyQueueTail = tcb;

 	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
-	WARN_ON(etdev->TxRing.nBusySend < 0);
+	WARN_ON(etdev->tx_ring.nBusySend < 0);
 }

 /**
@@ -765,52 +763,52 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
  */
 void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
 {
-	PMP_TCB pMpTcb;
+	struct tcb *tcb;
 	struct list_head *entry;
 	unsigned long flags;
-	uint32_t FreeCounter = 0;
+	u32 freed = 0;

-	while (!list_empty(&etdev->TxRing.SendWaitQueue)) {
+	while (!list_empty(&etdev->tx_ring.SendWaitQueue)) {
 		spin_lock_irqsave(&etdev->SendWaitLock, flags);

-		etdev->TxRing.nWaitSend--;
+		etdev->tx_ring.nWaitSend--;
 		spin_unlock_irqrestore(&etdev->SendWaitLock, flags);

-		entry = etdev->TxRing.SendWaitQueue.next;
+		entry = etdev->tx_ring.SendWaitQueue.next;
 	}

-	etdev->TxRing.nWaitSend = 0;
+	etdev->tx_ring.nWaitSend = 0;

 	/* Any packets being sent? Check the first TCB on the send list */
 	spin_lock_irqsave(&etdev->TCBSendQLock, flags);

-	pMpTcb = etdev->TxRing.CurrSendHead;
+	tcb = etdev->tx_ring.CurrSendHead;

-	while ((pMpTcb != NULL) && (FreeCounter < NUM_TCB)) {
-		PMP_TCB pNext = pMpTcb->Next;
+	while ((tcb != NULL) && (freed < NUM_TCB)) {
+		struct tcb *pNext = tcb->Next;

-		etdev->TxRing.CurrSendHead = pNext;
+		etdev->tx_ring.CurrSendHead = pNext;

 		if (pNext == NULL)
-			etdev->TxRing.CurrSendTail = NULL;
+			etdev->tx_ring.CurrSendTail = NULL;

-		etdev->TxRing.nBusySend--;
+		etdev->tx_ring.nBusySend--;

 		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);

-		FreeCounter++;
-		et131x_free_send_packet(etdev, pMpTcb);
+		freed++;
+		et131x_free_send_packet(etdev, tcb);

 		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

-		pMpTcb = etdev->TxRing.CurrSendHead;
+		tcb = etdev->tx_ring.CurrSendHead;
 	}

-	WARN_ON(FreeCounter == NUM_TCB);
+	WARN_ON(freed == NUM_TCB);

 	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);

-	etdev->TxRing.nBusySend = 0;
+	etdev->tx_ring.nBusySend = 0;
 }

 /**
@@ -844,53 +842,53 @@ void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
 static void et131x_update_tcb_list(struct et131x_adapter *etdev)
 {
 	unsigned long flags;
-	u32 ServiceComplete;
-	PMP_TCB pMpTcb;
+	u32 serviced;
+	struct tcb * tcb;
 	u32 index;

-	ServiceComplete = readl(&etdev->regs->txdma.NewServiceComplete);
-	index = INDEX10(ServiceComplete);
+	serviced = readl(&etdev->regs->txdma.NewServiceComplete);
+	index = INDEX10(serviced);

 	/* Has the ring wrapped? Process any descriptors that do not have
 	 * the same "wrap" indicator as the current completion indicator
 	 */
 	spin_lock_irqsave(&etdev->TCBSendQLock, flags);

-	pMpTcb = etdev->TxRing.CurrSendHead;
+	tcb = etdev->tx_ring.CurrSendHead;

-	while (pMpTcb &&
-	       ((ServiceComplete ^ pMpTcb->WrIndex) & ET_DMA10_WRAP) &&
-	       index < INDEX10(pMpTcb->WrIndex)) {
-		etdev->TxRing.nBusySend--;
-		etdev->TxRing.CurrSendHead = pMpTcb->Next;
-		if (pMpTcb->Next == NULL)
-			etdev->TxRing.CurrSendTail = NULL;
+	while (tcb &&
+	       ((serviced ^ tcb->WrIndex) & ET_DMA10_WRAP) &&
+	       index < INDEX10(tcb->WrIndex)) {
+		etdev->tx_ring.nBusySend--;
+		etdev->tx_ring.CurrSendHead = tcb->Next;
+		if (tcb->Next == NULL)
+			etdev->tx_ring.CurrSendTail = NULL;

 		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
-		et131x_free_send_packet(etdev, pMpTcb);
+		et131x_free_send_packet(etdev, tcb);
 		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

 		/* Goto the next packet */
-		pMpTcb = etdev->TxRing.CurrSendHead;
+		tcb = etdev->tx_ring.CurrSendHead;
 	}
-	while (pMpTcb &&
-	       !((ServiceComplete ^ pMpTcb->WrIndex) & ET_DMA10_WRAP)
-	       && index > (pMpTcb->WrIndex & ET_DMA10_MASK)) {
-		etdev->TxRing.nBusySend--;
-		etdev->TxRing.CurrSendHead = pMpTcb->Next;
-		if (pMpTcb->Next == NULL)
-			etdev->TxRing.CurrSendTail = NULL;
+	while (tcb &&
+	       !((serviced ^ tcb->WrIndex) & ET_DMA10_WRAP)
+	       && index > (tcb->WrIndex & ET_DMA10_MASK)) {
+		etdev->tx_ring.nBusySend--;
+		etdev->tx_ring.CurrSendHead = tcb->Next;
+		if (tcb->Next == NULL)
+			etdev->tx_ring.CurrSendTail = NULL;

 		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
-		et131x_free_send_packet(etdev, pMpTcb);
+		et131x_free_send_packet(etdev, tcb);
 		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

 		/* Goto the next packet */
-		pMpTcb = etdev->TxRing.CurrSendHead;
+		tcb = etdev->tx_ring.CurrSendHead;
 	}

 	/* Wake up the queue when we hit a low-water mark */
-	if (etdev->TxRing.nBusySend <= (NUM_TCB / 3))
+	if (etdev->tx_ring.nBusySend <= (NUM_TCB / 3))
 		netif_wake_queue(etdev->netdev);

 	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
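The two loops above are a single completion test split by wrap state: a TCB
is finished either when the hardware's wrap bit differs and its index has
not yet re-reached the TCB's slot, or when the wrap bits match and the index
has moved past it. Condensed into one hypothetical predicate, reusing the
10-bit helpers sketched earlier:

	/* True once the serviced pointer has moved past wridx. */
	static inline int tx_desc_done(u32 serviced, u32 wridx)
	{
		if ((serviced ^ wridx) & ET_DMA10_WRAP)
			return INDEX10(serviced) < INDEX10(wridx);
		return INDEX10(serviced) > (wridx & ET_DMA10_MASK);
	}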
@@ -909,13 +907,13 @@ static void et131x_check_send_wait_list(struct et131x_adapter *etdev)

 	spin_lock_irqsave(&etdev->SendWaitLock, flags);

-	while (!list_empty(&etdev->TxRing.SendWaitQueue) &&
+	while (!list_empty(&etdev->tx_ring.SendWaitQueue) &&
 	       MP_TCB_RESOURCES_AVAILABLE(etdev)) {
 		struct list_head *entry;

-		entry = etdev->TxRing.SendWaitQueue.next;
+		entry = etdev->tx_ring.SendWaitQueue.next;

-		etdev->TxRing.nWaitSend--;
+		etdev->tx_ring.nWaitSend--;
 	}

 	spin_unlock_irqrestore(&etdev->SendWaitLock, flags);
@@ -89,14 +89,13 @@
  * 14: UDP checksum assist
  */

-/* TX_DESC_ENTRY_t is sructure representing each descriptor on the ring */
-typedef struct _tx_desc_entry_t {
-	u32 DataBufferPtrHigh;
-	u32 DataBufferPtrLow;
-	u32 word2;	/* control words how to xmit the */
-	u32 word3;	/* data (detailed above) */
-} TX_DESC_ENTRY_t, *PTX_DESC_ENTRY_t;
-
+/* struct tx_desc represents each descriptor on the ring */
+struct tx_desc {
+	u32 addr_hi;
+	u32 addr_lo;
+	u32 len_vlan;	/* control words how to xmit the */
+	u32 flags;	/* data (detailed above) */
+};

 /* Typedefs for Tx DMA engine status writeback */

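nic_send_packet() above writes the flags word of struct tx_desc as bare
numbers: 0x1 marks the last element of a packet, 0x2 the first, and 0x4
requests an interrupt when the descriptor is serviced (hence 0x5 for
"Last element & Interrupt flag"). Hypothetical named constants for those
bits, which the driver itself does not define at this point:

	#define TX_DESC_FLAG_LAST	0x1	/* last element of a packet */
	#define TX_DESC_FLAG_FIRST	0x2	/* first element of a packet */
	#define TX_DESC_FLAG_INT	0x4	/* interrupt when serviced */

	/* The interrupt-coalescing path above is then equivalent to:
	 *	desc[frag - 1].flags = TX_DESC_FLAG_LAST | TX_DESC_FLAG_INT;
	 */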
@@ -120,8 +119,8 @@ typedef union _tx_status_block_t {
 } TX_STATUS_BLOCK_t, *PTX_STATUS_BLOCK_t;

 /* TCB (Transmit Control Block) */
-typedef struct _MP_TCB {
-	struct _MP_TCB *Next;
+struct tcb {
+	struct tcb *Next;
 	u32 Flags;
 	u32 Count;
 	u32 PacketStaleCount;
@@ -129,7 +128,7 @@ typedef struct _MP_TCB {
 	u32 PacketLength;
 	u32 WrIndex;
 	u32 WrIndexStart;
-} MP_TCB, *PMP_TCB;
+};

 /* Structure to hold the skb's in a list */
 typedef struct tx_skb_list_elem {
@@ -137,14 +136,14 @@ typedef struct tx_skb_list_elem {
 	struct sk_buff *skb;
 } TX_SKB_LIST_ELEM, *PTX_SKB_LIST_ELEM;

-/* TX_RING_t is sructure representing our local reference(s) to the ring */
-typedef struct _tx_ring_t {
+/* Structure representing our local reference(s) to the ring */
+struct tx_ring {
 	/* TCB (Transmit Control Block) memory and lists */
-	PMP_TCB MpTcbMem;
+	struct tcb *MpTcbMem;

 	/* List of TCBs that are ready to be used */
-	PMP_TCB TCBReadyQueueHead;
-	PMP_TCB TCBReadyQueueTail;
+	struct tcb *TCBReadyQueueHead;
+	struct tcb *TCBReadyQueueTail;

 	/* list of TCBs that are currently being sent. NOTE that access to all
 	 * three of these (including nBusySend) are controlled via the
@@ -152,19 +151,19 @@ typedef struct _tx_ring_t {
 	 * decrementing nBusySend, or any queue manipulation on CurrSendHead /
 	 * Tail
 	 */
-	PMP_TCB CurrSendHead;
-	PMP_TCB CurrSendTail;
-	int32_t nBusySend;
+	struct tcb *CurrSendHead;
+	struct tcb *CurrSendTail;
+	int nBusySend;

 	/* List of packets (not TCBs) that were queued for lack of resources */
 	struct list_head SendWaitQueue;
-	int32_t nWaitSend;
+	int nWaitSend;

 	/* The actual descriptor ring */
-	PTX_DESC_ENTRY_t pTxDescRingVa;
-	dma_addr_t pTxDescRingPa;
-	uint64_t pTxDescRingAdjustedPa;
-	uint64_t TxDescOffset;
+	struct tx_desc *tx_desc_ring;
+	dma_addr_t tx_desc_ring_pa;
+	u64 pTxDescRingAdjustedPa;
+	u64 TxDescOffset;

 	/* ReadyToSend indicates where we last wrote to in the descriptor ring. */
 	u32 txDmaReadyToSend;
@@ -180,8 +179,8 @@ typedef struct _tx_ring_t {
 	TXMAC_ERR_t TxMacErr;

 	/* Variables to track the Tx interrupt coalescing features */
-	int32_t TxPacketsSinceLastinterrupt;
-} TX_RING_t, *PTX_RING_t;
+	int TxPacketsSinceLastinterrupt;
+};

 /* Forward declaration of the frag-list for the following prototypes */
 typedef struct _MP_FRAG_LIST MP_FRAG_LIST, *PMP_FRAG_LIST;
@@ -101,8 +101,8 @@
 #define LO_MARK_PERCENT_FOR_RX 15

 /* Macros specific to the private adapter structure */
-#define MP_TCB_RESOURCES_AVAILABLE(_M) ((_M)->TxRing.nBusySend < NUM_TCB)
-#define MP_TCB_RESOURCES_NOT_AVAILABLE(_M) ((_M)->TxRing.nBusySend >= NUM_TCB)
+#define MP_TCB_RESOURCES_AVAILABLE(_M) ((_M)->tx_ring.nBusySend < NUM_TCB)
+#define MP_TCB_RESOURCES_NOT_AVAILABLE(_M) ((_M)->tx_ring.nBusySend >= NUM_TCB)

 #define MP_SHOULD_FAIL_SEND(_M) ((_M)->Flags & fMP_ADAPTER_FAIL_SEND_MASK)

@@ -255,7 +255,7 @@ struct et131x_adapter {
 	MI_BMSR_t Bmsr;

 	/* Tx Memory Variables */
-	TX_RING_t TxRing;
+	struct tx_ring tx_ring;

 	/* Rx Memory Variables */
 	RX_RING_t RxRing;
@@ -179,15 +179,15 @@ irqreturn_t et131x_isr(int irq, void *dev_id)
 	/* This is our interrupt, so process accordingly */

 	if (status & ET_INTR_WATCHDOG) {
-		PMP_TCB pMpTcb = adapter->TxRing.CurrSendHead;
+		struct tcb *tcb = adapter->tx_ring.CurrSendHead;

-		if (pMpTcb)
-			if (++pMpTcb->PacketStaleCount > 1)
+		if (tcb)
+			if (++tcb->PacketStaleCount > 1)
 				status |= ET_INTR_TXDMA_ISR;

 		if (adapter->RxRing.UnfinishedReceives)
 			status |= ET_INTR_RXDMA_XFR_DONE;
-		else if (pMpTcb == NULL)
+		else if (tcb == NULL)
 			writel(0, &adapter->regs->global.watchdog_timer);

 		status &= ~ET_INTR_WATCHDOG;
@@ -397,7 +397,7 @@ void et131x_isr_handler(struct work_struct *work)

 	/* Let's move on to the TxMac */
 	if (status & ET_INTR_TXMAC) {
-		etdev->TxRing.TxMacErr.value =
+		etdev->tx_ring.TxMacErr.value =
 			readl(&iomem->txmac.err.value);

 		/*
@@ -412,7 +412,7 @@ void et131x_isr_handler(struct work_struct *work)
 		 */
 		dev_warn(&etdev->pdev->dev,
 			 "TXMAC interrupt, error 0x%08x\n",
-			 etdev->TxRing.TxMacErr.value);
+			 etdev->tx_ring.TxMacErr.value);

 		/* If we are debugging, we want to see this error,
 		 * otherwise we just want the device to be reset and
@@ -519,7 +519,7 @@ int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
 void et131x_tx_timeout(struct net_device *netdev)
 {
 	struct et131x_adapter *etdev = netdev_priv(netdev);
-	PMP_TCB pMpTcb;
+	struct tcb *tcb;
 	unsigned long flags;

 	/* Just skip this part if the adapter is doing link detection */
@@ -541,28 +541,28 @@ void et131x_tx_timeout(struct net_device *netdev)
 	/* Is send stuck? */
 	spin_lock_irqsave(&etdev->TCBSendQLock, flags);

-	pMpTcb = etdev->TxRing.CurrSendHead;
+	tcb = etdev->tx_ring.CurrSendHead;

-	if (pMpTcb != NULL) {
-		pMpTcb->Count++;
+	if (tcb != NULL) {
+		tcb->Count++;

-		if (pMpTcb->Count > NIC_SEND_HANG_THRESHOLD) {
-			TX_DESC_ENTRY_t StuckDescriptors[10];
+		if (tcb->Count > NIC_SEND_HANG_THRESHOLD) {
+			struct tx_desc stuck[10];

-			if (INDEX10(pMpTcb->WrIndex) > 7) {
-				memcpy(StuckDescriptors,
-				       etdev->TxRing.pTxDescRingVa +
-				       INDEX10(pMpTcb->WrIndex) - 6,
-				       sizeof(TX_DESC_ENTRY_t) * 10);
+			if (INDEX10(tcb->WrIndex) > 7) {
+				memcpy(stuck,
+				       etdev->tx_ring.tx_desc_ring +
+				       INDEX10(tcb->WrIndex) - 6,
+				       sizeof(struct tx_desc) * 10);
 			}

 			spin_unlock_irqrestore(&etdev->TCBSendQLock,
 					       flags);

 			dev_warn(&etdev->pdev->dev,
-				 "Send stuck - reset. pMpTcb->WrIndex %x, Flags 0x%08x\n",
-				 pMpTcb->WrIndex,
-				 pMpTcb->Flags);
+				 "Send stuck - reset. tcb->WrIndex %x, Flags 0x%08x\n",
+				 tcb->WrIndex,
+				 tcb->Flags);

 			et131x_close(netdev);
 			et131x_open(netdev);