net: ethernet: mtk_eth_soc: increase tx ring size for QDMA devices
In order to use the hardware traffic shaper feature, a larger tx ring is
needed, especially for the scratch ring, which the hardware shaper uses to
reorder packets.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
Link: https://lore.kernel.org/r/20221116080734.44013-2-nbd@nbd.name
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit c30e0b9b88
parent dbc4af768b
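The core of the change, as a minimal standalone sketch (not the driver source itself; see the full diff below): QDMA-capable SoCs now size the tx ring, and the shaper's scratch ring, with a new 2048-entry constant, while PDMA-only SoCs such as MT7628 keep the 512-entry default. The helper mtk_tx_ring_size() and its has_qdma flag are illustrative stand-ins for the in-driver MTK_HAS_CAPS(soc->caps, MTK_QDMA) check.

/*
 * Sketch only: mtk_tx_ring_size() and has_qdma are illustrative names,
 * not symbols from the driver.
 */
#define MTK_QDMA_RING_SIZE	2048	/* new, larger ring for QDMA/shaper use */
#define MTK_DMA_SIZE		512	/* pre-existing default ring size */

static inline int mtk_tx_ring_size(int has_qdma)
{
	/* QDMA hardware gets the larger ring so the traffic shaper's
	 * scratch ring has room to reorder packets; PDMA-only SoCs
	 * keep the original size.
	 */
	return has_qdma ? MTK_QDMA_RING_SIZE : MTK_DMA_SIZE;
}

In the diff itself this selection happens inline in mtk_tx_alloc(), and the chosen value is cached in ring->dma_size so that mtk_tx_clean() frees exactly what was allocated.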
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -938,7 +938,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
 {
 	const struct mtk_soc_data *soc = eth->soc;
 	dma_addr_t phy_ring_tail;
-	int cnt = MTK_DMA_SIZE;
+	int cnt = MTK_QDMA_RING_SIZE;
 	dma_addr_t dma_addr;
 	int i;
 
@@ -2208,19 +2208,25 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
 	struct mtk_tx_ring *ring = &eth->tx_ring;
 	int i, sz = soc->txrx.txd_size;
 	struct mtk_tx_dma_v2 *txd;
+	int ring_size;
 
-	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
+	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
+		ring_size = MTK_QDMA_RING_SIZE;
+	else
+		ring_size = MTK_DMA_SIZE;
+
+	ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
 			    GFP_KERNEL);
 	if (!ring->buf)
 		goto no_tx_mem;
 
-	ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
+	ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
 				       &ring->phys, GFP_KERNEL);
 	if (!ring->dma)
 		goto no_tx_mem;
 
-	for (i = 0; i < MTK_DMA_SIZE; i++) {
-		int next = (i + 1) % MTK_DMA_SIZE;
+	for (i = 0; i < ring_size; i++) {
+		int next = (i + 1) % ring_size;
 		u32 next_ptr = ring->phys + next * sz;
 
 		txd = ring->dma + i * sz;
@@ -2240,22 +2246,22 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
 	 * descriptors in ring->dma_pdma.
 	 */
 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
-		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
+		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
 						    &ring->phys_pdma, GFP_KERNEL);
 		if (!ring->dma_pdma)
 			goto no_tx_mem;
 
-		for (i = 0; i < MTK_DMA_SIZE; i++) {
+		for (i = 0; i < ring_size; i++) {
 			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
 			ring->dma_pdma[i].txd4 = 0;
 		}
 	}
 
-	ring->dma_size = MTK_DMA_SIZE;
-	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
+	ring->dma_size = ring_size;
+	atomic_set(&ring->free_count, ring_size - 2);
 	ring->next_free = ring->dma;
 	ring->last_free = (void *)txd;
-	ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
+	ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
 	ring->thresh = MAX_SKB_FRAGS;
 
 	/* make sure that all changes to the dma ring are flushed before we
@@ -2267,14 +2273,14 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
 		mtk_w32(eth,
-			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
+			ring->phys + ((ring_size - 1) * sz),
 			soc->reg_map->qdma.crx_ptr);
 		mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
 		mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
 			soc->reg_map->qdma.qtx_cfg);
 	} else {
 		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
-		mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
+		mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
 		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
 		mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
 	}
@@ -2292,7 +2298,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
 	int i;
 
 	if (ring->buf) {
-		for (i = 0; i < MTK_DMA_SIZE; i++)
+		for (i = 0; i < ring->dma_size; i++)
 			mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
 		kfree(ring->buf);
 		ring->buf = NULL;
@@ -2300,14 +2306,14 @@ static void mtk_tx_clean(struct mtk_eth *eth)
 
 	if (ring->dma) {
 		dma_free_coherent(eth->dma_dev,
-				  MTK_DMA_SIZE * soc->txrx.txd_size,
+				  ring->dma_size * soc->txrx.txd_size,
 				  ring->dma, ring->phys);
 		ring->dma = NULL;
 	}
 
 	if (ring->dma_pdma) {
 		dma_free_coherent(eth->dma_dev,
-				  MTK_DMA_SIZE * soc->txrx.txd_size,
+				  ring->dma_size * soc->txrx.txd_size,
 				  ring->dma_pdma, ring->phys_pdma);
 		ring->dma_pdma = NULL;
 	}
@@ -2842,7 +2848,7 @@ static void mtk_dma_free(struct mtk_eth *eth)
 			netdev_reset_queue(eth->netdev[i]);
 	if (eth->scratch_ring) {
 		dma_free_coherent(eth->dma_dev,
-				  MTK_DMA_SIZE * soc->txrx.txd_size,
+				  MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
 				  eth->scratch_ring, eth->phy_scratch_ring);
 		eth->scratch_ring = NULL;
 		eth->phy_scratch_ring = 0;
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -30,6 +30,7 @@
 #define MTK_MAX_RX_LENGTH_2K	2048
 #define MTK_TX_DMA_BUF_LEN	0x3fff
 #define MTK_TX_DMA_BUF_LEN_V2	0xffff
+#define MTK_QDMA_RING_SIZE	2048
 #define MTK_DMA_SIZE		512
 #define MTK_MAC_COUNT		2
 #define MTK_RX_ETH_HLEN		(ETH_HLEN + ETH_FCS_LEN)