net: stmmac: rearrange RX and TX desc init into per-queue basis

The functions below are made per-queue in preparation for XDP ZC (zero-copy) support:

 __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
 __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)

The original functions below are kept and continue to operate on all queues:

 init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
 init_dma_tx_desc_rings(struct net_device *dev)

Signed-off-by: Ong Boon Leong <boon.leong.ong@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit de0b90e52a
parent da5ec7f22a
Author: Ong Boon Leong <boon.leong.ong@intel.com>
Date:   2021-04-13 17:36:23 +08:00
Commit: David S. Miller <davem@davemloft.net>
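For orientation before the hunks: the refactor follows a simple split pattern. Each function body moves into a per-queue helper, and the original all-queues entry point becomes a thin loop over the queue count, so that a later XDP ZC path can (re)initialize one queue in isolation. Below is a minimal standalone sketch of that shape; the types and names (dummy_priv, __init_rx_ring, init_rx_rings) are illustrative stand-ins, not the driver's.

#include <stdio.h>

#define MAX_QUEUES 4

struct dummy_priv {
	unsigned int rx_queues_to_use;
	int rx_ring_ready[MAX_QUEUES];	/* stand-in for real ring state */
};

/* Per-queue worker: everything that touches exactly one ring lives here,
 * so a single queue can later be (re)initialized in isolation.
 */
static int __init_rx_ring(struct dummy_priv *priv, unsigned int queue)
{
	priv->rx_ring_ready[queue] = 1;
	printf("RX queue %u initialized\n", queue);
	return 0;
}

/* All-queues entry point: now just a loop over the per-queue helper,
 * preserving the original external behavior.
 */
static int init_rx_rings(struct dummy_priv *priv)
{
	unsigned int queue;
	int ret;

	for (queue = 0; queue < priv->rx_queues_to_use; queue++) {
		ret = __init_rx_ring(priv, queue);
		if (ret)
			return ret;	/* the real driver unwinds here instead */
	}
	return 0;
}

int main(void)
{
	struct dummy_priv priv = { .rx_queues_to_use = MAX_QUEUES };

	return init_rx_rings(&priv);
}

The double-underscore prefix mirrors the kernel convention the patch uses: __foo() is the narrow worker, foo() the public entry point that iterates over it.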

@@ -1575,27 +1575,18 @@ err_reinit_rx_buffers:
 }
 
 /**
- * init_dma_rx_desc_rings - init the RX descriptor rings
- * @dev: net device structure
+ * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
+ * @priv: driver private structure
+ * @queue: RX queue index
  * @flags: gfp flag.
  * Description: this function initializes the DMA RX descriptors
  * and allocates the socket buffers. It supports the chained and ring
  * modes.
  */
-static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
+static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
 {
-	struct stmmac_priv *priv = netdev_priv(dev);
-	u32 rx_count = priv->plat->rx_queues_to_use;
-	int ret = -ENOMEM;
-	int queue;
-
-	/* RX INITIALIZATION */
-	netif_dbg(priv, probe, priv->dev,
-		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
-
-	for (queue = 0; queue < rx_count; queue++) {
-		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	int ret;
 
 	netif_dbg(priv, probe, priv->dev,
 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
 		  (u32)rx_q->dma_rx_phy);
@@ -1613,7 +1604,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
 	ret = stmmac_alloc_rx_buffers(priv, queue, flags);
 	if (ret < 0)
-		goto err_init_rx_buffers;
+		return -ENOMEM;
 
 	rx_q->cur_rx = 0;
 	rx_q->dirty_rx = 0;
@@ -1629,6 +1620,25 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
 				 rx_q->dma_rx_phy,
 				 priv->dma_rx_size, 0);
 	}
+
+	return 0;
+}
+
+static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 rx_count = priv->plat->rx_queues_to_use;
+	u32 queue;
+	int ret;
+
+	/* RX INITIALIZATION */
+	netif_dbg(priv, probe, priv->dev,
+		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
+
+	for (queue = 0; queue < rx_count; queue++) {
+		ret = __init_dma_rx_desc_rings(priv, queue, flags);
+		if (ret)
+			goto err_init_rx_buffers;
+	}
 
 	return 0;
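A detail worth noting in the RX hunks above: the per-queue helper now reports failure with return -ENOMEM rather than jumping to a label it no longer contains, while the wrapper keeps goto err_init_rx_buffers so that a failure on one queue still releases the buffers of the queues initialized before it (the label's body is outside the hunks shown). A hedged sketch of that unwind pattern, with hypothetical alloc_queue_buffers()/free_queue_buffers() helpers standing in for the driver's buffer management:

#include <errno.h>
#include <stdlib.h>

#define NUM_QUEUES 4

static void *queue_buf[NUM_QUEUES];

/* Hypothetical per-queue allocation; fails with -ENOMEM like the helper. */
static int alloc_queue_buffers(unsigned int queue)
{
	queue_buf[queue] = malloc(4096);
	return queue_buf[queue] ? 0 : -ENOMEM;
}

static void free_queue_buffers(unsigned int queue)
{
	free(queue_buf[queue]);
	queue_buf[queue] = NULL;
}

/* On failure at queue n, unwind queues n-1 .. 0 so nothing leaks,
 * analogous to what the driver's err_init_rx_buffers label does.
 */
static int init_all_queues(void)
{
	unsigned int queue;
	int ret = 0;

	for (queue = 0; queue < NUM_QUEUES; queue++) {
		ret = alloc_queue_buffers(queue);
		if (ret)
			goto err_unwind;
	}
	return 0;

err_unwind:
	while (queue--)
		free_queue_buffers(queue);
	return ret;
}

int main(void)
{
	return init_all_queues();
}

The TX hunks follow the same split, but without the unwind, as shown next.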
@@ -1647,21 +1657,17 @@ err_init_rx_buffers:
 }
 
 /**
- * init_dma_tx_desc_rings - init the TX descriptor rings
- * @dev: net device structure.
+ * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
+ * @priv: driver private structure
+ * @queue : TX queue index
  * Description: this function initializes the DMA TX descriptors
  * and allocates the socket buffers. It supports the chained and ring
  * modes.
  */
-static int init_dma_tx_desc_rings(struct net_device *dev)
+static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
 {
-	struct stmmac_priv *priv = netdev_priv(dev);
-	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
-	u32 queue;
-	int i;
-
-	for (queue = 0; queue < tx_queue_cnt; queue++) {
-		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+	int i;
 
 	netif_dbg(priv, probe, priv->dev,
 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
@@ -1681,6 +1687,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
 	for (i = 0; i < priv->dma_tx_size; i++) {
 		struct dma_desc *p;
+
 		if (priv->extend_desc)
 			p = &((tx_q->dma_etx + i)->basic);
 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
 			p = &((tx_q->dma_entx + i)->basic);
@@ -1702,8 +1709,21 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
 	tx_q->mss = 0;
 
 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
-	}
+
+	return 0;
+}
+
+static int init_dma_tx_desc_rings(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 tx_queue_cnt;
+	u32 queue;
+
+	tx_queue_cnt = priv->plat->tx_queues_to_use;
+
+	for (queue = 0; queue < tx_queue_cnt; queue++)
+		__init_dma_tx_desc_rings(priv, queue);
 
 	return 0;
 }
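On the TX side there is no buffer allocation to unwind, so __init_dma_tx_desc_rings() cannot fail yet: it returns 0 unconditionally and the wrapper ignores its return value. The int return type presumably keeps the helper's signature symmetric with the RX one ahead of the XDP ZC work the message mentions. A small self-contained sketch of that asymmetry (names are illustrative, not the driver's):

#include <stdio.h>

#define NUM_QUEUES 4

/* TX per-queue init: nothing can fail here yet, so it returns 0
 * unconditionally; the int return keeps the signature in line with
 * the RX helper (presumably for the upcoming XDP ZC changes).
 */
static int __init_tx_ring(unsigned int queue)
{
	printf("TX queue %u: descriptors cleared, counters reset\n", queue);
	return 0;
}

static int init_tx_rings(void)
{
	unsigned int queue;

	/* Matches the patch: the wrapper does not check the helper's
	 * return value, since the helper cannot fail in this version.
	 */
	for (queue = 0; queue < NUM_QUEUES; queue++)
		__init_tx_ring(queue);

	return 0;
}

int main(void)
{
	return init_tx_rings();
}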