Merge branch 'stmmac-EST'
Rohan G says: "net: stmmac: EST conformance support" — this patchset enables support for queueMaxSDU and the transmission-overrun counters, which are required for 802.1Qbv (EST/taprio) conformance. Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit
2acfd589e5
@ -202,6 +202,8 @@ struct stmmac_extra_stats {
|
||||
unsigned long mtl_est_hlbf;
|
||||
unsigned long mtl_est_btre;
|
||||
unsigned long mtl_est_btrlm;
|
||||
unsigned long max_sdu_txq_drop[MTL_MAX_TX_QUEUES];
|
||||
unsigned long mtl_est_txq_hlbf[MTL_MAX_TX_QUEUES];
|
||||
/* per queue statistics */
|
||||
struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
|
||||
struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
|
||||
|
@ -81,6 +81,7 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
|
||||
u32 status, value, feqn, hbfq, hbfs, btrl, btrl_max;
|
||||
void __iomem *est_addr = priv->estaddr;
|
||||
u32 txqcnt_mask = BIT(txqcnt) - 1;
|
||||
int i;
|
||||
|
||||
status = readl(est_addr + EST_STATUS);
|
||||
|
||||
@ -125,6 +126,11 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
|
||||
|
||||
x->mtl_est_hlbf++;
|
||||
|
||||
for (i = 0; i < txqcnt; i++) {
|
||||
if (feqn & BIT(i))
|
||||
x->mtl_est_txq_hlbf[i]++;
|
||||
}
|
||||
|
||||
/* Clear Interrupt */
|
||||
writel(feqn, est_addr + EST_FRM_SZ_ERR);
|
||||
|
||||
|
@ -2507,6 +2507,13 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
|
||||
if (!xsk_tx_peek_desc(pool, &xdp_desc))
|
||||
break;
|
||||
|
||||
if (priv->plat->est && priv->plat->est->enable &&
|
||||
priv->plat->est->max_sdu[queue] &&
|
||||
xdp_desc.len > priv->plat->est->max_sdu[queue]) {
|
||||
priv->xstats.max_sdu_txq_drop[queue]++;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (likely(priv->extend_desc))
|
||||
tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
|
||||
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
|
||||
@ -4498,6 +4505,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
return stmmac_tso_xmit(skb, dev);
|
||||
}
|
||||
|
||||
if (priv->plat->est && priv->plat->est->enable &&
|
||||
priv->plat->est->max_sdu[queue] &&
|
||||
skb->len > priv->plat->est->max_sdu[queue]){
|
||||
priv->xstats.max_sdu_txq_drop[queue]++;
|
||||
goto max_sdu_err;
|
||||
}
|
||||
|
||||
if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
|
||||
if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
|
||||
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
|
||||
@ -4715,6 +4729,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
|
||||
dma_map_err:
|
||||
netdev_err(priv->dev, "Tx DMA map failed\n");
|
||||
max_sdu_err:
|
||||
dev_kfree_skb(skb);
|
||||
priv->xstats.tx_dropped++;
|
||||
return NETDEV_TX_OK;
|
||||
@ -4871,6 +4886,13 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
|
||||
if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
|
||||
return STMMAC_XDP_CONSUMED;
|
||||
|
||||
if (priv->plat->est && priv->plat->est->enable &&
|
||||
priv->plat->est->max_sdu[queue] &&
|
||||
xdpf->len > priv->plat->est->max_sdu[queue]) {
|
||||
priv->xstats.max_sdu_txq_drop[queue]++;
|
||||
return STMMAC_XDP_CONSUMED;
|
||||
}
|
||||
|
||||
if (likely(priv->extend_desc))
|
||||
tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
|
||||
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
|
||||
|
@ -915,8 +915,30 @@ struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
|
||||
return time;
|
||||
}
|
||||
|
||||
static int tc_setup_taprio(struct stmmac_priv *priv,
|
||||
struct tc_taprio_qopt_offload *qopt)
|
||||
static void tc_taprio_map_maxsdu_txq(struct stmmac_priv *priv,
|
||||
struct tc_taprio_qopt_offload *qopt)
|
||||
{
|
||||
struct plat_stmmacenet_data *plat = priv->plat;
|
||||
u32 num_tc = qopt->mqprio.qopt.num_tc;
|
||||
u32 offset, count, i, j;
|
||||
|
||||
/* QueueMaxSDU received from the driver corresponds to the Linux traffic
|
||||
* class. Map queueMaxSDU per Linux traffic class to DWMAC Tx queues.
|
||||
*/
|
||||
for (i = 0; i < num_tc; i++) {
|
||||
if (!qopt->max_sdu[i])
|
||||
continue;
|
||||
|
||||
offset = qopt->mqprio.qopt.offset[i];
|
||||
count = qopt->mqprio.qopt.count[i];
|
||||
|
||||
for (j = offset; j < offset + count; j++)
|
||||
plat->est->max_sdu[j] = qopt->max_sdu[i] + ETH_HLEN - ETH_TLEN;
|
||||
}
|
||||
}
|
||||
|
||||
static int tc_taprio_configure(struct stmmac_priv *priv,
|
||||
struct tc_taprio_qopt_offload *qopt)
|
||||
{
|
||||
u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
|
||||
struct plat_stmmacenet_data *plat = priv->plat;
|
||||
@ -968,8 +990,6 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
|
||||
|
||||
if (qopt->cmd == TAPRIO_CMD_DESTROY)
|
||||
goto disable;
|
||||
else if (qopt->cmd != TAPRIO_CMD_REPLACE)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (qopt->num_entries >= dep)
|
||||
return -EINVAL;
|
||||
@ -1045,6 +1065,8 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
|
||||
|
||||
priv->plat->est->ter = qopt->cycle_time_extension;
|
||||
|
||||
tc_taprio_map_maxsdu_txq(priv, qopt);
|
||||
|
||||
if (fpe && !priv->dma_cap.fpesel) {
|
||||
mutex_unlock(&priv->plat->est->lock);
|
||||
return -EOPNOTSUPP;
|
||||
@ -1078,6 +1100,11 @@ disable:
|
||||
priv->plat->est->enable = false;
|
||||
stmmac_est_configure(priv, priv, priv->plat->est,
|
||||
priv->plat->clk_ptp_rate);
|
||||
/* Reset taprio status */
|
||||
for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
|
||||
priv->xstats.max_sdu_txq_drop[i] = 0;
|
||||
priv->xstats.mtl_est_txq_hlbf[i] = 0;
|
||||
}
|
||||
mutex_unlock(&priv->plat->est->lock);
|
||||
}
|
||||
|
||||
@ -1095,6 +1122,57 @@ disable:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void tc_taprio_stats(struct stmmac_priv *priv,
|
||||
struct tc_taprio_qopt_offload *qopt)
|
||||
{
|
||||
u64 window_drops = 0;
|
||||
int i = 0;
|
||||
|
||||
for (i = 0; i < priv->plat->tx_queues_to_use; i++)
|
||||
window_drops += priv->xstats.max_sdu_txq_drop[i] +
|
||||
priv->xstats.mtl_est_txq_hlbf[i];
|
||||
qopt->stats.window_drops = window_drops;
|
||||
|
||||
/* Transmission overrun doesn't happen for stmmac, hence always 0 */
|
||||
qopt->stats.tx_overruns = 0;
|
||||
}
|
||||
|
||||
static void tc_taprio_queue_stats(struct stmmac_priv *priv,
|
||||
struct tc_taprio_qopt_offload *qopt)
|
||||
{
|
||||
struct tc_taprio_qopt_queue_stats *q_stats = &qopt->queue_stats;
|
||||
int queue = qopt->queue_stats.queue;
|
||||
|
||||
q_stats->stats.window_drops = priv->xstats.max_sdu_txq_drop[queue] +
|
||||
priv->xstats.mtl_est_txq_hlbf[queue];
|
||||
|
||||
/* Transmission overrun doesn't happen for stmmac, hence always 0 */
|
||||
q_stats->stats.tx_overruns = 0;
|
||||
}
|
||||
|
||||
static int tc_setup_taprio(struct stmmac_priv *priv,
|
||||
struct tc_taprio_qopt_offload *qopt)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
switch (qopt->cmd) {
|
||||
case TAPRIO_CMD_REPLACE:
|
||||
case TAPRIO_CMD_DESTROY:
|
||||
err = tc_taprio_configure(priv, qopt);
|
||||
break;
|
||||
case TAPRIO_CMD_STATS:
|
||||
tc_taprio_stats(priv, qopt);
|
||||
break;
|
||||
case TAPRIO_CMD_QUEUE_STATS:
|
||||
tc_taprio_queue_stats(priv, qopt);
|
||||
break;
|
||||
default:
|
||||
err = -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int tc_setup_etf(struct stmmac_priv *priv,
|
||||
struct tc_etf_qopt_offload *qopt)
|
||||
{
|
||||
@ -1126,6 +1204,7 @@ static int tc_query_caps(struct stmmac_priv *priv,
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
caps->gate_mask_per_txq = true;
|
||||
caps->supports_queue_max_sdu = true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -127,6 +127,7 @@ struct stmmac_est {
|
||||
u32 gcl_unaligned[EST_GCL];
|
||||
u32 gcl[EST_GCL];
|
||||
u32 gcl_size;
|
||||
u32 max_sdu[MTL_MAX_TX_QUEUES];
|
||||
};
|
||||
|
||||
struct stmmac_rxq_cfg {
|
||||
|
Loading…
x
Reference in New Issue
Block a user