bnxt_en: share NQ ring sw_stats memory with subrings
On P5_PLUS chips and later, the NQ rings have subrings for RX and TX completions respectively. These subrings are passed to the poll function instead of the base NQ, but each ring carries its own copy of the software ring statistics. For stats to be conveniently accessible in __bnxt_poll_work(), the statistics memory should either be shared between the NQ and its subrings or the subrings need to be included in the ethtool stats aggregation logic. This patch opts for the former, because it is more efficient and less confusing to have the software statistics for a ring exist in a single place. Before this patch, a counter would not be displayed if the "wrong" cpr->sw_stats was used to increment it. Link: https://lore.kernel.org/netdev/CACKFLikEhVAJA+osD7UjQNotdGte+fth7zOy7yDdLkTyFk9Pyw@mail.gmail.com/ Signed-off-by: Edwin Peer <edwin.peer@broadcom.com> Signed-off-by: Michael Chan <michael.chan@broadcom.com> Reviewed-by: Simon Horman <horms@kernel.org> Link: https://lore.kernel.org/r/20240501003056.100607-2-michael.chan@broadcom.com Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
parent
fc1fa5a071
commit
a75fbb3aa4
@ -1811,7 +1811,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
|
||||
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
|
||||
if (!skb) {
|
||||
bnxt_abort_tpa(cpr, idx, agg_bufs);
|
||||
cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
|
||||
cpr->sw_stats->rx.rx_oom_discards += 1;
|
||||
return NULL;
|
||||
}
|
||||
} else {
|
||||
@ -1821,7 +1821,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
|
||||
new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
|
||||
if (!new_data) {
|
||||
bnxt_abort_tpa(cpr, idx, agg_bufs);
|
||||
cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
|
||||
cpr->sw_stats->rx.rx_oom_discards += 1;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -1837,7 +1837,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
|
||||
if (!skb) {
|
||||
skb_free_frag(data);
|
||||
bnxt_abort_tpa(cpr, idx, agg_bufs);
|
||||
cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
|
||||
cpr->sw_stats->rx.rx_oom_discards += 1;
|
||||
return NULL;
|
||||
}
|
||||
skb_reserve(skb, bp->rx_offset);
|
||||
@ -1848,7 +1848,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
|
||||
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
|
||||
if (!skb) {
|
||||
/* Page reuse already handled by bnxt_rx_pages(). */
|
||||
cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
|
||||
cpr->sw_stats->rx.rx_oom_discards += 1;
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
@ -2106,7 +2106,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
|
||||
|
||||
rc = -EIO;
|
||||
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
|
||||
bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
|
||||
bnapi->cp_ring.sw_stats->rx.rx_buf_errors++;
|
||||
if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
|
||||
!(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
|
||||
netdev_warn_once(bp->dev, "RX buffer error %x\n",
|
||||
@ -2222,7 +2222,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
|
||||
} else {
|
||||
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
|
||||
if (dev->features & NETIF_F_RXCSUM)
|
||||
bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
|
||||
bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++;
|
||||
}
|
||||
}
|
||||
|
||||
@ -2259,7 +2259,7 @@ next_rx_no_prod_no_len:
|
||||
return rc;
|
||||
|
||||
oom_next_rx:
|
||||
cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
|
||||
cpr->sw_stats->rx.rx_oom_discards += 1;
|
||||
rc = -ENOMEM;
|
||||
goto next_rx;
|
||||
}
|
||||
@ -2308,7 +2308,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
|
||||
}
|
||||
rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
|
||||
if (rc && rc != -EBUSY)
|
||||
cpr->bnapi->cp_ring.sw_stats.rx.rx_netpoll_discards += 1;
|
||||
cpr->sw_stats->rx.rx_netpoll_discards += 1;
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -3951,6 +3951,7 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
|
||||
if (rc)
|
||||
return rc;
|
||||
cpr2->bnapi = bnapi;
|
||||
cpr2->sw_stats = cpr->sw_stats;
|
||||
cpr2->cp_idx = k;
|
||||
if (!k && rx) {
|
||||
bp->rx_ring[i].rx_cpr = cpr2;
|
||||
@ -4792,6 +4793,9 @@ static void bnxt_free_ring_stats(struct bnxt *bp)
|
||||
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
|
||||
|
||||
bnxt_free_stats_mem(bp, &cpr->stats);
|
||||
|
||||
kfree(cpr->sw_stats);
|
||||
cpr->sw_stats = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
@ -4806,6 +4810,10 @@ static int bnxt_alloc_stats(struct bnxt *bp)
|
||||
struct bnxt_napi *bnapi = bp->bnapi[i];
|
||||
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
|
||||
|
||||
cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL);
|
||||
if (!cpr->sw_stats)
|
||||
return -ENOMEM;
|
||||
|
||||
cpr->stats.len = size;
|
||||
rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
|
||||
if (rc)
|
||||
@ -10811,9 +10819,9 @@ static void bnxt_disable_napi(struct bnxt *bp)
|
||||
|
||||
cpr = &bnapi->cp_ring;
|
||||
if (bnapi->tx_fault)
|
||||
cpr->sw_stats.tx.tx_resets++;
|
||||
cpr->sw_stats->tx.tx_resets++;
|
||||
if (bnapi->in_reset)
|
||||
cpr->sw_stats.rx.rx_resets++;
|
||||
cpr->sw_stats->rx.rx_resets++;
|
||||
napi_disable(&bnapi->napi);
|
||||
if (bnapi->rx_ring)
|
||||
cancel_work_sync(&cpr->dim.work);
|
||||
@ -12338,8 +12346,8 @@ static void bnxt_get_ring_stats(struct bnxt *bp,
|
||||
stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
|
||||
|
||||
stats->rx_dropped +=
|
||||
cpr->sw_stats.rx.rx_netpoll_discards +
|
||||
cpr->sw_stats.rx.rx_oom_discards;
|
||||
cpr->sw_stats->rx.rx_netpoll_discards +
|
||||
cpr->sw_stats->rx.rx_oom_discards;
|
||||
}
|
||||
}
|
||||
|
||||
@ -12406,7 +12414,7 @@ static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
|
||||
struct bnxt_total_ring_err_stats *stats,
|
||||
struct bnxt_cp_ring_info *cpr)
|
||||
{
|
||||
struct bnxt_sw_stats *sw_stats = &cpr->sw_stats;
|
||||
struct bnxt_sw_stats *sw_stats = cpr->sw_stats;
|
||||
u64 *hw_stats = cpr->stats.sw_stats;
|
||||
|
||||
stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
|
||||
@ -13249,7 +13257,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
|
||||
rxr->bnapi->in_reset = false;
|
||||
bnxt_alloc_one_rx_ring(bp, i);
|
||||
cpr = &rxr->bnapi->cp_ring;
|
||||
cpr->sw_stats.rx.rx_resets++;
|
||||
cpr->sw_stats->rx.rx_resets++;
|
||||
if (bp->flags & BNXT_FLAG_AGG_RINGS)
|
||||
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
|
||||
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
|
||||
@ -13461,7 +13469,7 @@ static void bnxt_chk_missed_irq(struct bnxt *bp)
|
||||
bnxt_dbg_hwrm_ring_info_get(bp,
|
||||
DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
|
||||
fw_ring_id, &val[0], &val[1]);
|
||||
cpr->sw_stats.cmn.missed_irqs++;
|
||||
cpr->sw_stats->cmn.missed_irqs++;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -14769,7 +14777,7 @@ static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
|
||||
stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
|
||||
stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
|
||||
|
||||
stats->alloc_fail = cpr->sw_stats.rx.rx_oom_discards;
|
||||
stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
|
||||
}
|
||||
|
||||
static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
|
||||
|
@ -1152,7 +1152,7 @@ struct bnxt_cp_ring_info {
|
||||
struct bnxt_stats_mem stats;
|
||||
u32 hw_stats_ctx_id;
|
||||
|
||||
struct bnxt_sw_stats sw_stats;
|
||||
struct bnxt_sw_stats *sw_stats;
|
||||
|
||||
struct bnxt_ring_struct cp_ring_struct;
|
||||
|
||||
|
@ -631,13 +631,13 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
|
||||
buf[j] = sw_stats[k];
|
||||
|
||||
skip_tpa_ring_stats:
|
||||
sw = (u64 *)&cpr->sw_stats.rx;
|
||||
sw = (u64 *)&cpr->sw_stats->rx;
|
||||
if (is_rx_ring(bp, i)) {
|
||||
for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
|
||||
buf[j] = sw[k];
|
||||
}
|
||||
|
||||
sw = (u64 *)&cpr->sw_stats.cmn;
|
||||
sw = (u64 *)&cpr->sw_stats->cmn;
|
||||
for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
|
||||
buf[j] = sw[k];
|
||||
}
|
||||
|
Loading…
x
Reference in New Issue
Block a user