Merge branch 'bnxt_en-update'
Michael Chan says:

====================
bnxt_en update.

This patchset removes the PCIe histogram and other debug register data
from ethtool -S. The removed data are not counters and they have very
large and constantly fluctuating values that are not suitable for the
ethtool -S decimal counter display.

The rest of the patches implement counter rollover for all hardware
counters that are not 64-bit counters. Different generations of hardware
have different counter widths. The driver will now query the counter
widths of all counters from firmware and implement rollover support on
all non-64-bit counters.

The last patch adds the PCIe histogram and other PCIe register data back
using the ethtool -d interface.

v2: Fix bnxt_re RDMA driver compile issue.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 43e7a0e5ad
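For orientation, the rollover arithmetic the series introduces can be sketched in isolation as below. This is an illustrative, self-contained user-space rendering, not the driver code itself; the function name accumulate_counter and the 48-bit mask are only examples, and the driver's actual equivalent is bnxt_add_one_ctr() in the patch further down, with the mask widths reported by firmware.

#include <stdint.h>
#include <stdio.h>

/* Splice the narrow hardware reading into the 64-bit software copy and
 * add one full wrap (mask + 1) whenever the hardware value has moved
 * backwards since the previous poll.
 */
static void accumulate_counter(uint64_t hw, uint64_t *sw, uint64_t mask)
{
	uint64_t sw_tmp = (*sw & ~mask) | hw;	/* keep high bits, take hw low bits */

	if (hw < (*sw & mask))			/* hw went backwards => it wrapped */
		sw_tmp += mask + 1;
	*sw = sw_tmp;
}

int main(void)
{
	uint64_t mask = (1ULL << 48) - 1;	/* e.g. a 48-bit counter width */
	uint64_t sw = mask - 5;			/* software copy just below the wrap */
	uint64_t hw = 10;			/* next hardware reading, after the wrap */

	accumulate_counter(hw, &sw, mask);
	printf("%llu\n", (unsigned long long)sw);	/* 2^48 + 10 = 281474976710666 */
	return 0;
}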
@@ -132,7 +132,7 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
stats->value[BNXT_RE_RECOVERABLE_ERRORS] =
le64_to_cpu(bnxt_re_stats->tx_bcast_pkts);
stats->value[BNXT_RE_RX_DROPS] =
le64_to_cpu(bnxt_re_stats->rx_drop_pkts);
le64_to_cpu(bnxt_re_stats->rx_error_pkts);
stats->value[BNXT_RE_RX_DISCARDS] =
le64_to_cpu(bnxt_re_stats->rx_discard_pkts);
stats->value[BNXT_RE_RX_PKTS] =
@@ -3711,67 +3711,191 @@ static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
return 0;
}

static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
{
kfree(stats->hw_masks);
stats->hw_masks = NULL;
kfree(stats->sw_stats);
stats->sw_stats = NULL;
if (stats->hw_stats) {
dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
stats->hw_stats_map);
stats->hw_stats = NULL;
}
}

static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
bool alloc_masks)
{
stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
&stats->hw_stats_map, GFP_KERNEL);
if (!stats->hw_stats)
return -ENOMEM;

memset(stats->hw_stats, 0, stats->len);

stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
if (!stats->sw_stats)
goto stats_mem_err;

if (alloc_masks) {
stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
if (!stats->hw_masks)
goto stats_mem_err;
}
return 0;

stats_mem_err:
bnxt_free_stats_mem(bp, stats);
return -ENOMEM;
}

static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
{
int i;

for (i = 0; i < count; i++)
mask_arr[i] = mask;
}

static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
{
int i;

for (i = 0; i < count; i++)
mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
}

static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
struct bnxt_stats_mem *stats)
{
struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_qstats_ext_input req = {0};
__le64 *hw_masks;
int rc;

if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
!(bp->flags & BNXT_FLAG_CHIP_P5))
return -EOPNOTSUPP;

bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1);
req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
goto qstat_exit;

hw_masks = &resp->rx_ucast_pkts;
bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);

qstat_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}

static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);

static void bnxt_init_stats(struct bnxt *bp)
{
struct bnxt_napi *bnapi = bp->bnapi[0];
struct bnxt_cp_ring_info *cpr;
struct bnxt_stats_mem *stats;
__le64 *rx_stats, *tx_stats;
int rc, rx_count, tx_count;
u64 *rx_masks, *tx_masks;
u64 mask;
u8 flags;

cpr = &bnapi->cp_ring;
stats = &cpr->stats;
rc = bnxt_hwrm_func_qstat_ext(bp, stats);
if (rc) {
if (bp->flags & BNXT_FLAG_CHIP_P5)
mask = (1ULL << 48) - 1;
else
mask = -1ULL;
bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
}
if (bp->flags & BNXT_FLAG_PORT_STATS) {
stats = &bp->port_stats;
rx_stats = stats->hw_stats;
rx_masks = stats->hw_masks;
rx_count = sizeof(struct rx_port_stats) / 8;
tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
tx_count = sizeof(struct tx_port_stats) / 8;

flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
rc = bnxt_hwrm_port_qstats(bp, flags);
if (rc) {
mask = (1ULL << 40) - 1;

bnxt_fill_masks(rx_masks, mask, rx_count);
bnxt_fill_masks(tx_masks, mask, tx_count);
} else {
bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
bnxt_hwrm_port_qstats(bp, 0);
}
}
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
stats = &bp->rx_port_stats_ext;
rx_stats = stats->hw_stats;
rx_masks = stats->hw_masks;
rx_count = sizeof(struct rx_port_stats_ext) / 8;
stats = &bp->tx_port_stats_ext;
tx_stats = stats->hw_stats;
tx_masks = stats->hw_masks;
tx_count = sizeof(struct tx_port_stats_ext) / 8;

flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
rc = bnxt_hwrm_port_qstats_ext(bp, flags);
if (rc) {
mask = (1ULL << 40) - 1;

bnxt_fill_masks(rx_masks, mask, rx_count);
if (tx_stats)
bnxt_fill_masks(tx_masks, mask, tx_count);
} else {
bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
if (tx_stats)
bnxt_copy_hw_masks(tx_masks, tx_stats,
tx_count);
bnxt_hwrm_port_qstats_ext(bp, 0);
}
}
}

static void bnxt_free_port_stats(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;

bp->flags &= ~BNXT_FLAG_PORT_STATS;
bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;

if (bp->hw_rx_port_stats) {
dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
bp->hw_rx_port_stats,
bp->hw_rx_port_stats_map);
bp->hw_rx_port_stats = NULL;
}

if (bp->hw_tx_port_stats_ext) {
dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext),
bp->hw_tx_port_stats_ext,
bp->hw_tx_port_stats_ext_map);
bp->hw_tx_port_stats_ext = NULL;
}

if (bp->hw_rx_port_stats_ext) {
dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
bp->hw_rx_port_stats_ext,
bp->hw_rx_port_stats_ext_map);
bp->hw_rx_port_stats_ext = NULL;
}

if (bp->hw_pcie_stats) {
dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
bp->hw_pcie_stats, bp->hw_pcie_stats_map);
bp->hw_pcie_stats = NULL;
}
bnxt_free_stats_mem(bp, &bp->port_stats);
bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
}

static void bnxt_free_ring_stats(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;
int size, i;
int i;

if (!bp->bnapi)
return;

size = bp->hw_ring_stats_size;

for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

if (cpr->hw_stats) {
dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
cpr->hw_stats_map);
cpr->hw_stats = NULL;
}
bnxt_free_stats_mem(bp, &cpr->stats);
}
}
|
||||
|
||||
static int bnxt_alloc_stats(struct bnxt *bp)
|
||||
{
|
||||
u32 size, i;
|
||||
struct pci_dev *pdev = bp->pdev;
|
||||
int rc;
|
||||
|
||||
size = bp->hw_ring_stats_size;
|
||||
|
||||
@ -3779,11 +3903,10 @@ static int bnxt_alloc_stats(struct bnxt *bp)
|
||||
struct bnxt_napi *bnapi = bp->bnapi[i];
|
||||
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
|
||||
|
||||
cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
|
||||
&cpr->hw_stats_map,
|
||||
GFP_KERNEL);
|
||||
if (!cpr->hw_stats)
|
||||
return -ENOMEM;
|
||||
cpr->stats.len = size;
|
||||
rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
|
||||
}
|
||||
@ -3791,22 +3914,14 @@ static int bnxt_alloc_stats(struct bnxt *bp)
|
||||
if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
|
||||
return 0;
|
||||
|
||||
if (bp->hw_rx_port_stats)
|
||||
if (bp->port_stats.hw_stats)
|
||||
goto alloc_ext_stats;
|
||||
|
||||
bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
|
||||
sizeof(struct tx_port_stats) + 1024;
|
||||
bp->port_stats.len = BNXT_PORT_STATS_SIZE;
|
||||
rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
bp->hw_rx_port_stats =
|
||||
dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
|
||||
&bp->hw_rx_port_stats_map,
|
||||
GFP_KERNEL);
|
||||
if (!bp->hw_rx_port_stats)
|
||||
return -ENOMEM;
|
||||
|
||||
bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512;
|
||||
bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
|
||||
sizeof(struct rx_port_stats) + 512;
|
||||
bp->flags |= BNXT_FLAG_PORT_STATS;
|
||||
|
||||
alloc_ext_stats:
|
||||
@ -3815,41 +3930,28 @@ alloc_ext_stats:
|
||||
if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
|
||||
return 0;
|
||||
|
||||
if (bp->hw_rx_port_stats_ext)
|
||||
if (bp->rx_port_stats_ext.hw_stats)
|
||||
goto alloc_tx_ext_stats;
|
||||
|
||||
bp->hw_rx_port_stats_ext =
|
||||
dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
|
||||
&bp->hw_rx_port_stats_ext_map, GFP_KERNEL);
|
||||
if (!bp->hw_rx_port_stats_ext)
|
||||
bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
|
||||
rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
|
||||
/* Extended stats are optional */
|
||||
if (rc)
|
||||
return 0;
|
||||
|
||||
alloc_tx_ext_stats:
|
||||
if (bp->hw_tx_port_stats_ext)
|
||||
goto alloc_pcie_stats;
|
||||
if (bp->tx_port_stats_ext.hw_stats)
|
||||
return 0;
|
||||
|
||||
if (bp->hwrm_spec_code >= 0x10902 ||
|
||||
(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
|
||||
bp->hw_tx_port_stats_ext =
|
||||
dma_alloc_coherent(&pdev->dev,
|
||||
sizeof(struct tx_port_stats_ext),
|
||||
&bp->hw_tx_port_stats_ext_map,
|
||||
GFP_KERNEL);
|
||||
bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
|
||||
rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
|
||||
/* Extended stats are optional */
|
||||
if (rc)
|
||||
return 0;
|
||||
}
|
||||
bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
|
||||
|
||||
alloc_pcie_stats:
|
||||
if (bp->hw_pcie_stats ||
|
||||
!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
|
||||
return 0;
|
||||
|
||||
bp->hw_pcie_stats =
|
||||
dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
|
||||
&bp->hw_pcie_stats_map, GFP_KERNEL);
|
||||
if (!bp->hw_pcie_stats)
|
||||
return 0;
|
||||
|
||||
bp->flags |= BNXT_FLAG_PCIE_STATS;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -3949,6 +4051,8 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
|
||||
bnxt_free_ntp_fltrs(bp, irq_re_init);
|
||||
if (irq_re_init) {
|
||||
bnxt_free_ring_stats(bp);
|
||||
if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET))
|
||||
bnxt_free_port_stats(bp);
|
||||
bnxt_free_ring_grps(bp);
|
||||
bnxt_free_vnics(bp);
|
||||
kfree(bp->tx_ring_map);
|
||||
@ -4052,6 +4156,7 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
|
||||
rc = bnxt_alloc_stats(bp);
|
||||
if (rc)
|
||||
goto alloc_mem_err;
|
||||
bnxt_init_stats(bp);
|
||||
|
||||
rc = bnxt_alloc_ntp_fltrs(bp);
|
||||
if (rc)
|
||||
@ -6458,7 +6563,7 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
|
||||
struct bnxt_napi *bnapi = bp->bnapi[i];
|
||||
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
|
||||
|
||||
req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
|
||||
req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
|
||||
|
||||
rc = _hwrm_send_message(bp, &req, sizeof(req),
|
||||
HWRM_CMD_TIMEOUT);
|
||||
@ -7489,7 +7594,89 @@ int bnxt_hwrm_fw_set_time(struct bnxt *bp)
|
||||
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
|
||||
}

static int bnxt_hwrm_port_qstats(struct bnxt *bp)
static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
{
u64 sw_tmp;

sw_tmp = (*sw & ~mask) | hw;
if (hw < (*sw & mask))
sw_tmp += mask + 1;
WRITE_ONCE(*sw, sw_tmp);
}

static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
int count, bool ignore_zero)
{
int i;

for (i = 0; i < count; i++) {
u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));

if (ignore_zero && !hw)
continue;

if (masks[i] == -1ULL)
sw_stats[i] = hw;
else
bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
}
}

static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
{
if (!stats->hw_stats)
return;

__bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
stats->hw_masks, stats->len / 8, false);
}

static void bnxt_accumulate_all_stats(struct bnxt *bp)
{
struct bnxt_stats_mem *ring0_stats;
bool ignore_zero = false;
int i;

/* Chip bug. Counter intermittently becomes 0. */
if (bp->flags & BNXT_FLAG_CHIP_P5)
ignore_zero = true;

for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr;
struct bnxt_stats_mem *stats;

cpr = &bnapi->cp_ring;
stats = &cpr->stats;
if (!i)
ring0_stats = stats;
__bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
ring0_stats->hw_masks,
ring0_stats->len / 8, ignore_zero);
}
if (bp->flags & BNXT_FLAG_PORT_STATS) {
struct bnxt_stats_mem *stats = &bp->port_stats;
__le64 *hw_stats = stats->hw_stats;
u64 *sw_stats = stats->sw_stats;
u64 *masks = stats->hw_masks;
int cnt;

cnt = sizeof(struct rx_port_stats) / 8;
__bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);

hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
cnt = sizeof(struct tx_port_stats) / 8;
__bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
}
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
bnxt_accumulate_stats(&bp->rx_port_stats_ext);
bnxt_accumulate_stats(&bp->tx_port_stats_ext);
}
}
|
||||
|
||||
static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
|
||||
{
|
||||
struct bnxt_pf_info *pf = &bp->pf;
|
||||
struct hwrm_port_qstats_input req = {0};
|
||||
@ -7497,14 +7684,19 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp)
|
||||
if (!(bp->flags & BNXT_FLAG_PORT_STATS))
|
||||
return 0;
|
||||
|
||||
if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
req.flags = flags;
|
||||
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
|
||||
req.port_id = cpu_to_le16(pf->port_id);
|
||||
req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
|
||||
req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
|
||||
req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
|
||||
BNXT_TX_PORT_STATS_BYTE_OFFSET);
|
||||
req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
|
||||
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
|
||||
}
|
||||
|
||||
static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
|
||||
static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
|
||||
{
|
||||
struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
|
||||
struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
|
||||
@ -7516,14 +7708,18 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
|
||||
if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
|
||||
return 0;
|
||||
|
||||
if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
|
||||
req.flags = flags;
|
||||
req.port_id = cpu_to_le16(pf->port_id);
|
||||
req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
|
||||
req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
|
||||
tx_stat_size = bp->hw_tx_port_stats_ext ?
|
||||
sizeof(*bp->hw_tx_port_stats_ext) : 0;
|
||||
req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
|
||||
tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
|
||||
sizeof(struct tx_port_stats_ext) : 0;
|
||||
req.tx_stat_size = cpu_to_le16(tx_stat_size);
|
||||
req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
|
||||
req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
|
||||
mutex_lock(&bp->hwrm_cmd_lock);
|
||||
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
|
||||
if (!rc) {
|
||||
@ -7534,6 +7730,9 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
|
||||
bp->fw_rx_stats_ext_size = 0;
|
||||
bp->fw_tx_stats_ext_size = 0;
|
||||
}
|
||||
if (flags)
|
||||
goto qstats_done;
|
||||
|
||||
if (bp->fw_tx_stats_ext_size <=
|
||||
offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
|
||||
mutex_unlock(&bp->hwrm_cmd_lock);
|
||||
@ -7574,19 +7773,6 @@ qstats_done:
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int bnxt_hwrm_pcie_qstats(struct bnxt *bp)
|
||||
{
|
||||
struct hwrm_pcie_qstats_input req = {0};
|
||||
|
||||
if (!(bp->flags & BNXT_FLAG_PCIE_STATS))
|
||||
return 0;
|
||||
|
||||
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
|
||||
req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats));
|
||||
req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map);
|
||||
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
|
||||
}
|
||||
|
||||
static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
|
||||
{
|
||||
if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
|
||||
@ -8608,6 +8794,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
|
||||
if (BNXT_PF(bp))
|
||||
bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG;
|
||||
}
|
||||
if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET)
|
||||
bp->fw_cap |= BNXT_FW_CAP_PORT_STATS_NO_RESET;
|
||||
|
||||
if (resp->supported_speeds_auto_mode)
|
||||
link_info->support_auto_speeds =
|
||||
le16_to_cpu(resp->supported_speeds_auto_mode);
|
||||
@ -9610,34 +9799,33 @@ static void bnxt_get_ring_stats(struct bnxt *bp,
|
||||
{
|
||||
int i;
|
||||
|
||||
|
||||
for (i = 0; i < bp->cp_nr_rings; i++) {
|
||||
struct bnxt_napi *bnapi = bp->bnapi[i];
|
||||
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
|
||||
struct ctx_hw_stats *hw_stats = cpr->hw_stats;
|
||||
u64 *sw = cpr->stats.sw_stats;
|
||||
|
||||
stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
|
||||
stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
|
||||
stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
|
||||
stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
|
||||
stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
|
||||
stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
|
||||
|
||||
stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
|
||||
stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
|
||||
stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
|
||||
stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
|
||||
stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
|
||||
stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
|
||||
|
||||
stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
|
||||
stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
|
||||
stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
|
||||
stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
|
||||
stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
|
||||
stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
|
||||
|
||||
stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
|
||||
stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
|
||||
stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
|
||||
stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
|
||||
stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
|
||||
stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
|
||||
|
||||
stats->rx_missed_errors +=
|
||||
le64_to_cpu(hw_stats->rx_discard_pkts);
|
||||
BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
|
||||
|
||||
stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
|
||||
stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
|
||||
|
||||
stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
|
||||
stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
|
||||
}
|
||||
}
|
||||
|
||||
@ -9675,19 +9863,26 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
|
||||
bnxt_add_prev_stats(bp, stats);
|
||||
|
||||
if (bp->flags & BNXT_FLAG_PORT_STATS) {
|
||||
struct rx_port_stats *rx = bp->hw_rx_port_stats;
|
||||
struct tx_port_stats *tx = bp->hw_tx_port_stats;
|
||||
u64 *rx = bp->port_stats.sw_stats;
|
||||
u64 *tx = bp->port_stats.sw_stats +
|
||||
BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
|
||||
|
||||
stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
|
||||
stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
|
||||
stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
|
||||
le64_to_cpu(rx->rx_ovrsz_frames) +
|
||||
le64_to_cpu(rx->rx_runt_frames);
|
||||
stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
|
||||
le64_to_cpu(rx->rx_jbr_frames);
|
||||
stats->collisions = le64_to_cpu(tx->tx_total_collisions);
|
||||
stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
|
||||
stats->tx_errors = le64_to_cpu(tx->tx_err);
|
||||
stats->rx_crc_errors =
|
||||
BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
|
||||
stats->rx_frame_errors =
|
||||
BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
|
||||
stats->rx_length_errors =
|
||||
BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
|
||||
BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
|
||||
BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
|
||||
stats->rx_errors =
|
||||
BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
|
||||
BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
|
||||
stats->collisions =
|
||||
BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
|
||||
stats->tx_fifo_errors =
|
||||
BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
|
||||
stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
|
||||
}
|
||||
clear_bit(BNXT_STATE_READ_STATS, &bp->state);
|
||||
}
|
||||
@ -10033,6 +10228,38 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
|
||||
return rc;
|
||||
}
|
||||
|
||||
int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
|
||||
u32 *reg_buf)
|
||||
{
|
||||
struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr;
|
||||
struct hwrm_dbg_read_direct_input req = {0};
|
||||
__le32 *dbg_reg_buf;
|
||||
dma_addr_t mapping;
|
||||
int rc, i;
|
||||
|
||||
dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4,
|
||||
&mapping, GFP_KERNEL);
|
||||
if (!dbg_reg_buf)
|
||||
return -ENOMEM;
|
||||
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1);
|
||||
req.host_dest_addr = cpu_to_le64(mapping);
|
||||
req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
|
||||
req.read_len32 = cpu_to_le32(num_words);
|
||||
mutex_lock(&bp->hwrm_cmd_lock);
|
||||
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
|
||||
if (rc || resp->error_code) {
|
||||
rc = -EIO;
|
||||
goto dbg_rd_reg_exit;
|
||||
}
|
||||
for (i = 0; i < num_words; i++)
|
||||
reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
|
||||
|
||||
dbg_rd_reg_exit:
|
||||
mutex_unlock(&bp->hwrm_cmd_lock);
|
||||
dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
|
||||
u32 ring_id, u32 *prod, u32 *cons)
|
||||
{
|
||||
@ -10177,8 +10404,7 @@ static void bnxt_timer(struct timer_list *t)
|
||||
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
|
||||
bnxt_fw_health_check(bp);
|
||||
|
||||
if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
|
||||
bp->stats_coal_ticks) {
|
||||
if (bp->link_info.link_up && bp->stats_coal_ticks) {
|
||||
set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
|
||||
bnxt_queue_sp_work(bp);
|
||||
}
|
||||
@ -10464,9 +10690,9 @@ static void bnxt_sp_task(struct work_struct *work)
|
||||
if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
|
||||
bnxt_hwrm_exec_fwd_req(bp);
|
||||
if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
|
||||
bnxt_hwrm_port_qstats(bp);
|
||||
bnxt_hwrm_port_qstats_ext(bp);
|
||||
bnxt_hwrm_pcie_qstats(bp);
|
||||
bnxt_hwrm_port_qstats(bp, 0);
|
||||
bnxt_hwrm_port_qstats_ext(bp, 0);
|
||||
bnxt_accumulate_all_stats(bp);
|
||||
}
|
||||
|
||||
if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
|
||||
|
@ -919,6 +919,14 @@ struct bnxt_sw_stats {
|
||||
struct bnxt_cmn_sw_stats cmn;
|
||||
};
|
||||
|
||||
struct bnxt_stats_mem {
|
||||
u64 *sw_stats;
|
||||
u64 *hw_masks;
|
||||
void *hw_stats;
|
||||
dma_addr_t hw_stats_map;
|
||||
int len;
|
||||
};
|
||||
|
||||
struct bnxt_cp_ring_info {
|
||||
struct bnxt_napi *bnapi;
|
||||
u32 cp_raw_cons;
|
||||
@ -943,8 +951,7 @@ struct bnxt_cp_ring_info {
|
||||
|
||||
dma_addr_t cp_desc_mapping[MAX_CP_PAGES];
|
||||
|
||||
struct ctx_hw_stats *hw_stats;
|
||||
dma_addr_t hw_stats_map;
|
||||
struct bnxt_stats_mem stats;
|
||||
u32 hw_stats_ctx_id;
|
||||
|
||||
struct bnxt_sw_stats sw_stats;
|
||||
@ -1135,6 +1142,50 @@ struct bnxt_ntuple_filter {
|
||||
#define BNXT_FLTR_UPDATE 1
|
||||
};
|
||||
|
||||
struct hwrm_port_phy_qcfg_output_compat {
|
||||
__le16 error_code;
|
||||
__le16 req_type;
|
||||
__le16 seq_id;
|
||||
__le16 resp_len;
|
||||
u8 link;
|
||||
u8 link_signal_mode;
|
||||
__le16 link_speed;
|
||||
u8 duplex_cfg;
|
||||
u8 pause;
|
||||
__le16 support_speeds;
|
||||
__le16 force_link_speed;
|
||||
u8 auto_mode;
|
||||
u8 auto_pause;
|
||||
__le16 auto_link_speed;
|
||||
__le16 auto_link_speed_mask;
|
||||
u8 wirespeed;
|
||||
u8 lpbk;
|
||||
u8 force_pause;
|
||||
u8 module_status;
|
||||
__le32 preemphasis;
|
||||
u8 phy_maj;
|
||||
u8 phy_min;
|
||||
u8 phy_bld;
|
||||
u8 phy_type;
|
||||
u8 media_type;
|
||||
u8 xcvr_pkg_type;
|
||||
u8 eee_config_phy_addr;
|
||||
u8 parallel_detect;
|
||||
__le16 link_partner_adv_speeds;
|
||||
u8 link_partner_adv_auto_mode;
|
||||
u8 link_partner_adv_pause;
|
||||
__le16 adv_eee_link_speed_mask;
|
||||
__le16 link_partner_adv_eee_link_speed_mask;
|
||||
__le32 xcvr_identifier_type_tx_lpi_timer;
|
||||
__le16 fec_cfg;
|
||||
u8 duplex_state;
|
||||
u8 option_flags;
|
||||
char phy_vendor_name[16];
|
||||
char phy_vendor_partnumber[16];
|
||||
u8 unused_0[7];
|
||||
u8 valid;
|
||||
};
|
||||
|
||||
struct bnxt_link_info {
|
||||
u8 phy_type;
|
||||
u8 media_type;
|
||||
@ -1253,6 +1304,9 @@ struct bnxt_test_info {
|
||||
char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
|
||||
};
|
||||
|
||||
#define CHIMP_REG_VIEW_ADDR \
|
||||
((bp->flags & BNXT_FLAG_CHIP_P5) ? 0x80000000 : 0xb1000000)
|
||||
|
||||
#define BNXT_GRCPF_REG_CHIMP_COMM 0x0
|
||||
#define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
|
||||
#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
|
||||
@ -1566,7 +1620,6 @@ struct bnxt {
|
||||
#define BNXT_FLAG_DIM 0x2000000
|
||||
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
|
||||
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000
|
||||
#define BNXT_FLAG_PCIE_STATS 0x40000000
|
||||
|
||||
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
|
||||
BNXT_FLAG_RFS | \
|
||||
@ -1719,6 +1772,7 @@ struct bnxt {
|
||||
#define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000
|
||||
#define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000
|
||||
#define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000
|
||||
#define BNXT_FW_CAP_PORT_STATS_NO_RESET 0x10000000
|
||||
|
||||
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
|
||||
u32 hwrm_spec_code;
|
||||
@ -1733,17 +1787,9 @@ struct bnxt {
|
||||
dma_addr_t hwrm_cmd_kong_resp_dma_addr;
|
||||
|
||||
struct rtnl_link_stats64 net_stats_prev;
|
||||
struct rx_port_stats *hw_rx_port_stats;
|
||||
struct tx_port_stats *hw_tx_port_stats;
|
||||
struct rx_port_stats_ext *hw_rx_port_stats_ext;
|
||||
struct tx_port_stats_ext *hw_tx_port_stats_ext;
|
||||
struct pcie_ctx_hw_stats *hw_pcie_stats;
|
||||
dma_addr_t hw_rx_port_stats_map;
|
||||
dma_addr_t hw_tx_port_stats_map;
|
||||
dma_addr_t hw_rx_port_stats_ext_map;
|
||||
dma_addr_t hw_tx_port_stats_ext_map;
|
||||
dma_addr_t hw_pcie_stats_map;
|
||||
int hw_port_stats_size;
|
||||
struct bnxt_stats_mem port_stats;
|
||||
struct bnxt_stats_mem rx_port_stats_ext;
|
||||
struct bnxt_stats_mem tx_port_stats_ext;
|
||||
u16 fw_rx_stats_ext_size;
|
||||
u16 fw_tx_stats_ext_size;
|
||||
u16 hw_ring_stats_size;
|
||||
@ -1885,12 +1931,27 @@ struct bnxt {
|
||||
struct device *hwmon_dev;
|
||||
};
|
||||
|
||||
#define BNXT_GET_RING_STATS64(sw, counter) \
|
||||
(*((sw) + offsetof(struct ctx_hw_stats, counter) / 8))
|
||||
|
||||
#define BNXT_GET_RX_PORT_STATS64(sw, counter) \
|
||||
(*((sw) + offsetof(struct rx_port_stats, counter) / 8))
|
||||
|
||||
#define BNXT_GET_TX_PORT_STATS64(sw, counter) \
|
||||
(*((sw) + offsetof(struct tx_port_stats, counter) / 8))
|
||||
|
||||
#define BNXT_PORT_STATS_SIZE \
|
||||
(sizeof(struct rx_port_stats) + sizeof(struct tx_port_stats) + 1024)
|
||||
|
||||
#define BNXT_TX_PORT_STATS_BYTE_OFFSET \
|
||||
(sizeof(struct rx_port_stats) + 512)
|
||||
|
||||
#define BNXT_RX_STATS_OFFSET(counter) \
|
||||
(offsetof(struct rx_port_stats, counter) / 8)
|
||||
|
||||
#define BNXT_TX_STATS_OFFSET(counter) \
|
||||
((offsetof(struct tx_port_stats, counter) + \
|
||||
sizeof(struct rx_port_stats) + 512) / 8)
|
||||
BNXT_TX_PORT_STATS_BYTE_OFFSET) / 8)
|
||||
|
||||
#define BNXT_RX_STATS_EXT_OFFSET(counter) \
|
||||
(offsetof(struct rx_port_stats_ext, counter) / 8)
|
||||
@ -1898,9 +1959,6 @@ struct bnxt {
|
||||
#define BNXT_TX_STATS_EXT_OFFSET(counter) \
|
||||
(offsetof(struct tx_port_stats_ext, counter) / 8)
|
||||
|
||||
#define BNXT_PCIE_STATS_OFFSET(counter) \
|
||||
(offsetof(struct pcie_ctx_hw_stats, counter) / 8)
|
||||
|
||||
#define BNXT_HW_FEATURE_VLAN_ALL_RX \
|
||||
(NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)
|
||||
#define BNXT_HW_FEATURE_VLAN_ALL_TX \
|
||||
@ -2062,6 +2120,8 @@ int bnxt_open_nic(struct bnxt *, bool, bool);
|
||||
int bnxt_half_open_nic(struct bnxt *bp);
|
||||
void bnxt_half_close_nic(struct bnxt *bp);
|
||||
int bnxt_close_nic(struct bnxt *, bool, bool);
|
||||
int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
|
||||
u32 *reg_buf);
|
||||
void bnxt_fw_exception(struct bnxt *bp);
|
||||
void bnxt_fw_reset(struct bnxt *bp);
|
||||
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
|
||||
|
@ -544,7 +544,7 @@ static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
|
||||
static int bnxt_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
|
||||
{
|
||||
struct bnxt *bp = netdev_priv(dev);
|
||||
__le64 *stats = (__le64 *)bp->hw_rx_port_stats;
|
||||
__le64 *stats = bp->port_stats.hw_stats;
|
||||
struct ieee_pfc *my_pfc = bp->ieee_pfc;
|
||||
long rx_off, tx_off;
|
||||
int i, rc;
|
||||
|
@ -142,7 +142,7 @@ static const char * const bnxt_ring_rx_stats_str[] = {
|
||||
"rx_mcast_packets",
|
||||
"rx_bcast_packets",
|
||||
"rx_discards",
|
||||
"rx_drops",
|
||||
"rx_errors",
|
||||
"rx_ucast_bytes",
|
||||
"rx_mcast_bytes",
|
||||
"rx_bcast_bytes",
|
||||
@ -152,8 +152,8 @@ static const char * const bnxt_ring_tx_stats_str[] = {
|
||||
"tx_ucast_packets",
|
||||
"tx_mcast_packets",
|
||||
"tx_bcast_packets",
|
||||
"tx_errors",
|
||||
"tx_discards",
|
||||
"tx_drops",
|
||||
"tx_ucast_bytes",
|
||||
"tx_mcast_bytes",
|
||||
"tx_bcast_bytes",
|
||||
@ -293,9 +293,6 @@ static const char * const bnxt_cmn_sw_stats_str[] = {
|
||||
BNXT_TX_STATS_PRI_ENTRY(counter, 6), \
|
||||
BNXT_TX_STATS_PRI_ENTRY(counter, 7)
|
||||
|
||||
#define BNXT_PCIE_STATS_ENTRY(counter) \
|
||||
{ BNXT_PCIE_STATS_OFFSET(counter), __stringify(counter) }
|
||||
|
||||
enum {
|
||||
RX_TOTAL_DISCARDS,
|
||||
TX_TOTAL_DISCARDS,
|
||||
@ -454,24 +451,6 @@ static const struct {
|
||||
BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
|
||||
};
|
||||
|
||||
static const struct {
|
||||
long offset;
|
||||
char string[ETH_GSTRING_LEN];
|
||||
} bnxt_pcie_stats_arr[] = {
|
||||
BNXT_PCIE_STATS_ENTRY(pcie_pl_signal_integrity),
|
||||
BNXT_PCIE_STATS_ENTRY(pcie_dl_signal_integrity),
|
||||
BNXT_PCIE_STATS_ENTRY(pcie_tl_signal_integrity),
|
||||
BNXT_PCIE_STATS_ENTRY(pcie_link_integrity),
|
||||
BNXT_PCIE_STATS_ENTRY(pcie_tx_traffic_rate),
|
||||
BNXT_PCIE_STATS_ENTRY(pcie_rx_traffic_rate),
|
||||
BNXT_PCIE_STATS_ENTRY(pcie_tx_dllp_statistics),
|
||||
BNXT_PCIE_STATS_ENTRY(pcie_rx_dllp_statistics),
|
||||
BNXT_PCIE_STATS_ENTRY(pcie_equalization_time),
|
||||
BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[0]),
|
||||
BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[2]),
|
||||
BNXT_PCIE_STATS_ENTRY(pcie_recovery_histogram),
|
||||
};
|
||||
|
||||
#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats)
|
||||
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
|
||||
#define BNXT_NUM_STATS_PRI \
|
||||
@ -479,7 +458,6 @@ static const struct {
|
||||
ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \
|
||||
ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \
|
||||
ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
|
||||
#define BNXT_NUM_PCIE_STATS ARRAY_SIZE(bnxt_pcie_stats_arr)
|
||||
|
||||
static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
|
||||
{
|
||||
@ -526,9 +504,6 @@ static int bnxt_get_num_stats(struct bnxt *bp)
|
||||
num_stats += BNXT_NUM_STATS_PRI;
|
||||
}
|
||||
|
||||
if (bp->flags & BNXT_FLAG_PCIE_STATS)
|
||||
num_stats += BNXT_NUM_PCIE_STATS;
|
||||
|
||||
return num_stats;
|
||||
}
|
||||
|
||||
@ -584,19 +559,19 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
|
||||
for (i = 0; i < bp->cp_nr_rings; i++) {
|
||||
struct bnxt_napi *bnapi = bp->bnapi[i];
|
||||
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
|
||||
__le64 *hw_stats = (__le64 *)cpr->hw_stats;
|
||||
u64 *sw_stats = cpr->stats.sw_stats;
|
||||
u64 *sw;
|
||||
int k;
|
||||
|
||||
if (is_rx_ring(bp, i)) {
|
||||
for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
|
||||
buf[j] = le64_to_cpu(hw_stats[k]);
|
||||
buf[j] = sw_stats[k];
|
||||
}
|
||||
if (is_tx_ring(bp, i)) {
|
||||
k = NUM_RING_RX_HW_STATS;
|
||||
for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
|
||||
j++, k++)
|
||||
buf[j] = le64_to_cpu(hw_stats[k]);
|
||||
buf[j] = sw_stats[k];
|
||||
}
|
||||
if (!tpa_stats || !is_rx_ring(bp, i))
|
||||
goto skip_tpa_ring_stats;
|
||||
@ -604,7 +579,7 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
|
||||
k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
|
||||
for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
|
||||
tpa_stats; j++, k++)
|
||||
buf[j] = le64_to_cpu(hw_stats[k]);
|
||||
buf[j] = sw_stats[k];
|
||||
|
||||
skip_tpa_ring_stats:
|
||||
sw = (u64 *)&cpr->sw_stats.rx;
|
||||
@ -618,9 +593,9 @@ skip_tpa_ring_stats:
|
||||
buf[j] = sw[k];
|
||||
|
||||
bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
|
||||
le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
|
||||
BNXT_GET_RING_STATS64(sw_stats, rx_discard_pkts);
|
||||
bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter +=
|
||||
le64_to_cpu(cpr->hw_stats->tx_discard_pkts);
|
||||
BNXT_GET_RING_STATS64(sw_stats, tx_discard_pkts);
|
||||
}
|
||||
|
||||
for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
|
||||
@ -628,60 +603,50 @@ skip_tpa_ring_stats:
|
||||
|
||||
skip_ring_stats:
|
||||
if (bp->flags & BNXT_FLAG_PORT_STATS) {
|
||||
__le64 *port_stats = (__le64 *)bp->hw_rx_port_stats;
|
||||
u64 *port_stats = bp->port_stats.sw_stats;
|
||||
|
||||
for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) {
|
||||
buf[j] = le64_to_cpu(*(port_stats +
|
||||
bnxt_port_stats_arr[i].offset));
|
||||
}
|
||||
for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++)
|
||||
buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset);
|
||||
}
|
||||
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
|
||||
__le64 *rx_port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext;
|
||||
__le64 *tx_port_stats_ext = (__le64 *)bp->hw_tx_port_stats_ext;
|
||||
u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
|
||||
u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;
|
||||
|
||||
for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) {
|
||||
buf[j] = le64_to_cpu(*(rx_port_stats_ext +
|
||||
bnxt_port_stats_ext_arr[i].offset));
|
||||
buf[j] = *(rx_port_stats_ext +
|
||||
bnxt_port_stats_ext_arr[i].offset);
|
||||
}
|
||||
for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) {
|
||||
buf[j] = le64_to_cpu(*(tx_port_stats_ext +
|
||||
bnxt_tx_port_stats_ext_arr[i].offset));
|
||||
buf[j] = *(tx_port_stats_ext +
|
||||
bnxt_tx_port_stats_ext_arr[i].offset);
|
||||
}
|
||||
if (bp->pri2cos_valid) {
|
||||
for (i = 0; i < 8; i++, j++) {
|
||||
long n = bnxt_rx_bytes_pri_arr[i].base_off +
|
||||
bp->pri2cos_idx[i];
|
||||
|
||||
buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
|
||||
buf[j] = *(rx_port_stats_ext + n);
|
||||
}
|
||||
for (i = 0; i < 8; i++, j++) {
|
||||
long n = bnxt_rx_pkts_pri_arr[i].base_off +
|
||||
bp->pri2cos_idx[i];
|
||||
|
||||
buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
|
||||
buf[j] = *(rx_port_stats_ext + n);
|
||||
}
|
||||
for (i = 0; i < 8; i++, j++) {
|
||||
long n = bnxt_tx_bytes_pri_arr[i].base_off +
|
||||
bp->pri2cos_idx[i];
|
||||
|
||||
buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
|
||||
buf[j] = *(tx_port_stats_ext + n);
|
||||
}
|
||||
for (i = 0; i < 8; i++, j++) {
|
||||
long n = bnxt_tx_pkts_pri_arr[i].base_off +
|
||||
bp->pri2cos_idx[i];
|
||||
|
||||
buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
|
||||
buf[j] = *(tx_port_stats_ext + n);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (bp->flags & BNXT_FLAG_PCIE_STATS) {
|
||||
__le64 *pcie_stats = (__le64 *)bp->hw_pcie_stats;
|
||||
|
||||
for (i = 0; i < BNXT_NUM_PCIE_STATS; i++, j++) {
|
||||
buf[j] = le64_to_cpu(*(pcie_stats +
|
||||
bnxt_pcie_stats_arr[i].offset));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
|
||||
@ -782,12 +747,6 @@ skip_tpa_stats:
|
||||
}
|
||||
}
|
||||
}
|
||||
if (bp->flags & BNXT_FLAG_PCIE_STATS) {
|
||||
for (i = 0; i < BNXT_NUM_PCIE_STATS; i++) {
|
||||
strcpy(buf, bnxt_pcie_stats_arr[i].string);
|
||||
buf += ETH_GSTRING_LEN;
|
||||
}
|
||||
}
|
||||
break;
|
||||
case ETH_SS_TEST:
|
||||
if (bp->num_tests)
|
||||
@ -1365,6 +1324,59 @@ static void bnxt_get_drvinfo(struct net_device *dev,
|
||||
info->regdump_len = 0;
|
||||
}
|
||||
|
||||
static int bnxt_get_regs_len(struct net_device *dev)
|
||||
{
|
||||
struct bnxt *bp = netdev_priv(dev);
|
||||
int reg_len;
|
||||
|
||||
reg_len = BNXT_PXP_REG_LEN;
|
||||
|
||||
if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
|
||||
reg_len += sizeof(struct pcie_ctx_hw_stats);
|
||||
|
||||
return reg_len;
|
||||
}
|
||||
|
||||
static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
|
||||
void *_p)
|
||||
{
|
||||
struct pcie_ctx_hw_stats *hw_pcie_stats;
|
||||
struct hwrm_pcie_qstats_input req = {0};
|
||||
struct bnxt *bp = netdev_priv(dev);
|
||||
dma_addr_t hw_pcie_stats_addr;
|
||||
int rc;
|
||||
|
||||
regs->version = 0;
|
||||
bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
|
||||
|
||||
if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
|
||||
return;
|
||||
|
||||
hw_pcie_stats = dma_alloc_coherent(&bp->pdev->dev,
|
||||
sizeof(*hw_pcie_stats),
|
||||
&hw_pcie_stats_addr, GFP_KERNEL);
|
||||
if (!hw_pcie_stats)
|
||||
return;
|
||||
|
||||
regs->version = 1;
|
||||
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
|
||||
req.pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
|
||||
req.pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
|
||||
mutex_lock(&bp->hwrm_cmd_lock);
|
||||
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
|
||||
if (!rc) {
|
||||
__le64 *src = (__le64 *)hw_pcie_stats;
|
||||
u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
|
||||
dst[i] = le64_to_cpu(src[i]);
|
||||
}
|
||||
mutex_unlock(&bp->hwrm_cmd_lock);
|
||||
dma_free_coherent(&bp->pdev->dev, sizeof(*hw_pcie_stats), hw_pcie_stats,
|
||||
hw_pcie_stats_addr);
|
||||
}
|
||||
|
||||
static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
|
||||
{
|
||||
struct bnxt *bp = netdev_priv(dev);
|
||||
@ -3640,6 +3652,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
|
||||
.get_pauseparam = bnxt_get_pauseparam,
|
||||
.set_pauseparam = bnxt_set_pauseparam,
|
||||
.get_drvinfo = bnxt_get_drvinfo,
|
||||
.get_regs_len = bnxt_get_regs_len,
|
||||
.get_regs = bnxt_get_regs,
|
||||
.get_wol = bnxt_get_wol,
|
||||
.set_wol = bnxt_set_wol,
|
||||
.get_coalesce = bnxt_get_coalesce,
|
||||
|
@ -84,6 +84,8 @@ struct hwrm_dbg_cmn_output {
|
||||
ETH_RESET_PHY | ETH_RESET_RAM) \
|
||||
<< ETH_RESET_SHARED_SHIFT)
|
||||
|
||||
#define BNXT_PXP_REG_LEN 0x3110
|
||||
|
||||
extern const struct ethtool_ops bnxt_ethtool_ops;
|
||||
|
||||
u32 bnxt_get_rxfh_indir_size(struct net_device *dev);
|
||||
|
@ -169,9 +169,14 @@ struct cmd_nums {
|
||||
#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
|
||||
#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL
|
||||
#define HWRM_RING_AGGINT_QCAPS 0x54UL
|
||||
#define HWRM_RING_SCHQ_ALLOC 0x55UL
|
||||
#define HWRM_RING_SCHQ_CFG 0x56UL
|
||||
#define HWRM_RING_SCHQ_FREE 0x57UL
|
||||
#define HWRM_RING_RESET 0x5eUL
|
||||
#define HWRM_RING_GRP_ALLOC 0x60UL
|
||||
#define HWRM_RING_GRP_FREE 0x61UL
|
||||
#define HWRM_RING_CFG 0x62UL
|
||||
#define HWRM_RING_QCFG 0x63UL
|
||||
#define HWRM_RESERVED5 0x64UL
|
||||
#define HWRM_RESERVED6 0x65UL
|
||||
#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL
|
||||
@ -224,6 +229,7 @@ struct cmd_nums {
|
||||
#define HWRM_FW_IPC_MAILBOX 0xccUL
|
||||
#define HWRM_FW_ECN_CFG 0xcdUL
|
||||
#define HWRM_FW_ECN_QCFG 0xceUL
|
||||
#define HWRM_FW_SECURE_CFG 0xcfUL
|
||||
#define HWRM_EXEC_FWD_RESP 0xd0UL
|
||||
#define HWRM_REJECT_FWD_RESP 0xd1UL
|
||||
#define HWRM_FWD_RESP 0xd2UL
|
||||
@ -337,6 +343,7 @@ struct cmd_nums {
|
||||
#define HWRM_FUNC_VF_BW_QCFG 0x196UL
|
||||
#define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
|
||||
#define HWRM_FUNC_QSTATS_EXT 0x198UL
|
||||
#define HWRM_STAT_EXT_CTX_QUERY 0x199UL
|
||||
#define HWRM_SELFTEST_QLIST 0x200UL
|
||||
#define HWRM_SELFTEST_EXEC 0x201UL
|
||||
#define HWRM_SELFTEST_IRQ 0x202UL
|
||||
@ -353,24 +360,30 @@ struct cmd_nums {
|
||||
#define HWRM_TF_VERSION_GET 0x2bdUL
|
||||
#define HWRM_TF_SESSION_OPEN 0x2c6UL
|
||||
#define HWRM_TF_SESSION_ATTACH 0x2c7UL
|
||||
#define HWRM_TF_SESSION_CLOSE 0x2c8UL
|
||||
#define HWRM_TF_SESSION_QCFG 0x2c9UL
|
||||
#define HWRM_TF_SESSION_RESC_QCAPS 0x2caUL
|
||||
#define HWRM_TF_SESSION_RESC_ALLOC 0x2cbUL
|
||||
#define HWRM_TF_SESSION_RESC_FREE 0x2ccUL
|
||||
#define HWRM_TF_SESSION_RESC_FLUSH 0x2cdUL
|
||||
#define HWRM_TF_TBL_TYPE_GET 0x2d0UL
|
||||
#define HWRM_TF_TBL_TYPE_SET 0x2d1UL
|
||||
#define HWRM_TF_CTXT_MEM_RGTR 0x2daUL
|
||||
#define HWRM_TF_CTXT_MEM_UNRGTR 0x2dbUL
|
||||
#define HWRM_TF_EXT_EM_QCAPS 0x2dcUL
|
||||
#define HWRM_TF_EXT_EM_OP 0x2ddUL
|
||||
#define HWRM_TF_EXT_EM_CFG 0x2deUL
|
||||
#define HWRM_TF_EXT_EM_QCFG 0x2dfUL
|
||||
#define HWRM_TF_TCAM_SET 0x2eeUL
|
||||
#define HWRM_TF_TCAM_GET 0x2efUL
|
||||
#define HWRM_TF_TCAM_MOVE 0x2f0UL
|
||||
#define HWRM_TF_TCAM_FREE 0x2f1UL
|
||||
#define HWRM_TF_SESSION_REGISTER 0x2c8UL
|
||||
#define HWRM_TF_SESSION_UNREGISTER 0x2c9UL
|
||||
#define HWRM_TF_SESSION_CLOSE 0x2caUL
|
||||
#define HWRM_TF_SESSION_QCFG 0x2cbUL
|
||||
#define HWRM_TF_SESSION_RESC_QCAPS 0x2ccUL
|
||||
#define HWRM_TF_SESSION_RESC_ALLOC 0x2cdUL
|
||||
#define HWRM_TF_SESSION_RESC_FREE 0x2ceUL
|
||||
#define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL
|
||||
#define HWRM_TF_TBL_TYPE_GET 0x2daUL
|
||||
#define HWRM_TF_TBL_TYPE_SET 0x2dbUL
|
||||
#define HWRM_TF_CTXT_MEM_RGTR 0x2e4UL
|
||||
#define HWRM_TF_CTXT_MEM_UNRGTR 0x2e5UL
|
||||
#define HWRM_TF_EXT_EM_QCAPS 0x2e6UL
|
||||
#define HWRM_TF_EXT_EM_OP 0x2e7UL
|
||||
#define HWRM_TF_EXT_EM_CFG 0x2e8UL
|
||||
#define HWRM_TF_EXT_EM_QCFG 0x2e9UL
|
||||
#define HWRM_TF_EM_INSERT 0x2eaUL
|
||||
#define HWRM_TF_EM_DELETE 0x2ebUL
|
||||
#define HWRM_TF_TCAM_SET 0x2f8UL
|
||||
#define HWRM_TF_TCAM_GET 0x2f9UL
|
||||
#define HWRM_TF_TCAM_MOVE 0x2faUL
|
||||
#define HWRM_TF_TCAM_FREE 0x2fbUL
|
||||
#define HWRM_TF_GLOBAL_CFG_SET 0x2fcUL
|
||||
#define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL
|
||||
#define HWRM_SV 0x400UL
|
||||
#define HWRM_DBG_READ_DIRECT 0xff10UL
|
||||
#define HWRM_DBG_READ_INDIRECT 0xff11UL
|
||||
@ -391,6 +404,7 @@ struct cmd_nums {
|
||||
#define HWRM_DBG_QCAPS 0xff20UL
|
||||
#define HWRM_DBG_QCFG 0xff21UL
|
||||
#define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL
|
||||
#define HWRM_NVM_REQ_ARBITRATION 0xffedUL
|
||||
#define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
|
||||
#define HWRM_NVM_VALIDATE_OPTION 0xffefUL
|
||||
#define HWRM_NVM_FLUSH 0xfff0UL
|
||||
@ -464,8 +478,8 @@ struct hwrm_err_output {
|
||||
#define HWRM_VERSION_MAJOR 1
|
||||
#define HWRM_VERSION_MINOR 10
|
||||
#define HWRM_VERSION_UPDATE 1
|
||||
#define HWRM_VERSION_RSVD 33
|
||||
#define HWRM_VERSION_STR "1.10.1.33"
|
||||
#define HWRM_VERSION_RSVD 54
|
||||
#define HWRM_VERSION_STR "1.10.1.54"
|
||||
|
||||
/* hwrm_ver_get_input (size:192b/24B) */
|
||||
struct hwrm_ver_get_input {
|
||||
@ -1094,6 +1108,8 @@ struct hwrm_func_vf_cfg_input {
|
||||
#define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL
|
||||
#define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL
|
||||
#define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL
|
||||
#define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x100UL
|
||||
#define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x200UL
|
||||
__le16 num_rsscos_ctxs;
|
||||
__le16 num_cmpl_rings;
|
||||
__le16 num_tx_rings;
|
||||
@ -1189,10 +1205,16 @@ struct hwrm_func_qcaps_output {
|
||||
__le16 max_sp_tx_rings;
|
||||
u8 unused_0[2];
|
||||
__le32 flags_ext;
|
||||
#define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
|
||||
#define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
|
||||
#define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
|
||||
u8 unused_1[3];
|
||||
#define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
|
||||
#define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
|
||||
#define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
|
||||
#define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL
|
||||
#define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL
|
||||
#define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL
|
||||
#define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
|
||||
#define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
|
||||
u8 max_schqs;
|
||||
u8 unused_1[2];
|
||||
u8 valid;
|
||||
};
|
||||
|
||||
@ -1226,6 +1248,8 @@ struct hwrm_func_qcfg_output {
|
||||
#define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
|
||||
#define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL
|
||||
#define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL
|
||||
#define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL
|
||||
#define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL
|
||||
u8 mac_address[6];
|
||||
__le16 pci_id;
|
||||
__le16 alloc_rsscos_ctx;
|
||||
@ -1321,7 +1345,7 @@ struct hwrm_func_qcfg_output {
|
||||
u8 valid;
|
||||
};
|
||||
|
||||
/* hwrm_func_cfg_input (size:704b/88B) */
|
||||
/* hwrm_func_cfg_input (size:768b/96B) */
|
||||
struct hwrm_func_cfg_input {
|
||||
__le16 req_type;
|
||||
__le16 cmpl_ring;
|
||||
@ -1352,30 +1376,35 @@ struct hwrm_func_cfg_input {
|
||||
#define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL
|
||||
#define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL
|
||||
#define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL
|
||||
#define FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS 0x4000000UL
|
||||
#define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x8000000UL
|
||||
#define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL
|
||||
__le32 enables;
|
||||
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
|
||||
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
|
||||
#define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
|
||||
#define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
|
||||
#define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
|
||||
#define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
|
||||
#define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
|
||||
#define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
|
||||
#define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
|
||||
#define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
|
||||
#define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
|
||||
#define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
|
||||
#define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
|
||||
#define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
|
||||
#define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
|
||||
#define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
|
||||
#define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
|
||||
#define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
|
||||
#define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
|
||||
#define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
|
||||
#define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
|
||||
#define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL
|
||||
#define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
|
||||
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
|
||||
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
|
||||
#define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
|
||||
#define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
|
||||
#define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
|
||||
#define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
|
||||
#define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
|
||||
#define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
|
||||
#define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
|
||||
#define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
|
||||
#define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
|
||||
#define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
|
||||
#define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
|
||||
#define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
|
||||
#define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
|
||||
#define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
|
||||
#define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
|
||||
#define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
|
||||
#define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
|
||||
#define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
|
||||
#define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
|
||||
#define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL
|
||||
#define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
|
||||
#define FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT 0x800000UL
|
||||
#define FUNC_CFG_REQ_ENABLES_SCHQ_ID 0x1000000UL
|
||||
__le16 mtu;
|
||||
__le16 mru;
|
||||
__le16 num_rsscos_ctxs;
|
||||
@ -1449,6 +1478,8 @@ struct hwrm_func_cfg_input {
|
||||
#define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL
|
||||
#define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4
|
||||
__le16 num_mcast_filters;
|
||||
__le16 schq_id;
|
||||
u8 unused_0[6];
|
||||
};
|
||||
|
||||
/* hwrm_func_cfg_output (size:128b/16B) */
|
||||
@ -1507,7 +1538,7 @@ struct hwrm_func_qstats_output {
|
||||
u8 valid;
|
||||
};
|
||||
|
||||
/* hwrm_func_qstats_ext_input (size:192b/24B) */
|
||||
/* hwrm_func_qstats_ext_input (size:256b/32B) */
|
||||
struct hwrm_func_qstats_ext_input {
|
||||
__le16 req_type;
|
||||
__le16 cmpl_ring;
|
||||
@ -1520,7 +1551,12 @@ struct hwrm_func_qstats_ext_input {
|
||||
#define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL
|
||||
#define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL
|
||||
#define FUNC_QSTATS_EXT_REQ_FLAGS_LAST FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK
|
||||
u8 unused_0[5];
|
||||
u8 unused_0[1];
|
||||
__le32 enables;
|
||||
#define FUNC_QSTATS_EXT_REQ_ENABLES_SCHQ_ID 0x1UL
|
||||
__le16 schq_id;
|
||||
__le16 traffic_class;
|
||||
u8 unused_1[4];
|
||||
};
|
||||
|
||||
/* hwrm_func_qstats_ext_output (size:1472b/184B) */
|
||||
@ -1533,15 +1569,15 @@ struct hwrm_func_qstats_ext_output {
|
||||
__le64 rx_mcast_pkts;
|
||||
__le64 rx_bcast_pkts;
|
||||
__le64 rx_discard_pkts;
|
||||
__le64 rx_drop_pkts;
|
||||
__le64 rx_error_pkts;
|
||||
__le64 rx_ucast_bytes;
|
||||
__le64 rx_mcast_bytes;
|
||||
__le64 rx_bcast_bytes;
|
||||
__le64 tx_ucast_pkts;
|
||||
__le64 tx_mcast_pkts;
|
||||
__le64 tx_bcast_pkts;
|
||||
__le64 tx_error_pkts;
|
||||
__le64 tx_discard_pkts;
|
||||
__le64 tx_drop_pkts;
|
||||
__le64 tx_ucast_bytes;
|
||||
__le64 tx_mcast_bytes;
|
||||
__le64 tx_bcast_bytes;
|
||||
@ -2376,33 +2412,39 @@ struct hwrm_port_phy_cfg_input {
|
||||
__le16 target_id;
|
||||
__le64 resp_addr;
|
||||
__le32 flags;
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE 0x8000UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE 0x10000UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_2XN_ENABLE 0x20000UL
|
||||
#define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_2XN_DISABLE 0x40000UL
|
||||
__le32 enables;
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
#define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
#define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
#define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
#define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
#define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
#define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
#define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
#define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
#define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
#define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
#define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
#define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
#define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED 0x800UL
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK 0x1000UL
__le16 port_id;
__le16 force_link_speed;
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
@@ -2415,7 +2457,6 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
u8 auto_mode;
@@ -2446,7 +2487,6 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
__le16 auto_link_speed_mask;
@@ -2464,7 +2504,6 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_200GB 0x4000UL
u8 wirespeed;
#define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
#define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
@@ -2488,11 +2527,19 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
u8 unused_2[2];
__le16 force_pam4_link_speed;
#define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
#define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB
__le32 tx_lpi_timer;
#define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
#define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
__le32 unused_3;
__le16 auto_link_pam4_speed_mask;
#define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_50G 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_100G 0x2UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_200G 0x4UL
u8 unused_2[2];
};

/* hwrm_port_phy_cfg_output (size:128b/16B) */
@@ -2526,7 +2573,7 @@ struct hwrm_port_phy_qcfg_input {
u8 unused_0[6];
};

/* hwrm_port_phy_qcfg_output (size:768b/96B) */
/* hwrm_port_phy_qcfg_output (size:832b/104B) */
struct hwrm_port_phy_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -2537,7 +2584,10 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
#define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
u8 unused_0;
u8 link_signal_mode;
#define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_NRZ 0x0UL
#define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_PAM4 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_PAM4
__le16 link_speed;
#define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
@@ -2574,7 +2624,6 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_200GB 0x4000UL
__le16 force_link_speed;
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
@@ -2586,7 +2635,6 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
u8 auto_mode;
@@ -2611,7 +2659,6 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB
__le16 auto_link_speed_mask;
@@ -2629,7 +2676,6 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_200GB 0x4000UL
u8 wirespeed;
#define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
#define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
@@ -2763,13 +2809,21 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28
__le16 fec_cfg;
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_SUPPORTED 0x80UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED 0x100UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_SUPPORTED 0x200UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_ENABLED 0x400UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ACTIVE 0x800UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ACTIVE 0x1000UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ACTIVE 0x2000UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_ACTIVE 0x4000UL
u8 duplex_state;
#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
@@ -2778,7 +2832,24 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
char phy_vendor_name[16];
char phy_vendor_partnumber[16];
u8 unused_2[7];
__le16 support_pam4_speeds;
#define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G 0x1UL
#define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G 0x2UL
#define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G 0x4UL
__le16 force_pam4_link_speed;
#define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
#define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB
__le16 auto_pam4_link_speed_mask;
#define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_50G 0x1UL
#define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_100G 0x2UL
#define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_200G 0x4UL
__le16 link_partner_pam4_adv_speeds;
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL
u8 unused_0[7];
u8 valid;
};

@@ -3304,19 +3375,20 @@ struct hwrm_port_phy_qcaps_input {
u8 unused_0[6];
};

/* hwrm_port_phy_qcaps_output (size:192b/24B) */
/* hwrm_port_phy_qcaps_output (size:256b/32B) */
struct hwrm_port_phy_qcaps_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
u8 flags;
#define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
#define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
#define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
#define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
#define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xf0UL
#define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 4
#define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
#define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
#define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
#define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
#define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL
#define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xe0UL
#define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 5
u8 port_cnt;
#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
@@ -3339,7 +3411,6 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_200GB 0x4000UL
__le16 supported_speeds_auto_mode;
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
@@ -3355,7 +3426,6 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_200GB 0x4000UL
__le16 supported_speeds_eee_mode;
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
@@ -3372,8 +3442,18 @@ struct hwrm_port_phy_qcaps_output {
__le32 valid_tx_lpi_timer_high;
#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
#define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL
#define PORT_PHY_QCAPS_RESP_VALID_SFT 24
#define PORT_PHY_QCAPS_RESP_RSVD_MASK 0xff000000UL
#define PORT_PHY_QCAPS_RESP_RSVD_SFT 24
__le16 supported_pam4_speeds_auto_mode;
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_50G 0x1UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_100G 0x2UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_200G 0x4UL
__le16 supported_pam4_speeds_force_mode;
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_50G 0x1UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
u8 unused_0[3];
u8 valid;
};

/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
@@ -3812,7 +3892,7 @@ struct hwrm_queue_qportcfg_input {
u8 unused_0;
};

/* hwrm_queue_qportcfg_output (size:256b/32B) */
/* hwrm_queue_qportcfg_output (size:1344b/168B) */
struct hwrm_queue_qportcfg_output {
__le16 error_code;
__le16 req_type;
@@ -3898,6 +3978,49 @@ struct hwrm_queue_qportcfg_output {
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
u8 unused_0;
char qid0_name[16];
char qid1_name[16];
char qid2_name[16];
char qid3_name[16];
char qid4_name[16];
char qid5_name[16];
char qid6_name[16];
char qid7_name[16];
u8 unused_1[7];
u8 valid;
};

/* hwrm_queue_qcfg_input (size:192b/24B) */
struct hwrm_queue_qcfg_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le32 flags;
#define QUEUE_QCFG_REQ_FLAGS_PATH 0x1UL
#define QUEUE_QCFG_REQ_FLAGS_PATH_TX 0x0UL
#define QUEUE_QCFG_REQ_FLAGS_PATH_RX 0x1UL
#define QUEUE_QCFG_REQ_FLAGS_PATH_LAST QUEUE_QCFG_REQ_FLAGS_PATH_RX
__le32 queue_id;
};

/* hwrm_queue_qcfg_output (size:128b/16B) */
struct hwrm_queue_qcfg_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
__le32 queue_len;
u8 service_profile;
#define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN 0xffUL
#define QUEUE_QCFG_RESP_SERVICE_PROFILE_LAST QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN
u8 queue_cfg_info;
#define QUEUE_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
u8 unused_0;
u8 valid;
};

@@ -4938,6 +5061,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL
#define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL
#define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL
#define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL
__le16 vnic_id;
__le16 dflt_ring_grp;
__le16 rss_rule;
@@ -4947,7 +5071,12 @@ struct hwrm_vnic_cfg_input {
__le16 default_rx_ring_id;
__le16 default_cmpl_ring_id;
__le16 queue_id;
u8 unused0[6];
u8 rx_csum_v2_mode;
#define VNIC_CFG_REQ_RX_CSUM_V2_MODE_DEFAULT 0x0UL
#define VNIC_CFG_REQ_RX_CSUM_V2_MODE_ALL_OK 0x1UL
#define VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX 0x2UL
#define VNIC_CFG_REQ_RX_CSUM_V2_MODE_LAST VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX
u8 unused0[5];
};

/* hwrm_vnic_cfg_output (size:128b/16B) */
@@ -4989,6 +5118,7 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
#define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
#define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
#define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
__le16 max_aggs_supported;
u8 unused_1[5];
u8 valid;
@@ -5155,15 +5285,18 @@ struct hwrm_vnic_plcmodes_cfg_input {
#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
#define VNIC_PLCMODES_CFG_REQ_FLAGS_VIRTIO_PLACEMENT 0x40UL
__le32 enables;
#define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
#define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
#define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
#define VNIC_PLCMODES_CFG_REQ_ENABLES_MAX_BDS_VALID 0x8UL
__le32 vnic_id;
__le16 jumbo_thresh;
__le16 hds_offset;
__le16 hds_threshold;
u8 unused_0[6];
__le16 max_bds;
u8 unused_0[4];
};

/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
@@ -5231,6 +5364,7 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
#define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
#define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
#define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
u8 ring_type;
#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
@@ -5246,7 +5380,7 @@ struct hwrm_ring_alloc_input {
__le32 fbo;
u8 page_size;
u8 page_tbl_depth;
u8 unused_1[2];
__le16 schq_id;
__le32 length;
__le16 logical_id;
__le16 cmpl_ring_id;
@@ -5344,11 +5478,12 @@ struct hwrm_ring_reset_input {
__le16 target_id;
__le64 resp_addr;
u8 ring_type;
#define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_RESET_REQ_RING_TYPE_TX 0x1UL
#define RING_RESET_REQ_RING_TYPE_RX 0x2UL
#define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
#define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_ROCE_CMPL
#define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_RESET_REQ_RING_TYPE_TX 0x1UL
#define RING_RESET_REQ_RING_TYPE_RX 0x2UL
#define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
#define RING_RESET_REQ_RING_TYPE_RX_RING_GRP 0x6UL
#define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_RX_RING_GRP
u8 unused_0;
__le16 ring_id;
u8 unused_1[4];
@@ -5529,6 +5664,7 @@ struct hwrm_ring_grp_free_output {
u8 unused_0[7];
u8 valid;
};

#define DEFAULT_FLOW_ID 0xFFFFFFFFUL
#define ROCEV1_FLOW_ID 0xFFFFFFFEUL
#define ROCEV2_FLOW_ID 0xFFFFFFFDUL
@@ -6816,15 +6952,15 @@ struct ctx_hw_stats {
__le64 rx_mcast_pkts;
__le64 rx_bcast_pkts;
__le64 rx_discard_pkts;
__le64 rx_drop_pkts;
__le64 rx_error_pkts;
__le64 rx_ucast_bytes;
__le64 rx_mcast_bytes;
__le64 rx_bcast_bytes;
__le64 tx_ucast_pkts;
__le64 tx_mcast_pkts;
__le64 tx_bcast_pkts;
__le64 tx_error_pkts;
__le64 tx_discard_pkts;
__le64 tx_drop_pkts;
__le64 tx_ucast_bytes;
__le64 tx_mcast_bytes;
__le64 tx_bcast_bytes;
@@ -6840,15 +6976,15 @@ struct ctx_hw_stats_ext {
__le64 rx_mcast_pkts;
__le64 rx_bcast_pkts;
__le64 rx_discard_pkts;
__le64 rx_drop_pkts;
__le64 rx_error_pkts;
__le64 rx_ucast_bytes;
__le64 rx_mcast_bytes;
__le64 rx_bcast_bytes;
__le64 tx_ucast_pkts;
__le64 tx_mcast_pkts;
__le64 tx_bcast_pkts;
__le64 tx_error_pkts;
__le64 tx_discard_pkts;
__le64 tx_drop_pkts;
__le64 tx_ucast_bytes;
__le64 tx_mcast_bytes;
__le64 tx_bcast_bytes;
@@ -6915,7 +7051,9 @@ struct hwrm_stat_ctx_query_input {
__le16 target_id;
__le64 resp_addr;
__le32 stat_ctx_id;
u8 unused_0[4];
u8 flags;
#define STAT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
u8 unused_0[3];
};

/* hwrm_stat_ctx_query_output (size:1408b/176B) */
@@ -6948,6 +7086,50 @@ struct hwrm_stat_ctx_query_output {
u8 valid;
};

/* hwrm_stat_ext_ctx_query_input (size:192b/24B) */
struct hwrm_stat_ext_ctx_query_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le32 stat_ctx_id;
u8 flags;
#define STAT_EXT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
u8 unused_0[3];
};

/* hwrm_stat_ext_ctx_query_output (size:1472b/184B) */
struct hwrm_stat_ext_ctx_query_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
__le64 rx_ucast_pkts;
__le64 rx_mcast_pkts;
__le64 rx_bcast_pkts;
__le64 rx_discard_pkts;
__le64 rx_error_pkts;
__le64 rx_ucast_bytes;
__le64 rx_mcast_bytes;
__le64 rx_bcast_bytes;
__le64 tx_ucast_pkts;
__le64 tx_mcast_pkts;
__le64 tx_bcast_pkts;
__le64 tx_error_pkts;
__le64 tx_discard_pkts;
__le64 tx_ucast_bytes;
__le64 tx_mcast_bytes;
__le64 tx_bcast_bytes;
__le64 rx_tpa_eligible_pkt;
__le64 rx_tpa_eligible_bytes;
__le64 rx_tpa_pkt;
__le64 rx_tpa_bytes;
__le64 rx_tpa_errors;
u8 unused_0[7];
u8 valid;
};

/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
struct hwrm_stat_ctx_clr_stats_input {
__le16 req_type;
@@ -7497,6 +7679,29 @@ struct hwrm_wol_reason_qcfg_output {
u8 valid;
};

/* hwrm_dbg_read_direct_input (size:256b/32B) */
struct hwrm_dbg_read_direct_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le64 host_dest_addr;
__le32 read_addr;
__le32 read_len32;
};

/* hwrm_dbg_read_direct_output (size:128b/16B) */
struct hwrm_dbg_read_direct_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
__le32 crc32;
u8 unused_0[3];
u8 valid;
};

/* coredump_segment_record (size:128b/16B) */
struct coredump_segment_record {
__le16 component_id;
@@ -7507,7 +7712,8 @@ struct coredump_segment_record {
u8 seg_flags;
u8 compress_flags;
#define SFLAG_COMPRESSED_ZLIB 0x1UL
u8 unused_0[6];
u8 unused_0[2];
__le32 segment_len;
};

/* hwrm_dbg_coredump_list_input (size:256b/32B) */
@@ -7620,7 +7826,8 @@ struct hwrm_dbg_ring_info_get_input {
#define DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL 0x0UL
#define DBG_RING_INFO_GET_REQ_RING_TYPE_TX 0x1UL
#define DBG_RING_INFO_GET_REQ_RING_TYPE_RX 0x2UL
#define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_RX
#define DBG_RING_INFO_GET_REQ_RING_TYPE_NQ 0x3UL
#define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_NQ
u8 unused_0[3];
__le32 fw_ring_id;
};
@@ -7633,7 +7840,8 @@ struct hwrm_dbg_ring_info_get_output {
__le16 resp_len;
__le32 producer_index;
__le32 consumer_index;
u8 unused_0[7];
__le32 cag_vector_ctrl;
u8 unused_0[3];
u8 valid;
};

@@ -7922,6 +8130,7 @@ struct hwrm_nvm_install_update_input {
#define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL
#define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL
#define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL
#define NVM_INSTALL_UPDATE_REQ_FLAGS_VERIFY_ONLY 0x8UL
u8 unused_0[2];
};

@@ -8101,7 +8310,14 @@ struct hwrm_selftest_qlist_output {
char test5_name[32];
char test6_name[32];
char test7_name[32];
u8 unused_2[7];
u8 eyescope_target_BER_support;
#define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL
#define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL
#define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E10_SUPPORTED 0x2UL
#define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E11_SUPPORTED 0x3UL
#define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED 0x4UL
#define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_LAST SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED
u8 unused_2[6];
u8 valid;
};

@@ -1029,7 +1029,7 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
rc = bnxt_hwrm_exec_fwd_resp(
bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
} else {
struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {0};
struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

phy_qcfg_req =