Merge branch 'bnxt_en-ptp-and-rss-updates'

Michael Chan says:

====================
bnxt_en: PTP and RSS updates

The first 2 patches are v2 of the PTP patches posted about 3 weeks ago:

https://lore.kernel.org/netdev/20240229070202.107488-1-michael.chan@broadcom.com/

The devlink parameter is dropped; v2 just increases the timeout accuracy
and uses a default timeout of 1 second (see the timestamping sketch below).

Patches 3 to 12 implement additional RSS contexts and ntuple filters that
target those contexts (see the ethtool usage sketch below).
====================
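
For the PTP side, a quick way to exercise the TX timestamp path touched by
the first two patches is the linuxptp toolset; a minimal sketch, assuming a
hypothetical interface eth0:

  # enable hardware TX and RX timestamping on the port
  hwstamp_ctl -i eth0 -t 1 -r 1
  # run ptp4l with hardware timestamping; TX timestamps that the firmware
  # cannot return immediately are now retried for up to the 1 second default
  ptp4l -i eth0 -H -m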
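The new contexts are driven through the standard ethtool uAPI; a rough usage
sketch, assuming an interface eth0 and that the kernel assigns context ID 1:

  # allocate a new RSS context (prints the new context ID)
  ethtool -X eth0 context new
  # spread the new context over 4 RX rings
  ethtool -X eth0 context 1 equal 4
  # steer TCP/IPv4 traffic for destination port 8080 into that context
  ethtool -N eth0 flow-type tcp4 dst-port 8080 context 1
  # show the context's indirection table and hash key
  ethtool -x eth0 context 1
  # deleting the context also frees its VNIC and associated ntuple filters
  ethtool -X eth0 context 1 delete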

Link: https://lore.kernel.org/r/20240325222902.220712-1-michael.chan@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski committed 2024-03-28 22:36:54 -07:00
commit b3f4c32913
6 changed files with 509 additions and 136 deletions

drivers/net/ethernet/broadcom/bnxt/bnxt.c

@@ -4241,6 +4241,7 @@ static void bnxt_init_vnics(struct bnxt *bp)
int j;
vnic->fw_vnic_id = INVALID_HW_RING_ID;
vnic->vnic_id = i;
for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
@@ -5788,8 +5789,22 @@ void bnxt_fill_ipv6_mask(__be32 mask[4])
static void
bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
struct hwrm_cfa_ntuple_filter_alloc_input *req,
u16 rxq)
struct bnxt_ntuple_filter *fltr)
{
struct bnxt_rss_ctx *rss_ctx, *tmp;
u16 rxq = fltr->base.rxq;
if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) {
if (rss_ctx->index == fltr->base.fw_vnic_id) {
struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
break;
}
}
return;
}
if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
struct bnxt_vnic_info *vnic;
u32 enables;
@@ -5830,7 +5845,7 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
req->flags =
cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
} else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr->base.rxq);
bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr);
} else {
vnic = &bp->vnic_info[fltr->base.rxq + 1];
req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
@@ -5938,9 +5953,9 @@ static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
}
static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
u32 tpa_flags)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
struct hwrm_vnic_tpa_cfg_input *req;
int rc;
@@ -6025,9 +6040,10 @@ static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
}
static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx)
{
int entries;
u16 *tbl;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
@@ -6035,16 +6051,22 @@ static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
entries = HW_HASH_INDEX_SIZE;
bp->rss_indir_tbl_entries = entries;
bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
GFP_KERNEL);
if (!bp->rss_indir_tbl)
tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
if (!tbl)
return -ENOMEM;
if (rss_ctx)
rss_ctx->rss_indir_tbl = tbl;
else
bp->rss_indir_tbl = tbl;
return 0;
}
static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx)
{
u16 max_rings, max_entries, pad, i;
u16 *rss_indir_tbl;
if (!bp->rx_nr_rings)
return;
@@ -6055,13 +6077,17 @@ static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
max_rings = bp->rx_nr_rings;
max_entries = bnxt_get_rxfh_indir_size(bp->dev);
if (rss_ctx)
rss_indir_tbl = &rss_ctx->rss_indir_tbl[0];
else
rss_indir_tbl = &bp->rss_indir_tbl[0];
for (i = 0; i < max_entries; i++)
bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
pad = bp->rss_indir_tbl_entries - max_entries;
if (pad)
memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
memset(&rss_indir_tbl[i], 0, pad * sizeof(u16));
}
static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
@@ -6117,6 +6143,8 @@ static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
j = vnic->rss_ctx->rss_indir_tbl[i];
else
j = bp->rss_indir_tbl[i];
rxr = &bp->rx_ring[j];
@@ -6154,9 +6182,9 @@ __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
}
static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic,
bool set_rss)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_rss_cfg_input *req;
int rc;
@@ -6174,9 +6202,9 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
return hwrm_req_send(bp, req);
}
static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp,
struct bnxt_vnic_info *vnic, bool set_rss)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_rss_cfg_input *req;
dma_addr_t ring_tbl_map;
u32 i, nr_ctxs;
@@ -6229,9 +6257,8 @@ static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
hwrm_req_drop(bp, req);
}
static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_plcmodes_cfg_input *req;
int rc;
@@ -6256,7 +6283,8 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
return hwrm_req_send(bp, req);
}
static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp,
struct bnxt_vnic_info *vnic,
u16 ctx_idx)
{
struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
@@ -6265,10 +6293,10 @@ static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
return;
req->rss_cos_lb_ctx_id =
cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
hwrm_req_send(bp, req);
bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
}
static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
@@ -6280,13 +6308,14 @@ static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j);
}
}
bp->rsscos_nr_ctxs = 0;
}
static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
struct bnxt_vnic_info *vnic, u16 ctx_idx)
{
struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
@@ -6299,7 +6328,7 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (!rc)
bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
vnic->fw_rss_cos_lb_ctx[ctx_idx] =
le16_to_cpu(resp->rss_cos_lb_ctx_id);
hwrm_req_drop(bp, req);
@@ -6313,10 +6342,9 @@ static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
}
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_cfg_input *req;
unsigned int ring = 0, grp_idx;
u16 def_vlan = 0;
@@ -6364,8 +6392,8 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
if (vnic->flags & BNXT_VNIC_RSS_FLAG)
ring = 0;
else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
ring = vnic_id - 1;
else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
ring = vnic->vnic_id - 1;
else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
ring = bp->rx_nr_rings - 1;
grp_idx = bp->rx_ring[ring].bnapi->index;
@@ -6381,25 +6409,25 @@ vnic_mru:
#endif
if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
if (!vnic_id && bnxt_ulp_registered(bp->edev))
if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev))
req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
return hwrm_req_send(bp, req);
}
static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
static void bnxt_hwrm_vnic_free_one(struct bnxt *bp,
struct bnxt_vnic_info *vnic)
{
if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
struct hwrm_vnic_free_input *req;
if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
return;
req->vnic_id =
cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
hwrm_req_send(bp, req);
bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
vnic->fw_vnic_id = INVALID_HW_RING_ID;
}
}
@@ -6408,15 +6436,14 @@ static void bnxt_hwrm_vnic_free(struct bnxt *bp)
u16 i;
for (i = 0; i < bp->nr_vnics; i++)
bnxt_hwrm_vnic_free_one(bp, i);
bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]);
}
static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
unsigned int start_rx_ring_idx,
unsigned int nr_rings)
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
unsigned int start_rx_ring_idx,
unsigned int nr_rings)
{
unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_alloc_output *resp;
struct hwrm_vnic_alloc_input *req;
int rc;
@@ -6442,7 +6469,7 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
vnic_no_ring_grps:
for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
if (vnic_id == BNXT_VNIC_DEFAULT)
if (vnic->vnic_id == BNXT_VNIC_DEFAULT)
req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
resp = hwrm_req_hold(bp, req);
@@ -7341,7 +7368,7 @@ static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
hw_resc->resv_rx_rings = bp->rx_nr_rings;
if (!netif_is_rxfh_configured(bp->dev))
bnxt_set_dflt_rss_indir_tbl(bp);
bnxt_set_dflt_rss_indir_tbl(bp, NULL);
}
}
@@ -7349,7 +7376,7 @@ static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
{
if (bp->flags & BNXT_FLAG_RFS) {
if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
return 2;
return 2 + bp->num_rss_ctx;
if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
return rx_rings + 1;
}
@@ -7497,7 +7524,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
return -ENOMEM;
if (!netif_is_rxfh_configured(bp->dev))
bnxt_set_dflt_rss_indir_tbl(bp);
bnxt_set_dflt_rss_indir_tbl(bp, NULL);
return rc;
}
@@ -9676,7 +9703,7 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
else if (BNXT_NO_FW_ACCESS(bp))
return 0;
for (i = 0; i < bp->nr_vnics; i++) {
rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags);
if (rc) {
netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
i, rc);
@@ -9691,7 +9718,7 @@ static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
int i;
for (i = 0; i < bp->nr_vnics; i++)
bnxt_hwrm_vnic_set_rss(bp, i, false);
bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false);
}
static void bnxt_clear_vnic(struct bnxt *bp)
@@ -9769,28 +9796,27 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
return hwrm_req_send(bp, req);
}
static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
int rc;
if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
goto skip_rss_ctx;
/* allocate context for vnic */
rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
vnic_id, rc);
vnic->vnic_id, rc);
goto vnic_setup_err;
}
bp->rsscos_nr_ctxs++;
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
vnic_id, rc);
vnic->vnic_id, rc);
goto vnic_setup_err;
}
bp->rsscos_nr_ctxs++;
@@ -9798,26 +9824,26 @@ static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
skip_rss_ctx:
/* configure default vnic, ring grp */
rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
vnic_id, rc);
vnic->vnic_id, rc);
goto vnic_setup_err;
}
/* Enable RSS hashing on vnic */
rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
vnic_id, rc);
vnic->vnic_id, rc);
goto vnic_setup_err;
}
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
vnic_id, rc);
vnic->vnic_id, rc);
}
}
@@ -9825,16 +9851,33 @@ vnic_setup_err:
return rc;
}
static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
int rc;
rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
vnic->vnic_id, rc);
return rc;
}
rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc)
netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
vnic->vnic_id, rc);
return rc;
}
int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
int rc, i, nr_ctxs;
nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
for (i = 0; i < nr_ctxs; i++) {
rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
vnic_id, i, rc);
vnic->vnic_id, i, rc);
break;
}
bp->rsscos_nr_ctxs++;
@@ -9842,63 +9885,57 @@ static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
if (i < nr_ctxs)
return -ENOMEM;
rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
vnic_id, rc);
rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
if (rc)
return rc;
}
rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
vnic_id, rc);
return rc;
}
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
vnic_id, rc);
vnic->vnic_id, rc);
}
}
return rc;
}
static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return __bnxt_setup_vnic_p5(bp, vnic_id);
return __bnxt_setup_vnic_p5(bp, vnic);
else
return __bnxt_setup_vnic(bp, vnic_id);
return __bnxt_setup_vnic(bp, vnic);
}
static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, u16 vnic_id,
static int bnxt_alloc_and_setup_vnic(struct bnxt *bp,
struct bnxt_vnic_info *vnic,
u16 start_rx_ring_idx, int rx_rings)
{
int rc;
rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, start_rx_ring_idx, rx_rings);
rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
vnic_id, rc);
vnic->vnic_id, rc);
return rc;
}
return bnxt_setup_vnic(bp, vnic_id);
return bnxt_setup_vnic(bp, vnic);
}
static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
struct bnxt_vnic_info *vnic;
int i, rc = 0;
if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
return bnxt_alloc_and_setup_vnic(bp, BNXT_VNIC_NTUPLE, 0,
bp->rx_nr_rings);
if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings);
}
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return 0;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_vnic_info *vnic;
u16 vnic_id = i + 1;
u16 ring_id = i;
@@ -9909,12 +9946,104 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
vnic->flags |= BNXT_VNIC_RFS_FLAG;
if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
if (bnxt_alloc_and_setup_vnic(bp, vnic_id, ring_id, 1))
if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1))
break;
}
return rc;
}
void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
bool all)
{
struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
struct bnxt_filter_base *usr_fltr, *tmp;
struct bnxt_ntuple_filter *ntp_fltr;
int i;
bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
}
if (!all)
return;
list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) &&
usr_fltr->fw_vnic_id == rss_ctx->index) {
ntp_fltr = container_of(usr_fltr,
struct bnxt_ntuple_filter,
base);
bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr);
bnxt_del_ntp_filter(bp, ntp_fltr);
bnxt_del_one_usr_fltr(bp, usr_fltr);
}
}
if (vnic->rss_table)
dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
vnic->rss_table,
vnic->rss_table_dma_addr);
kfree(rss_ctx->rss_indir_tbl);
list_del(&rss_ctx->list);
bp->num_rss_ctx--;
clear_bit(rss_ctx->index, bp->rss_ctx_bmap);
kfree(rss_ctx);
}
static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
{
bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
struct bnxt_rss_ctx *rss_ctx, *tmp;
list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) {
struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) ||
__bnxt_setup_vnic_p5(bp, vnic)) {
netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
rss_ctx->index);
bnxt_del_one_rss_ctx(bp, rss_ctx, true);
}
}
}
struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp)
{
struct bnxt_rss_ctx *rss_ctx = NULL;
rss_ctx = kzalloc(sizeof(*rss_ctx), GFP_KERNEL);
if (rss_ctx) {
rss_ctx->vnic.rss_ctx = rss_ctx;
list_add_tail(&rss_ctx->list, &bp->rss_ctx_list);
bp->num_rss_ctx++;
}
return rss_ctx;
}
void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all)
{
struct bnxt_rss_ctx *rss_ctx, *tmp;
list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list)
bnxt_del_one_rss_ctx(bp, rss_ctx, all);
if (all)
bitmap_free(bp->rss_ctx_bmap);
}
static void bnxt_init_multi_rss_ctx(struct bnxt *bp)
{
bp->rss_ctx_bmap = bitmap_zalloc(BNXT_RSS_CTX_BMAP_LEN, GFP_KERNEL);
if (bp->rss_ctx_bmap) {
/* burn index 0 since we cannot have context 0 */
__set_bit(0, bp->rss_ctx_bmap);
INIT_LIST_HEAD(&bp->rss_ctx_list);
bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
}
}
/* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
static bool bnxt_promisc_ok(struct bnxt *bp)
{
@@ -9927,16 +10056,17 @@ static bool bnxt_promisc_ok(struct bnxt *bp)
static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[1];
unsigned int rc = 0;
rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1);
if (rc) {
netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
rc);
return rc;
}
rc = bnxt_hwrm_vnic_cfg(bp, 1);
rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc) {
netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
rc);
@@ -9979,7 +10109,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
rx_nr_rings--;
/* default vnic 0 */
rc = bnxt_hwrm_vnic_alloc(bp, BNXT_VNIC_DEFAULT, 0, rx_nr_rings);
rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings);
if (rc) {
netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
goto err_out;
@@ -9988,7 +10118,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
if (BNXT_VF(bp))
bnxt_hwrm_func_qcfg(bp);
rc = bnxt_setup_vnic(bp, BNXT_VNIC_DEFAULT);
rc = bnxt_setup_vnic(bp, vnic);
if (rc)
goto err_out;
if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
@@ -11760,6 +11890,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
bnxt_vf_reps_open(bp);
bnxt_ptp_init_rtc(bp, true);
bnxt_ptp_cfg_tstamp_filters(bp);
if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
bnxt_hwrm_realloc_rss_ctx_vnic(bp);
bnxt_cfg_usr_fltrs(bp);
return 0;
@@ -11908,6 +12040,8 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
while (bnxt_drv_busy(bp))
msleep(20);
if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
bnxt_clear_rss_ctxs(bp, false);
/* Flush rings and disable interrupts */
bnxt_shutdown_nic(bp, irq_re_init);
@@ -12405,33 +12539,26 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
}
/* If runtime conditions support RFS */
static bool bnxt_rfs_capable(struct bnxt *bp)
bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
{
struct bnxt_hw_rings hwr = {0};
int max_vnics, max_rss_ctxs;
hwr.rss_ctx = 1;
if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
/* 2 VNICS: default + Ntuple */
hwr.vnic = 2;
hwr.rss_ctx = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
hwr.vnic;
goto check_reserve_vnic;
}
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
!BNXT_SUPPORTS_NTUPLE_VNIC(bp))
return bnxt_rfs_supported(bp);
if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
return false;
hwr.vnic = 1 + bp->rx_nr_rings;
check_reserve_vnic:
hwr.grp = bp->rx_nr_rings;
hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings);
if (new_rss_ctx)
hwr.vnic++;
hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
max_vnics = bnxt_get_max_func_vnics(bp);
max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP))
hwr.rss_ctx = hwr.vnic;
if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
if (bp->rx_nr_rings > 1)
netdev_warn(bp->dev,
@@ -12465,7 +12592,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
netdev_features_t vlan_features;
if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false))
features &= ~NETIF_F_NTUPLE;
if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
@@ -13601,7 +13728,7 @@ static void bnxt_set_dflt_rfs(struct bnxt *bp)
bp->flags &= ~BNXT_FLAG_RFS;
if (bnxt_rfs_supported(bp)) {
dev->hw_features |= NETIF_F_NTUPLE;
if (bnxt_rfs_capable(bp)) {
if (bnxt_rfs_capable(bp, false)) {
bp->flags |= BNXT_FLAG_RFS;
dev->features |= NETIF_F_NTUPLE;
}
@@ -14601,6 +14728,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
bnxt_free_l2_filters(bp, true);
bnxt_free_ntp_fltrs(bp, true);
if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
bnxt_clear_rss_ctxs(bp, true);
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
/* Flush any pending tasks */
cancel_work_sync(&bp->sp_task);
@@ -15059,7 +15188,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp->flags |= BNXT_FLAG_CHIP_P7;
}
rc = bnxt_alloc_rss_indir_tbl(bp);
rc = bnxt_alloc_rss_indir_tbl(bp, NULL);
if (rc)
goto init_err_pci_clean;
@@ -15212,6 +15341,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_LIST_HEAD(&bp->usr_fltr_list);
if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
bnxt_init_multi_rss_ctx(bp);
rc = register_netdev(dev);
if (rc)
goto init_err_cleanup;
@@ -15232,6 +15364,8 @@ init_err_dl:
bnxt_clear_int_mode(bp);
init_err_pci_clean:
if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
bnxt_clear_rss_ctxs(bp, true);
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_resources(bp);
bnxt_hwmon_uninit(bp);

drivers/net/ethernet/broadcom/bnxt/bnxt.h

@@ -1256,8 +1256,22 @@ struct bnxt_vnic_info {
#define BNXT_VNIC_UCAST_FLAG 8
#define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10
#define BNXT_VNIC_NTUPLE_FLAG 0x20
#define BNXT_VNIC_RSSCTX_FLAG 0x40
struct bnxt_rss_ctx *rss_ctx;
u32 vnic_id;
};
struct bnxt_rss_ctx {
struct list_head list;
struct bnxt_vnic_info vnic;
u16 *rss_indir_tbl;
u8 index;
};
#define BNXT_MAX_ETH_RSS_CTX 32
#define BNXT_RSS_CTX_BMAP_LEN (BNXT_MAX_ETH_RSS_CTX + 1)
#define BNXT_VNIC_ID_INVALID 0xffffffff
struct bnxt_hw_rings {
int tx;
int rx;
@@ -1360,6 +1374,7 @@ struct bnxt_filter_base {
#define BNXT_ACT_RING_DST 2
#define BNXT_ACT_FUNC_DST 4
#define BNXT_ACT_NO_AGING 8
#define BNXT_ACT_RSS_CTX 0x10
u16 sw_id;
u16 rxq;
u16 fw_vnic_id;
@@ -2227,6 +2242,9 @@ struct bnxt {
/* grp_info indexed by completion ring index */
struct bnxt_ring_grp_info *grp_info;
struct bnxt_vnic_info *vnic_info;
struct list_head rss_ctx_list;
unsigned long *rss_ctx_bmap;
u32 num_rss_ctx;
int nr_vnics;
u16 *rss_indir_tbl;
u16 rss_indir_tbl_entries;
@@ -2241,6 +2259,7 @@ struct bnxt {
#define BNXT_RSS_CAP_AH_V6_RSS_CAP BIT(5)
#define BNXT_RSS_CAP_ESP_V4_RSS_CAP BIT(6)
#define BNXT_RSS_CAP_ESP_V6_RSS_CAP BIT(7)
#define BNXT_RSS_CAP_MULTI_RSS_CTX BIT(8)
u8 rss_hash_key[HW_HASH_KEY_SIZE];
u8 rss_hash_key_valid:1;
@@ -2340,6 +2359,10 @@ struct bnxt {
#define BNXT_SUPPORTS_NTUPLE_VNIC(bp) \
(BNXT_PF(bp) && ((bp)->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3))
#define BNXT_SUPPORTS_MULTI_RSS_CTX(bp) \
(BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \
((bp)->rss_cap & BNXT_RSS_CAP_MULTI_RSS_CTX))
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
u16 hwrm_cmd_kong_seq;
@@ -2693,9 +2716,16 @@ int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr);
int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr);
int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
u32 tpa_flags);
void bnxt_fill_ipv6_mask(__be32 mask[4]);
int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx);
void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx);
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic);
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
unsigned int start_rx_ring_idx,
unsigned int nr_rings);
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
int bnxt_nq_rings_in_use(struct bnxt *bp);
int bnxt_hwrm_set_coal(struct bnxt *);
@@ -2721,6 +2751,12 @@ int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
int bnxt_hwrm_func_qcaps(struct bnxt *bp);
int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
bool all);
struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp);
void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
@@ -2728,6 +2764,7 @@ void bnxt_reenable_sriov(struct bnxt *bp);
void bnxt_close_nic(struct bnxt *, bool, bool);
void bnxt_get_ring_err_stats(struct bnxt *bp,
struct bnxt_total_ring_err_stats *stats);
bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx);
int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
u32 *reg_buf);
void bnxt_fw_exception(struct bnxt *bp);

drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c

@@ -969,6 +969,8 @@ static int bnxt_set_channels(struct net_device *dev,
}
bnxt_clear_usr_fltrs(bp, true);
if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
bnxt_clear_rss_ctxs(bp, false);
if (netif_running(dev)) {
if (BNXT_PF(bp)) {
/* TODO CHIMP_FW: Send message to all VF's
@@ -1205,6 +1207,36 @@ fltr_err:
return rc;
}
static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp,
u32 index)
{
struct bnxt_rss_ctx *rss_ctx, *tmp;
list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list)
if (rss_ctx->index == index)
return rss_ctx;
return NULL;
}
static int bnxt_alloc_rss_ctx_rss_table(struct bnxt *bp,
struct bnxt_rss_ctx *rss_ctx)
{
int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
vnic->rss_table = dma_alloc_coherent(&bp->pdev->dev,
vnic->rss_table_size,
&vnic->rss_table_dma_addr,
GFP_KERNEL);
if (!vnic->rss_table)
return -ENOMEM;
vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
return 0;
}
static int bnxt_add_l2_cls_rule(struct bnxt *bp,
struct ethtool_rx_flow_spec *fs)
{
@@ -1280,22 +1312,24 @@ static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec,
}
static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
struct ethtool_rx_flow_spec *fs)
struct ethtool_rxnfc *cmd)
{
u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
struct ethtool_rx_flow_spec *fs = &cmd->fs;
struct bnxt_ntuple_filter *new_fltr, *fltr;
u32 flow_type = fs->flow_type & 0xff;
struct bnxt_l2_filter *l2_fltr;
struct bnxt_flow_masks *fmasks;
u32 flow_type = fs->flow_type;
struct flow_keys *fkeys;
u32 idx;
u32 idx, ring;
int rc;
u8 vf;
if (!bp->vnic_info)
return -EAGAIN;
if ((flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
if ((fs->flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
return -EOPNOTSUPP;
if (flow_type == IP_USER_FLOW) {
@@ -1403,6 +1437,19 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
rcu_read_unlock();
new_fltr->base.flags = BNXT_ACT_NO_AGING;
if (fs->flow_type & FLOW_RSS) {
struct bnxt_rss_ctx *rss_ctx;
new_fltr->base.fw_vnic_id = 0;
new_fltr->base.flags |= BNXT_ACT_RSS_CTX;
rss_ctx = bnxt_get_rss_ctx_from_index(bp, cmd->rss_context);
if (rss_ctx) {
new_fltr->base.fw_vnic_id = rss_ctx->index;
} else {
rc = -EINVAL;
goto ntuple_err;
}
}
if (fs->ring_cookie == RX_CLS_FLOW_DISC)
new_fltr->base.flags |= BNXT_ACT_DROP;
else
@@ -1444,12 +1491,12 @@ static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
flow_type == IPV6_USER_FLOW) &&
!(bp->fw_cap & BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO))
return -EOPNOTSUPP;
if (flow_type & (FLOW_MAC_EXT | FLOW_RSS))
if (flow_type & FLOW_MAC_EXT)
return -EINVAL;
flow_type &= ~FLOW_EXT;
if (fs->ring_cookie == RX_CLS_FLOW_DISC && flow_type != ETHER_FLOW)
return bnxt_add_ntuple_cls_rule(bp, fs);
return bnxt_add_ntuple_cls_rule(bp, cmd);
ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
@@ -1463,7 +1510,7 @@ static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (flow_type == ETHER_FLOW)
rc = bnxt_add_l2_cls_rule(bp, fs);
else
rc = bnxt_add_ntuple_cls_rule(bp, fs);
rc = bnxt_add_ntuple_cls_rule(bp, cmd);
return rc;
}
@@ -1754,7 +1801,10 @@ static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
static int bnxt_get_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh)
{
u32 rss_context = rxfh->rss_context;
struct bnxt_rss_ctx *rss_ctx = NULL;
struct bnxt *bp = netdev_priv(dev);
u16 *indir_tbl = bp->rss_indir_tbl;
struct bnxt_vnic_info *vnic;
u32 i, tbl_size;
@@ -1764,10 +1814,18 @@ static int bnxt_get_rxfh(struct net_device *dev,
return 0;
vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
if (rxfh->indir && bp->rss_indir_tbl) {
if (rxfh->rss_context) {
rss_ctx = bnxt_get_rss_ctx_from_index(bp, rss_context);
if (!rss_ctx)
return -EINVAL;
indir_tbl = rss_ctx->rss_indir_tbl;
vnic = &rss_ctx->vnic;
}
if (rxfh->indir && indir_tbl) {
tbl_size = bnxt_get_rxfh_indir_size(dev);
for (i = 0; i < tbl_size; i++)
rxfh->indir[i] = bp->rss_indir_tbl[i];
rxfh->indir[i] = indir_tbl[i];
}
if (rxfh->key && vnic->rss_hash_key)
@@ -1776,6 +1834,131 @@ static int bnxt_get_rxfh(struct net_device *dev,
return 0;
}
static void bnxt_modify_rss(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
struct ethtool_rxfh_param *rxfh)
{
if (rxfh->key) {
if (rss_ctx) {
memcpy(rss_ctx->vnic.rss_hash_key, rxfh->key,
HW_HASH_KEY_SIZE);
} else {
memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE);
bp->rss_hash_key_updated = true;
}
}
if (rxfh->indir) {
u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
u16 *indir_tbl = bp->rss_indir_tbl;
if (rss_ctx)
indir_tbl = rss_ctx->rss_indir_tbl;
for (i = 0; i < tbl_size; i++)
indir_tbl[i] = rxfh->indir[i];
pad = bp->rss_indir_tbl_entries - tbl_size;
if (pad)
memset(&indir_tbl[i], 0, pad * sizeof(u16));
}
}
static int bnxt_set_rxfh_context(struct bnxt *bp,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
u32 *rss_context = &rxfh->rss_context;
struct bnxt_rss_ctx *rss_ctx;
struct bnxt_vnic_info *vnic;
bool modify = false;
int bit_id;
int rc;
if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) {
NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported");
return -EOPNOTSUPP;
}
if (*rss_context != ETH_RXFH_CONTEXT_ALLOC) {
rss_ctx = bnxt_get_rss_ctx_from_index(bp, *rss_context);
if (!rss_ctx) {
NL_SET_ERR_MSG_FMT_MOD(extack, "RSS context %u not found",
*rss_context);
return -EINVAL;
}
if (*rss_context && rxfh->rss_delete) {
bnxt_del_one_rss_ctx(bp, rss_ctx, true);
return 0;
}
modify = true;
vnic = &rss_ctx->vnic;
goto modify_context;
}
if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) {
NL_SET_ERR_MSG_FMT_MOD(extack, "Out of RSS contexts, maximum %u",
BNXT_MAX_ETH_RSS_CTX);
return -EINVAL;
}
if (!bnxt_rfs_capable(bp, true)) {
NL_SET_ERR_MSG_MOD(extack, "Out of hardware resources");
return -ENOMEM;
}
rss_ctx = bnxt_alloc_rss_ctx(bp);
if (!rss_ctx)
return -ENOMEM;
vnic = &rss_ctx->vnic;
vnic->flags |= BNXT_VNIC_RSSCTX_FLAG;
vnic->vnic_id = BNXT_VNIC_ID_INVALID;
rc = bnxt_alloc_rss_ctx_rss_table(bp, rss_ctx);
if (rc)
goto out;
rc = bnxt_alloc_rss_indir_tbl(bp, rss_ctx);
if (rc)
goto out;
bnxt_set_dflt_rss_indir_tbl(bp, rss_ctx);
memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE);
rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings);
if (rc) {
NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VNIC");
goto out;
}
rc = bnxt_hwrm_vnic_set_tpa(bp, vnic, bp->flags & BNXT_FLAG_TPA);
if (rc) {
NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA");
goto out;
}
modify_context:
bnxt_modify_rss(bp, rss_ctx, rxfh);
if (modify)
return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
rc = __bnxt_setup_vnic_p5(bp, vnic);
if (rc) {
NL_SET_ERR_MSG_MOD(extack, "Unable to setup VNIC");
goto out;
}
bit_id = bitmap_find_free_region(bp->rss_ctx_bmap,
BNXT_RSS_CTX_BMAP_LEN, 0);
if (bit_id < 0) {
rc = -ENOMEM;
goto out;
}
rss_ctx->index = (u16)bit_id;
*rss_context = rss_ctx->index;
return 0;
out:
bnxt_del_one_rss_ctx(bp, rss_ctx, true);
return rc;
}
static int bnxt_set_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
@@ -1786,20 +1969,11 @@ static int bnxt_set_rxfh(struct net_device *dev,
if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (rxfh->key) {
memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE);
bp->rss_hash_key_updated = true;
}
if (rxfh->rss_context)
return bnxt_set_rxfh_context(bp, rxfh, extack);
if (rxfh->indir) {
u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
bnxt_modify_rss(bp, NULL, rxfh);
for (i = 0; i < tbl_size; i++)
bp->rss_indir_tbl[i] = rxfh->indir[i];
pad = bp->rss_indir_tbl_entries - tbl_size;
if (pad)
memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
}
bnxt_clear_usr_fltrs(bp, false);
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
@@ -5071,6 +5245,7 @@ void bnxt_ethtool_free(struct bnxt *bp)
const struct ethtool_ops bnxt_ethtool_ops = {
.cap_link_lanes_supported = 1,
.cap_rss_ctx_supported = 1,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USECS_IRQ |

drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c

@@ -109,7 +109,8 @@ static void bnxt_ptp_get_current_time(struct bnxt *bp)
spin_unlock_bh(&ptp->ptp_lock);
}
static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts,
u32 txts_tmo)
{
struct hwrm_port_ts_query_output *resp;
struct hwrm_port_ts_query_input *req;
@@ -122,10 +123,15 @@ static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
req->flags = cpu_to_le32(flags);
if ((flags & PORT_TS_QUERY_REQ_FLAGS_PATH) ==
PORT_TS_QUERY_REQ_FLAGS_PATH_TX) {
u32 tmo_us = txts_tmo * 1000;
req->enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES);
req->ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid);
req->ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off);
req->ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT);
if (!tmo_us)
tmo_us = BNXT_PTP_QTS_TIMEOUT;
tmo_us = min(tmo_us, BNXT_PTP_QTS_MAX_TMO_US);
req->ts_req_timeout = cpu_to_le16(tmo_us);
}
resp = hwrm_req_hold(bp, req);
@@ -672,10 +678,17 @@ static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
struct skb_shared_hwtstamps timestamp;
unsigned long now = jiffies;
u64 ts = 0, ns = 0;
u32 tmo = 0;
int rc;
rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts);
if (!ptp->txts_pending)
ptp->abs_txts_tmo = now + msecs_to_jiffies(ptp->txts_tmo);
if (!time_after_eq(now, ptp->abs_txts_tmo))
tmo = jiffies_to_msecs(ptp->abs_txts_tmo - now);
rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts,
tmo);
if (!rc) {
memset(&timestamp, 0, sizeof(timestamp));
spin_lock_bh(&ptp->ptp_lock);
@@ -684,6 +697,10 @@ static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb)
timestamp.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(ptp->tx_skb, &timestamp);
} else {
if (!time_after_eq(jiffies, ptp->abs_txts_tmo)) {
ptp->txts_pending = true;
return;
}
netdev_warn_once(bp->dev,
"TS query for TX timer failed rc = %x\n", rc);
}
@@ -691,6 +708,7 @@ static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb)
dev_kfree_skb_any(ptp->tx_skb);
ptp->tx_skb = NULL;
atomic_inc(&ptp->tx_avail);
ptp->txts_pending = false;
}
static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
@@ -714,6 +732,8 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
spin_unlock_bh(&ptp->ptp_lock);
ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD;
}
if (ptp->txts_pending)
return 0;
return HZ;
}
@@ -891,7 +911,8 @@ int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg)
if (rc)
return rc;
} else {
rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME, &ns);
rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME,
&ns, 0);
if (rc)
return rc;
}
@@ -965,6 +986,7 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
spin_unlock_bh(&ptp->ptp_lock);
ptp_schedule_worker(ptp->ptp_clock, 0);
}
ptp->txts_tmo = BNXT_PTP_DFLT_TX_TMO;
return 0;
out:

drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h

@@ -22,7 +22,9 @@
#define BNXT_LO_TIMER_MASK 0x0000ffffffffUL
#define BNXT_HI_TIMER_MASK 0xffff00000000UL
#define BNXT_PTP_DFLT_TX_TMO 1000 /* ms */
#define BNXT_PTP_QTS_TIMEOUT 1000
#define BNXT_PTP_QTS_MAX_TMO_US 65535
#define BNXT_PTP_QTS_TX_ENABLES (PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID | \
PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT | \
PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET)
@@ -115,11 +117,14 @@ struct bnxt_ptp_cfg {
BNXT_PTP_MSG_PDELAY_REQ | \
BNXT_PTP_MSG_PDELAY_RESP)
u8 tx_tstamp_en:1;
u8 txts_pending:1;
int rx_filter;
u32 tstamp_filters;
u32 refclk_regs[2];
u32 refclk_mapped_regs[2];
u32 txts_tmo;
unsigned long abs_txts_tmo;
};
#if BITS_PER_LONG == 32

drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c

@@ -71,7 +71,7 @@ int bnxt_register_dev(struct bnxt_en_dev *edev,
rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
if (test_bit(BNXT_STATE_OPEN, &bp->state))
bnxt_hwrm_vnic_cfg(bp, 0);
bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[BNXT_VNIC_DEFAULT]);
bnxt_fill_msix_vecs(bp, bp->edev->msix_entries);
edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;