octeontx2-pf: Rename tot_tx_queues to non_qos_queues
The current implementation is such that tot_tx_queues contains both XDP queues and regular TX queues, which are allocated when the interface is opened and freed when it is brought down. With the addition of QoS, where send queues are allocated and deallocated on user request, the QoS send queues will not be part of tot_tx_queues. So this patch renames tot_tx_queues to non_qos_queues.

Signed-off-by: Hariprasad Kelam <hkelam@marvell.com>
Reviewed-by: Simon Horman <simon.horman@corigine.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 508c58f76c
parent 12e7789ad5
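To make the queue accounting concrete, below is a minimal standalone sketch of the counting scheme the commit message describes. The struct and the numbers are hypothetical illustrations, not driver code; only the field names tx_queues, xdp_queues and non_qos_queues mirror the renamed fields in the patch.

#include <stdio.h>

/* Hypothetical stand-in for the relevant fields of the driver's hw struct. */
struct hw_sketch {
	unsigned short tx_queues;      /* regular send queues */
	unsigned short xdp_queues;     /* XDP send queues */
	unsigned short non_qos_queues; /* tx queues plus xdp queues; QoS SQs excluded */
};

int main(void)
{
	struct hw_sketch hw = { .tx_queues = 8, .xdp_queues = 0 };

	/* At probe time the count starts at the regular TX queue count. */
	hw.non_qos_queues = hw.tx_queues;

	/* Attaching an XDP program adds XDP send queues to the count;
	 * QoS send queues allocated later on user request are not added.
	 */
	hw.xdp_queues = 4;
	hw.non_qos_queues += hw.xdp_queues;

	printf("non_qos_queues = %u\n", hw.non_qos_queues);
	return 0;
}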
@@ -762,7 +762,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
 	int timeout = 1000;
 
 	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
-	for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
+	for (qidx = 0; qidx < pfvf->hw.non_qos_queues; qidx++) {
 		incr = (u64)qidx << 32;
 		while (timeout) {
 			val = otx2_atomic64_add(incr, ptr);
@@ -1048,7 +1048,7 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf)
 	}
 
 	/* Initialize TX queues */
-	for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
+	for (qidx = 0; qidx < pfvf->hw.non_qos_queues; qidx++) {
 		u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
 
 		err = otx2_sq_init(pfvf, qidx, sqb_aura);
@@ -1095,7 +1095,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
 
 	/* Set RQ/SQ/CQ counts */
 	nixlf->rq_cnt = pfvf->hw.rx_queues;
-	nixlf->sq_cnt = pfvf->hw.tot_tx_queues;
+	nixlf->sq_cnt = pfvf->hw.non_qos_queues;
 	nixlf->cq_cnt = pfvf->qset.cq_cnt;
 	nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
 	nixlf->rss_grps = MAX_RSS_GROUPS;
@@ -1133,7 +1133,7 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
 	int sqb, qidx;
 	u64 iova, pa;
 
-	for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
+	for (qidx = 0; qidx < hw->non_qos_queues; qidx++) {
 		sq = &qset->sq[qidx];
 		if (!sq->sqb_ptrs)
 			continue;
@@ -1349,7 +1349,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
 	stack_pages =
 		(num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
 
-	for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
+	for (qidx = 0; qidx < hw->non_qos_queues; qidx++) {
 		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
 		/* Initialize aura context */
 		err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
@@ -1369,7 +1369,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
 		goto fail;
 
 	/* Allocate pointers and free them to aura/pool */
-	for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
+	for (qidx = 0; qidx < hw->non_qos_queues; qidx++) {
 		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
 		pool = &pfvf->qset.pool[pool_id];
 
@@ -190,7 +190,7 @@ struct otx2_hw {
 	u16 rx_queues;
 	u16 tx_queues;
 	u16 xdp_queues;
-	u16 tot_tx_queues;
+	u16 non_qos_queues; /* tx queues plus xdp queues */
 	u16 max_queues;
 	u16 pool_cnt;
 	u16 rqpool_cnt;
@@ -1257,7 +1257,7 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
 	}
 
 	/* SQ */
-	for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
+	for (qidx = 0; qidx < pf->hw.non_qos_queues; qidx++) {
 		u64 sq_op_err_dbg, mnq_err_dbg, snd_err_dbg;
 		u8 sq_op_err_code, mnq_err_code, snd_err_code;
 
@@ -1383,7 +1383,7 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
 	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
 	/* Free SQB pointers */
 	otx2_sq_free_sqbs(pf);
-	for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
+	for (qidx = 0; qidx < pf->hw.non_qos_queues; qidx++) {
 		sq = &qset->sq[qidx];
 		qmem_free(pf->dev, sq->sqe);
 		qmem_free(pf->dev, sq->tso_hdrs);
@@ -1433,7 +1433,7 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
 	 * so, aura count = pool count.
 	 */
 	hw->rqpool_cnt = hw->rx_queues;
-	hw->sqpool_cnt = hw->tot_tx_queues;
+	hw->sqpool_cnt = hw->non_qos_queues;
 	hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
 
 	/* Maximum hardware supported transmit length */
@@ -1688,7 +1688,7 @@ int otx2_open(struct net_device *netdev)
 
 	netif_carrier_off(netdev);
 
-	pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tot_tx_queues;
+	pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.non_qos_queues;
 	/* RQ and SQs are mapped to different CQs,
 	 * so find out max CQ IRQs (i.e CINTs) needed.
 	 */
@@ -1708,7 +1708,7 @@ int otx2_open(struct net_device *netdev)
 	if (!qset->cq)
 		goto err_free_mem;
 
-	qset->sq = kcalloc(pf->hw.tot_tx_queues,
+	qset->sq = kcalloc(pf->hw.non_qos_queues,
 			   sizeof(struct otx2_snd_queue), GFP_KERNEL);
 	if (!qset->sq)
 		goto err_free_mem;
@@ -2529,7 +2529,7 @@ static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
 		xdp_features_clear_redirect_target(dev);
 	}
 
-	pf->hw.tot_tx_queues += pf->hw.xdp_queues;
+	pf->hw.non_qos_queues += pf->hw.xdp_queues;
 
 	if (if_up)
 		otx2_open(pf->netdev);
@@ -2760,7 +2760,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	hw->pdev = pdev;
 	hw->rx_queues = qcount;
 	hw->tx_queues = qcount;
-	hw->tot_tx_queues = qcount;
+	hw->non_qos_queues = qcount;
 	hw->max_queues = qcount;
 	hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
 	/* Use CQE of 128 byte descriptor size by default */
@@ -570,7 +570,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	hw->rx_queues = qcount;
 	hw->tx_queues = qcount;
 	hw->max_queues = qcount;
-	hw->tot_tx_queues = qcount;
+	hw->non_qos_queues = qcount;
 	hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
 	/* Use CQE of 128 byte descriptor size by default */
 	hw->xqe_size = 128;