crypto: hisilicon - Use one workqueue per qm instead of per qp
Testing showed that SEC does not need a workqueue per queue pair. Use a single workqueue per QM, created by the QM device driver when necessary, which also reduces wasted CPU with no loss of throughput.

Signed-off-by: Shukun Tan <tanshukun1@huawei.com>
Signed-off-by: Zaibo Xu <xuzaibo@huawei.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
commit 57ca81245f
parent 3cd54a4c3c
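For orientation, the interrupt path that results from this patch is sketched below, condensed from the diff that follows; the event-queue walk, phase handling and doorbell write are elided, and only the control flow around the new per-QM workqueue is shown.

/*
 * Sketch of the post-patch interrupt path, condensed from the diff below.
 * The EQ walk, eqc_phase handling and qm_db() doorbell write are elided.
 */
static irqreturn_t do_qm_irq(int irq, void *data)
{
	struct hisi_qm *qm = (struct hisi_qm *)data;

	/* defer EQ processing to the per-QM workqueue if the driver created one */
	if (qm->wq)
		queue_work(qm->wq, &qm->work);
	else
		schedule_work(&qm->work);

	return IRQ_HANDLED;
}

static void qm_work_process(struct work_struct *work)
{
	struct hisi_qm *qm = container_of(work, struct hisi_qm, work);

	/*
	 * Walk the event queue and call qm_poll_qp(qp, qm) for each entry,
	 * as the former qm_irq_handler() did, but now in process context
	 * and without queueing a separate work item per qp.
	 */
}

The per-qp work_struct, workqueue_struct and alloc_workqueue() call in hisi_qm_create_qp() are dropped accordingly; the single work item is initialised once in hisi_qm_init().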
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -494,17 +494,9 @@ static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
 	}
 }
 
-static void qm_qp_work_func(struct work_struct *work)
+static void qm_work_process(struct work_struct *work)
 {
-	struct hisi_qp *qp;
-
-	qp = container_of(work, struct hisi_qp, work);
-	qm_poll_qp(qp, qp->qm);
-}
-
-static irqreturn_t qm_irq_handler(int irq, void *data)
-{
-	struct hisi_qm *qm = data;
+	struct hisi_qm *qm = container_of(work, struct hisi_qm, work);
 	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
 	struct hisi_qp *qp;
 	int eqe_num = 0;
@@ -513,7 +505,7 @@ static irqreturn_t qm_irq_handler(int irq, void *data)
 		eqe_num++;
 		qp = qm_to_hisi_qp(qm, eqe);
 		if (qp)
-			queue_work(qp->wq, &qp->work);
+			qm_poll_qp(qp, qm);
 
 		if (qm->status.eq_head == QM_Q_DEPTH - 1) {
 			qm->status.eqc_phase = !qm->status.eqc_phase;
@@ -531,6 +523,17 @@ static irqreturn_t qm_irq_handler(int irq, void *data)
 	}
 
 	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
+}
+
+static irqreturn_t do_qm_irq(int irq, void *data)
+{
+	struct hisi_qm *qm = (struct hisi_qm *)data;
+
+	/* the workqueue created by device driver of QM */
+	if (qm->wq)
+		queue_work(qm->wq, &qm->work);
+	else
+		schedule_work(&qm->work);
 
 	return IRQ_HANDLED;
 }
@@ -540,7 +543,7 @@ static irqreturn_t qm_irq(int irq, void *data)
 	struct hisi_qm *qm = data;
 
 	if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
-		return qm_irq_handler(irq, data);
+		return do_qm_irq(irq, data);
 
 	dev_err(&qm->pdev->dev, "invalid int source\n");
 	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
@@ -1159,20 +1162,9 @@ struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
 
 	qp->qp_id = qp_id;
 	qp->alg_type = alg_type;
-	INIT_WORK(&qp->work, qm_qp_work_func);
-	qp->wq = alloc_workqueue("hisi_qm", WQ_UNBOUND | WQ_HIGHPRI |
-				 WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0);
-	if (!qp->wq) {
-		ret = -EFAULT;
-		goto err_free_qp_mem;
-	}
 
 	return qp;
 
-err_free_qp_mem:
-	if (qm->use_dma_api)
-		dma_free_coherent(dev, qp->qdma.size, qp->qdma.va,
-				  qp->qdma.dma);
 err_clear_bit:
 	write_lock(&qm->qps_lock);
 	qm->qp_array[qp_id] = NULL;
@@ -1704,6 +1696,7 @@ int hisi_qm_init(struct hisi_qm *qm)
 	qm->qp_in_used = 0;
 	mutex_init(&qm->mailbox_lock);
 	rwlock_init(&qm->qps_lock);
+	INIT_WORK(&qm->work, qm_work_process);
 
 	dev_dbg(dev, "init qm %s with %s\n", pdev->is_physfn ? "pf" : "vf",
 		qm->use_dma_api ? "dma api" : "iommu api");
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -183,6 +183,9 @@ struct hisi_qm {
 	u32 error_mask;
 	u32 msi_mask;
 
+	struct workqueue_struct *wq;
+	struct work_struct work;
+
 	const char *algs;
 	bool use_dma_api;
 	bool use_sva;
@@ -219,8 +222,6 @@ struct hisi_qp {
 	void *qp_ctx;
 	void (*req_cb)(struct hisi_qp *qp, void *data);
 	void (*event_cb)(struct hisi_qp *qp);
-	struct work_struct work;
-	struct workqueue_struct *wq;
 
 	struct hisi_qm *qm;
 	u16 pasid;