crypto: hisilicon - PASID fixed on Kunpeng 930

Enable PASID on Kunpeng 930 by setting the 'sqc' and 'cqc' PASID bits
per queue.

For Kunpeng 920, PASID takes effect for all queues at once when it is
set in SVA scenarios.

Signed-off-by: Weili Qian <qianweili@huawei.com>
Signed-off-by: Hui Tang <tanghui20@huawei.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Authored by Weili Qian on 2021-02-04 16:59:36 +08:00; committed by Herbert Xu
parent 553d09b314
commit cc3292d1df
5 changed files with 27 additions and 13 deletions
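
On Kunpeng 930 the per-queue enable boils down to a single predicate: the SQC/CQC PASID bit (QM_QC_PASID_ENABLE shifted by QM_QC_PASID_ENABLE_SHIFT into w11) is set only when the QM hardware is v3 or newer, SVA is in use, and the queue was requested from user space via uacce rather than created in the kernel. Below is a minimal sketch of that predicate, reusing the constants and struct fields introduced by the diffs; the helper name qm_qc_pasid_field() is illustrative and not part of the patch:

#define QM_QC_PASID_ENABLE		0x1
#define QM_QC_PASID_ENABLE_SHIFT	7

/*
 * Illustrative sketch (not in the patch): the w11 PASID field written
 * into a queue's SQC/CQC. Kunpeng 930 (QM_HW_V3 and later) enables
 * PASID per queue, and only for user-space (uacce) queues under SVA;
 * kernel-owned queues keep the bit cleared.
 */
static __le16 qm_qc_pasid_field(struct hisi_qm *qm, struct hisi_qp *qp)
{
	if (qm->ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
		return cpu_to_le16(QM_QC_PASID_ENABLE <<
				   QM_QC_PASID_ENABLE_SHIFT);

	return cpu_to_le16(0);
}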


@@ -356,10 +356,6 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
 	writel(0x0, HPRE_ADDR(qm, HPRE_COMM_CNT_CLR_CE));
 	writel(0x0, HPRE_ADDR(qm, HPRE_ECC_BYPASS));
-	/* Enable data buffer pasid */
-	if (qm->use_sva)
-		hpre_pasid_enable(qm);
 	writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_ARUSR_CFG));
 	writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_AWUSR_CFG));
 	writel(0x1, HPRE_ADDR(qm, HPRE_RDCHN_INI_CFG));
@@ -383,6 +379,10 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
 			dev_err(dev, "acpi_evaluate_dsm err.\n");
 		disable_flr_of_bme(qm);
+		/* Enable data buffer pasid */
+		if (qm->use_sva)
+			hpre_pasid_enable(qm);
 	}
 	return ret;
@@ -993,16 +993,18 @@ static void hpre_remove(struct pci_dev *pdev)
 			return;
 		}
 	}
-	if (qm->fun_type == QM_HW_PF) {
-		if (qm->use_sva)
-			hpre_pasid_disable(qm);
-		hpre_cnt_regs_clear(qm);
-		qm->debug.curr_qm_qp_num = 0;
-	}
 	hpre_debugfs_exit(qm);
 	hisi_qm_stop(qm, QM_NORMAL);
-	hisi_qm_dev_err_uninit(qm);
+	if (qm->fun_type == QM_HW_PF) {
+		if (qm->use_sva && qm->ver == QM_HW_V2)
+			hpre_pasid_disable(qm);
+		hpre_cnt_regs_clear(qm);
+		qm->debug.curr_qm_qp_num = 0;
+		hisi_qm_dev_err_uninit(qm);
+	}
 	hisi_qm_uninit(qm);
 }
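
The hpre hunks above, like the SEC and ZIP hunks further below, confine the engine-level (device-wide) data-buffer PASID/SVA configuration to Kunpeng 920, whose QM hardware is v2; on Kunpeng 930 the per-queue SQC/CQC bit from qm.c takes over. A hedged sketch of that recurring gate, with hpre_pasid_enable() standing in for each engine's own routine and the wrapper name being hypothetical:

/*
 * Sketch only: run the global data-buffer PASID setup solely on
 * Kunpeng 920 (QM_HW_V2). Kunpeng 930 skips this path and relies on
 * the per-queue SQC/CQC PASID bit instead.
 */
static void hpre_cfg_data_pasid(struct hisi_qm *qm)
{
	if (qm->use_sva && qm->ver == QM_HW_V2)
		hpre_pasid_enable(qm);
}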


@@ -54,6 +54,8 @@
 #define QM_SQ_PRIORITY_SHIFT 0
 #define QM_SQ_ORDERS_SHIFT 4
 #define QM_SQ_TYPE_SHIFT 8
+#define QM_QC_PASID_ENABLE 0x1
+#define QM_QC_PASID_ENABLE_SHIFT 7
 #define QM_SQ_TYPE_MASK GENMASK(3, 0)
 #define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc)->w11) >> 6) & 0x1)
@@ -1685,6 +1687,7 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
 	qp->req_cb = NULL;
 	qp->qp_id = qp_id;
 	qp->alg_type = alg_type;
+	qp->is_in_kernel = true;
 	qm->qp_in_used++;
 	atomic_set(&qp->qp_status.flags, QP_INIT);
@@ -1759,6 +1762,10 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
 	sqc->cq_num = cpu_to_le16(qp_id);
 	sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
+	if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
+		sqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE <<
+				       QM_QC_PASID_ENABLE_SHIFT);
 	sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
 				 DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, sqc_dma)) {
@@ -1797,6 +1804,9 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
 	}
 	cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
+	if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
+		cqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE);
 	cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
 				 DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, cqc_dma)) {
@@ -2067,6 +2077,7 @@ static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
 	qp->uacce_q = q;
 	qp->event_cb = qm_qp_event_notifier;
 	qp->pasid = arg;
+	qp->is_in_kernel = false;
 	return 0;
 }


@@ -288,6 +288,7 @@ struct hisi_qp
 	struct hisi_qm *qm;
 	bool is_resetting;
+	bool is_in_kernel;
 	u16 pasid;
 	struct uacce_queue *uacce_q;
 };


@@ -323,7 +323,7 @@ static int sec_engine_init(struct hisi_qm *qm)
 	reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
 	reg &= SEC_USER1_SMMU_MASK;
-	if (qm->use_sva)
+	if (qm->use_sva && qm->ver == QM_HW_V2)
 		reg |= SEC_USER1_SMMU_SVA;
 	else
 		reg |= SEC_USER1_SMMU_NORMAL;


@@ -292,7 +292,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
 	writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
 	writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63);
-	if (qm->use_sva) {
+	if (qm->use_sva && qm->ver == QM_HW_V2) {
 		writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63);
 		writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63);
 	} else {