crypto: qat - remove dma_free_coherent() for RSA

After commit f5ff79fddf ("dma-mapping: remove CONFIG_DMA_REMAP"), if
the algorithms are enabled, the driver crashes with a BUG_ON while
executing vunmap() in the context of a tasklet. This is due to the fact
that the function dma_free_coherent() cannot be called in an interrupt
context (see Documentation/core-api/dma-api-howto.rst).

The functions qat_rsa_enc() and qat_rsa_dec() allocate memory with
dma_alloc_coherent() if the source or destination buffers are made up
of multiple flat buffers or have a size that is not compatible with the
hardware.
This memory is then freed with dma_free_coherent() in the context of a
tasklet invoked to handle the response for the corresponding request.

Replace the allocations done with dma_alloc_coherent() in the functions
qat_rsa_enc() and qat_rsa_dec() with kzalloc() + dma_map_single(), and
free the buffers with kfree_sensitive() + dma_unmap_single(), both of
which are safe to call from the response tasklet.
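The change amounts to swapping a coherent DMA buffer for plain kernel
memory plus a streaming mapping. A minimal sketch of the pattern (buf,
dma_addr, len and the free_buf label are illustrative names, not the
actual driver fields):

	/* Before: coherent buffer; the free must not run in interrupt context. */
	buf = dma_alloc_coherent(dev, len, &dma_addr, GFP_KERNEL);
	...
	dma_free_coherent(dev, len, buf, dma_addr);	/* BUG if called from a tasklet */

	/* After: zeroed kernel memory plus a streaming DMA mapping. */
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	dma_addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr))
		goto free_buf;
	...
	/* Both calls below are safe in interrupt context. */
	dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
	kfree_sensitive(buf);	/* zeroes the buffer before freeing */

kfree_sensitive() zeroes the memory before freeing it, which matters
here since the bounce buffers can hold message and key material.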

Cc: stable@vger.kernel.org
Fixes: a990532023 ("crypto: qat - Add support for RSA algorithm")
Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Adam Guerin <adam.guerin@intel.com>
Reviewed-by: Wojciech Ziemba <wojciech.ziemba@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
@@ -529,25 +529,22 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
 
 	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
 
-	if (req->src_align)
-		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
-				  req->in.rsa.enc.m);
-	else
-		dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
-				 DMA_TO_DEVICE);
+	kfree_sensitive(req->src_align);
+
+	dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
+			 DMA_TO_DEVICE);
 
 	areq->dst_len = req->ctx.rsa->key_sz;
 	if (req->dst_align) {
 		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
 					 areq->dst_len, 1);
 
-		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
-				  req->out.rsa.enc.c);
-	} else {
-		dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
-				 DMA_FROM_DEVICE);
+		kfree_sensitive(req->dst_align);
 	}
+
+	dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
+			 DMA_FROM_DEVICE);
 
 	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
 			 DMA_TO_DEVICE);
 	dma_unmap_single(dev, req->phy_out,
@@ -664,6 +661,7 @@ static int qat_rsa_enc(struct akcipher_request *req)
 	struct qat_asym_request *qat_req =
 			PTR_ALIGN(akcipher_request_ctx(req), 64);
 	struct icp_qat_fw_pke_request *msg = &qat_req->req;
+	u8 *vaddr;
 	int ret;
 
 	if (unlikely(!ctx->n || !ctx->e))
@@ -701,40 +699,39 @@ static int qat_rsa_enc(struct akcipher_request *req)
 	 */
 	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
 		qat_req->src_align = NULL;
-		qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
-						       req->src_len, DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
-			return ret;
-
+		vaddr = sg_virt(req->src);
 	} else {
 		int shift = ctx->key_sz - req->src_len;
 
-		qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
-							&qat_req->in.rsa.enc.m,
-							GFP_KERNEL);
+		qat_req->src_align = kzalloc(ctx->key_sz, GFP_KERNEL);
 		if (unlikely(!qat_req->src_align))
 			return ret;
 
 		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
 					 0, req->src_len, 0);
+		vaddr = qat_req->src_align;
 	}
+
+	qat_req->in.rsa.enc.m = dma_map_single(dev, vaddr, ctx->key_sz,
+					       DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
+		goto unmap_src;
+
 	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
 		qat_req->dst_align = NULL;
-		qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
-							req->dst_len,
-							DMA_FROM_DEVICE);
-
-		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
-			goto unmap_src;
-
+		vaddr = sg_virt(req->dst);
 	} else {
-		qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
-							&qat_req->out.rsa.enc.c,
-							GFP_KERNEL);
+		qat_req->dst_align = kzalloc(ctx->key_sz, GFP_KERNEL);
 		if (unlikely(!qat_req->dst_align))
 			goto unmap_src;
-
+		vaddr = qat_req->dst_align;
 	}
+
+	qat_req->out.rsa.enc.c = dma_map_single(dev, vaddr, ctx->key_sz,
+						DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
+		goto unmap_dst;
+
 	qat_req->in.rsa.in_tab[3] = 0;
 	qat_req->out.rsa.out_tab[1] = 0;
 	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
@@ -772,21 +769,15 @@ unmap_in_params:
 			 sizeof(struct qat_rsa_input_params),
 			 DMA_TO_DEVICE);
 unmap_dst:
-	if (qat_req->dst_align)
-		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
-				  qat_req->out.rsa.enc.c);
-	else
-		if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
-			dma_unmap_single(dev, qat_req->out.rsa.enc.c,
-					 ctx->key_sz, DMA_FROM_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
+		dma_unmap_single(dev, qat_req->out.rsa.enc.c,
+				 ctx->key_sz, DMA_FROM_DEVICE);
+	kfree_sensitive(qat_req->dst_align);
 unmap_src:
-	if (qat_req->src_align)
-		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
-				  qat_req->in.rsa.enc.m);
-	else
-		if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
-			dma_unmap_single(dev, qat_req->in.rsa.enc.m,
-					 ctx->key_sz, DMA_TO_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
+		dma_unmap_single(dev, qat_req->in.rsa.enc.m, ctx->key_sz,
+				 DMA_TO_DEVICE);
+	kfree_sensitive(qat_req->src_align);
 	return ret;
 }
@@ -799,6 +790,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
 	struct qat_asym_request *qat_req =
 			PTR_ALIGN(akcipher_request_ctx(req), 64);
 	struct icp_qat_fw_pke_request *msg = &qat_req->req;
+	u8 *vaddr;
 	int ret;
 
 	if (unlikely(!ctx->n || !ctx->d))
@@ -846,40 +838,37 @@ static int qat_rsa_dec(struct akcipher_request *req)
 	 */
 	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
 		qat_req->src_align = NULL;
-		qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
-						   req->dst_len, DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
-			return ret;
-
+		vaddr = sg_virt(req->src);
 	} else {
 		int shift = ctx->key_sz - req->src_len;
 
-		qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
-							&qat_req->in.rsa.dec.c,
-							GFP_KERNEL);
+		qat_req->src_align = kzalloc(ctx->key_sz, GFP_KERNEL);
 		if (unlikely(!qat_req->src_align))
 			return ret;
 
 		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
 					 0, req->src_len, 0);
+		vaddr = qat_req->src_align;
 	}
+
+	qat_req->in.rsa.dec.c = dma_map_single(dev, vaddr, ctx->key_sz,
+					       DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
+		goto unmap_src;
+
 	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
 		qat_req->dst_align = NULL;
-		qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
-							req->dst_len,
-							DMA_FROM_DEVICE);
-
-		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
-			goto unmap_src;
-
+		vaddr = sg_virt(req->dst);
 	} else {
-		qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
-							&qat_req->out.rsa.dec.m,
-							GFP_KERNEL);
+		qat_req->dst_align = kzalloc(ctx->key_sz, GFP_KERNEL);
 		if (unlikely(!qat_req->dst_align))
 			goto unmap_src;
+		vaddr = qat_req->dst_align;
 	}
 
+	qat_req->out.rsa.dec.m = dma_map_single(dev, vaddr, ctx->key_sz,
+						DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
+		goto unmap_dst;
 	if (ctx->crt_mode)
 		qat_req->in.rsa.in_tab[6] = 0;
@@ -925,21 +914,15 @@ unmap_in_params:
 			 sizeof(struct qat_rsa_input_params),
 			 DMA_TO_DEVICE);
 unmap_dst:
-	if (qat_req->dst_align)
-		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
-				  qat_req->out.rsa.dec.m);
-	else
-		if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
-			dma_unmap_single(dev, qat_req->out.rsa.dec.m,
-					 ctx->key_sz, DMA_FROM_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
+		dma_unmap_single(dev, qat_req->out.rsa.dec.m,
+				 ctx->key_sz, DMA_FROM_DEVICE);
+	kfree_sensitive(qat_req->dst_align);
 unmap_src:
-	if (qat_req->src_align)
-		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
-				  qat_req->in.rsa.dec.c);
-	else
-		if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
-			dma_unmap_single(dev, qat_req->in.rsa.dec.c,
-					 ctx->key_sz, DMA_TO_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
+		dma_unmap_single(dev, qat_req->in.rsa.dec.c, ctx->key_sz,
+				 DMA_TO_DEVICE);
+	kfree_sensitive(qat_req->src_align);
 	return ret;
 }