crypto: qat - fix use of 'dma_map_single'
DMA_TO_DEVICE synchronisation must be done after the last modification of the memory region by the software and before it is handed off to the device.

Signed-off-by: Hui Tang <tanghui20@huawei.com>
Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
parent
792b32fad5
commit
7cc05071f9
@@ -718,8 +718,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
|
||||
int n = sg_nents(sgl);
|
||||
struct qat_alg_buf_list *bufl;
|
||||
struct qat_alg_buf_list *buflout = NULL;
|
||||
dma_addr_t blp;
|
||||
dma_addr_t bloutp;
|
||||
dma_addr_t blp = DMA_MAPPING_ERROR;
|
||||
dma_addr_t bloutp = DMA_MAPPING_ERROR;
|
||||
struct scatterlist *sg;
|
||||
size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
|
||||
|
||||
@@ -734,10 +734,6 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
|
||||
for_each_sg(sgl, sg, n, i)
|
||||
bufl->bufers[i].addr = DMA_MAPPING_ERROR;
|
||||
|
||||
blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
|
||||
if (unlikely(dma_mapping_error(dev, blp)))
|
||||
goto err_in;
|
||||
|
||||
for_each_sg(sgl, sg, n, i) {
|
||||
int y = sg_nctr;
|
||||
|
||||
@@ -753,6 +749,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
|
||||
sg_nctr++;
|
||||
}
|
||||
bufl->num_bufs = sg_nctr;
|
||||
blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
|
||||
if (unlikely(dma_mapping_error(dev, blp)))
|
||||
goto err_in;
|
||||
qat_req->buf.bl = bufl;
|
||||
qat_req->buf.blp = blp;
|
||||
qat_req->buf.sz = sz;
|
||||
@@ -772,9 +771,6 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
|
||||
for_each_sg(sglout, sg, n, i)
|
||||
bufers[i].addr = DMA_MAPPING_ERROR;
|
||||
|
||||
bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
|
||||
if (unlikely(dma_mapping_error(dev, bloutp)))
|
||||
goto err_out;
|
||||
for_each_sg(sglout, sg, n, i) {
|
||||
int y = sg_nctr;
|
||||
|
||||
@@ -791,6 +787,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
|
||||
}
|
||||
buflout->num_bufs = sg_nctr;
|
||||
buflout->num_mapped_bufs = sg_nctr;
|
||||
bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
|
||||
if (unlikely(dma_mapping_error(dev, bloutp)))
|
||||
goto err_out;
|
||||
qat_req->buf.blout = buflout;
|
||||
qat_req->buf.bloutp = bloutp;
|
||||
qat_req->buf.sz_out = sz_out;
|
||||
@@ -802,17 +801,21 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
if (!dma_mapping_error(dev, bloutp))
|
||||
dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
|
||||
|
||||
n = sg_nents(sglout);
|
||||
for (i = 0; i < n; i++)
|
||||
if (!dma_mapping_error(dev, buflout->bufers[i].addr))
|
||||
dma_unmap_single(dev, buflout->bufers[i].addr,
|
||||
buflout->bufers[i].len,
|
||||
DMA_BIDIRECTIONAL);
|
||||
if (!dma_mapping_error(dev, bloutp))
|
||||
dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
|
||||
kfree(buflout);
|
||||
|
||||
err_in:
|
||||
if (!dma_mapping_error(dev, blp))
|
||||
dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
|
||||
|
||||
n = sg_nents(sgl);
|
||||
for (i = 0; i < n; i++)
|
||||
if (!dma_mapping_error(dev, bufl->bufers[i].addr))
|
||||
@@ -820,8 +823,6 @@ err_in:
|
||||
bufl->bufers[i].len,
|
||||
DMA_BIDIRECTIONAL);
|
||||
|
||||
if (!dma_mapping_error(dev, blp))
|
||||
dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
|
||||
kfree(bufl);
|
||||
|
||||
dev_err(dev, "Failed to map buf for dma\n");
|
||||
|
Loading…
Reference in New Issue
Block a user