crypto: inside-secure - move cache result dma mapping to request

Under heavy traffic the cache DMA mapping gets overwritten by concurrent
requests, because the DMA address is stored in a context shared by all
requests on the same tfm. Move this information to the per-request ahash
context so that one request can no longer overwrite another's mapping.

Fixes: 1b44c5a60c ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver")
Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Antoine Tenart 2018-02-26 14:45:11 +01:00 committed by Herbert Xu
parent b859202722
commit cff9a17545
3 changed files with 18 additions and 45 deletions
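
For readers outside the driver, here is a minimal user-space C sketch of the
failure mode the commit message describes. Every name in it (shared_ctx,
hash_req, the fake addresses) is hypothetical and only illustrates the
pattern, under the assumption that two requests on the same tfm can be in
flight at once:

#include <stdio.h>

/* Hypothetical stand-in for the shared, per-tfm context. */
struct shared_ctx {
	unsigned long cache_dma;	/* one slot shared by every request */
};

/* Hypothetical stand-in for the per-request context. */
struct hash_req {
	unsigned long cache_dma;	/* private to this request */
};

int main(void)
{
	struct shared_ctx ctx = { 0 };
	struct hash_req req_a = { 0 }, req_b = { 0 };

	/*
	 * Buggy pattern: both in-flight requests store their mapping in
	 * the shared context; request B clobbers request A's handle, so
	 * A later unmaps B's address.
	 */
	ctx.cache_dma = 0x1000;		/* request A's mapping */
	ctx.cache_dma = 0x2000;		/* request B overwrites it */
	printf("shared ctx: request A would unmap %#lx (wrong)\n",
	       ctx.cache_dma);

	/*
	 * Fixed pattern: each request keeps its own handle, which is
	 * what moving cache_dma/cache_sz into the per-request struct
	 * achieves in this patch.
	 */
	req_a.cache_dma = 0x1000;
	req_b.cache_dma = 0x2000;
	printf("per-request: A unmaps %#lx, B unmaps %#lx\n",
	       req_a.cache_dma, req_b.cache_dma);

	return 0;
}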

drivers/crypto/inside-secure/safexcel.c

@@ -537,20 +537,6 @@ finalize:
 	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
 }
 
-void safexcel_free_context(struct safexcel_crypto_priv *priv,
-			   struct crypto_async_request *req)
-{
-	struct safexcel_context *ctx = crypto_tfm_ctx(req->tfm);
-
-	if (ctx->cache) {
-		dma_unmap_single(priv->dev, ctx->cache_dma, ctx->cache_sz,
-				 DMA_TO_DEVICE);
-		kfree(ctx->cache);
-		ctx->cache = NULL;
-		ctx->cache_sz = 0;
-	}
-}
-
 void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
 {
 	struct safexcel_command_desc *cdesc;

drivers/crypto/inside-secure/safexcel.h

@@ -578,11 +578,6 @@ struct safexcel_context {
 	int ring;
 	bool needs_inv;
 	bool exit_inv;
-
-	/* Used for ahash requests */
-	void *cache;
-	dma_addr_t cache_dma;
-	unsigned int cache_sz;
 };
 
 /*
@@ -606,8 +601,6 @@ struct safexcel_inv_result {
 
 void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring);
 void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
-void safexcel_free_context(struct safexcel_crypto_priv *priv,
-			   struct crypto_async_request *req);
 int safexcel_invalidate_cache(struct crypto_async_request *async,
 			      struct safexcel_crypto_priv *priv,
 			      dma_addr_t ctxr_dma, int ring,

drivers/crypto/inside-secure/safexcel_hash.c

@@ -43,6 +43,9 @@ struct safexcel_ahash_req {
 	u64 processed;
 
 	u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+	dma_addr_t cache_dma;
+	unsigned int cache_sz;
+
 	u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
 };
 
@@ -165,7 +168,11 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 		sreq->result_dma = 0;
 	}
 
-	safexcel_free_context(priv, async);
+	if (sreq->cache_dma) {
+		dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
+				 DMA_TO_DEVICE);
+		sreq->cache_dma = 0;
+	}
 
 	cache_len = sreq->len - sreq->processed;
 	if (cache_len)
@@ -227,24 +234,15 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 
 	/* Add a command descriptor for the cached data, if any */
 	if (cache_len) {
-		ctx->base.cache = kzalloc(cache_len, EIP197_GFP_FLAGS(*async));
-		if (!ctx->base.cache) {
-			ret = -ENOMEM;
-			goto unlock;
-		}
-		memcpy(ctx->base.cache, req->cache, cache_len);
-		ctx->base.cache_dma = dma_map_single(priv->dev, ctx->base.cache,
-						     cache_len, DMA_TO_DEVICE);
-		if (dma_mapping_error(priv->dev, ctx->base.cache_dma)) {
-			ret = -EINVAL;
-			goto free_cache;
-		}
+		req->cache_dma = dma_map_single(priv->dev, req->cache,
+						cache_len, DMA_TO_DEVICE);
+		if (dma_mapping_error(priv->dev, req->cache_dma))
+			return -EINVAL;
 
-		ctx->base.cache_sz = cache_len;
+		req->cache_sz = cache_len;
 		first_cdesc = safexcel_add_cdesc(priv, ring, 1,
 						 (cache_len == len),
-						 ctx->base.cache_dma,
-						 cache_len, len,
+						 req->cache_dma, cache_len, len,
 						 ctx->base.ctxr_dma);
 		if (IS_ERR(first_cdesc)) {
 			ret = PTR_ERR(first_cdesc);
@@ -328,16 +326,12 @@ cdesc_rollback:
 	for (i = 0; i < n_cdesc; i++)
 		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
 unmap_cache:
-	if (ctx->base.cache_dma) {
-		dma_unmap_single(priv->dev, ctx->base.cache_dma,
-				 ctx->base.cache_sz, DMA_TO_DEVICE);
-		ctx->base.cache_sz = 0;
+	if (req->cache_dma) {
+		dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
+				 DMA_TO_DEVICE);
+		req->cache_sz = 0;
 	}
-free_cache:
-	kfree(ctx->base.cache);
-	ctx->base.cache = NULL;
-
 unlock:
 	spin_unlock_bh(&priv->ring[ring].egress_lock);
 	return ret;
 }