crypto: inside-secure - do not use areq->result for partial results

This patch updates the SafeXcel driver to stop using the crypto
ahash_request result field for partial results (i.e. on updates).
Instead the driver local safexcel_ahash_req state field is used, and
only on final operations the ahash_request result buffer is updated.

Fixes: 1b44c5a60c ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver")
Signed-off-by: Antoine Tenart <antoine.tenart@free-electrons.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
Antoine Ténart 2017-12-11 12:10:58 +01:00 committed by Herbert Xu
parent 7cad2fabd5
commit 2973633e9f

View File

@ -35,7 +35,7 @@ struct safexcel_ahash_req {
bool needs_inv; bool needs_inv;
u8 state_sz; /* expected sate size, only set once */ u8 state_sz; /* expected sate size, only set once */
u32 state[SHA256_DIGEST_SIZE / sizeof(u32)]; u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
u64 len; u64 len;
u64 processed; u64 processed;
@ -128,7 +128,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
struct ahash_request *areq = ahash_request_cast(async); struct ahash_request *areq = ahash_request_cast(async);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_req *sreq = ahash_request_ctx(areq); struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
int cache_len, result_sz = sreq->state_sz; int cache_len;
*ret = 0; *ret = 0;
@ -149,8 +149,8 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
spin_unlock_bh(&priv->ring[ring].egress_lock); spin_unlock_bh(&priv->ring[ring].egress_lock);
if (sreq->finish) if (sreq->finish)
result_sz = crypto_ahash_digestsize(ahash); memcpy(areq->result, sreq->state,
memcpy(sreq->state, areq->result, result_sz); crypto_ahash_digestsize(ahash));
dma_unmap_sg(priv->dev, areq->src, dma_unmap_sg(priv->dev, areq->src,
sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE); sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
@ -274,7 +274,7 @@ send_command:
/* Add the token */ /* Add the token */
safexcel_hash_token(first_cdesc, len, req->state_sz); safexcel_hash_token(first_cdesc, len, req->state_sz);
ctx->base.result_dma = dma_map_single(priv->dev, areq->result, ctx->base.result_dma = dma_map_single(priv->dev, req->state,
req->state_sz, DMA_FROM_DEVICE); req->state_sz, DMA_FROM_DEVICE);
if (dma_mapping_error(priv->dev, ctx->base.result_dma)) { if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
ret = -EINVAL; ret = -EINVAL;