// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */

#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include "safexcel.h"
struct safexcel_ahash_ctx {
	struct safexcel_context base;
	struct safexcel_crypto_priv *priv;

	u32 alg;

	u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
	u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
};
struct safexcel_ahash_req {
	bool last_req;
	bool finish;
	bool hmac;
	bool needs_inv;
	bool hmac_zlen;
	bool len_is_le;

	int nents;
	dma_addr_t result_dma;

	u32 digest;

	u8 state_sz;    /* expected state size, only set once */
	u8 block_sz;    /* block size, only set once */
	u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));

	u64 len;
	u64 processed;

	u8 cache[HASH_CACHE_SIZE] __aligned(sizeof(u32));
	dma_addr_t cache_dma;
	unsigned int cache_sz;

	u8 cache_next[HASH_CACHE_SIZE] __aligned(sizeof(u32));
};
static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
{
	return req->len - req->processed;
}
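
/*
 * Build the two-instruction command token: hash input_length bytes of
 * input data, then insert the result_length byte digest into the output
 * stream.
 */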
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
				u32 input_length, u32 result_length)
{
	struct safexcel_token *token =
		(struct safexcel_token *)cdesc->control_data.token;

	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
	token[0].packet_length = input_length;
	token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
	token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;

	token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
	token[1].packet_length = result_length;
	token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
			EIP197_TOKEN_STAT_LAST_PACKET;
	token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
				EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
}
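
/*
 * Fill in the context control words for this request: (re)start vs.
 * continuation, finish vs. no-finish, digest type and context size, and
 * load the (inner) digest and block counter into the context record when
 * continuing a previous operation.
 */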
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
				     struct safexcel_ahash_req *req,
				     struct safexcel_command_desc *cdesc)
{
	struct safexcel_crypto_priv *priv = ctx->priv;
	u64 count = 0;

	cdesc->control_data.control0 |= ctx->alg;

	/*
	 * Copy the input digest if needed, and setup the context
	 * fields. Do this now as we need it to setup the first command
	 * descriptor.
	 */
	if (!req->processed) {
		/* First - and possibly only - block of basic hash only */
		if (req->finish) {
			cdesc->control_data.control0 |=
				CONTEXT_CONTROL_TYPE_HASH_OUT |
				CONTEXT_CONTROL_RESTART_HASH  |
				/* ensure it's not 0! */
				CONTEXT_CONTROL_SIZE(1);
		} else {
			cdesc->control_data.control0 |=
				CONTEXT_CONTROL_TYPE_HASH_OUT  |
				CONTEXT_CONTROL_RESTART_HASH   |
				CONTEXT_CONTROL_NO_FINISH_HASH |
				/* ensure it's not 0! */
				CONTEXT_CONTROL_SIZE(1);
		}
		return;
	}

	/* Hash continuation or HMAC, setup (inner) digest from state */
	memcpy(ctx->base.ctxr->data, req->state, req->state_sz);

	if (req->finish) {
		/* Compute digest count for hash/HMAC finish operations */
		if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) ||
		    req->hmac_zlen || (req->processed != req->block_sz)) {
			count = req->processed / EIP197_COUNTER_BLOCK_SIZE;

			/* This is a hardware limitation, as the
			 * counter must fit into a u32. This represents
			 * a fairly big amount of input data, so we
			 * shouldn't see this.
			 */
			if (unlikely(count & 0xffffffff00000000ULL)) {
				dev_warn(priv->dev,
					 "Input data is too big\n");
				return;
			}
		}

		if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) ||
		    /* Special case: zero length HMAC */
		    req->hmac_zlen ||
		    /* PE HW < 4.4 cannot do HMAC continue, fake using hash */
		    (req->processed != req->block_sz)) {
			/* Basic hash continue operation, need digest + cnt */
			cdesc->control_data.control0 |=
				CONTEXT_CONTROL_SIZE((req->state_sz >> 2) + 1) |
				CONTEXT_CONTROL_TYPE_HASH_OUT |
				CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
			/* For zero-len HMAC, don't finalize, already padded! */
			if (req->hmac_zlen)
				cdesc->control_data.control0 |=
					CONTEXT_CONTROL_NO_FINISH_HASH;
			cdesc->control_data.control1 |=
				CONTEXT_CONTROL_DIGEST_CNT;
			ctx->base.ctxr->data[req->state_sz >> 2] =
				cpu_to_le32(count);
			req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;

			/* Clear zero-length HMAC flag for next operation! */
			req->hmac_zlen = false;
		} else { /* HMAC */
			/* Need outer digest for HMAC finalization */
			memcpy(ctx->base.ctxr->data + (req->state_sz >> 2),
			       ctx->opad, req->state_sz);

			/* Single pass HMAC - no digest count */
			cdesc->control_data.control0 |=
				CONTEXT_CONTROL_SIZE(req->state_sz >> 1) |
				CONTEXT_CONTROL_TYPE_HASH_OUT |
				CONTEXT_CONTROL_DIGEST_HMAC;
		}
	} else { /* Hash continuation, do not finish yet */
		cdesc->control_data.control0 |=
			CONTEXT_CONTROL_SIZE(req->state_sz >> 2) |
			CONTEXT_CONTROL_DIGEST_PRECOMPUTED |
			CONTEXT_CONTROL_TYPE_HASH_OUT |
			CONTEXT_CONTROL_NO_FINISH_HASH;
	}
}
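
/*
 * Result path for a regular hash request: check the result descriptor,
 * unmap all DMA buffers and either complete the request or, when HMAC is
 * being faked with a plain hash, re-enqueue it for the outer hash pass.
 */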
static int safexcel_ahash_enqueue(struct ahash_request *areq);

static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
				      int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
	u64 cache_len;

	*ret = 0;

	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
	if (IS_ERR(rdesc)) {
		dev_err(priv->dev,
			"hash: result: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else {
		*ret = safexcel_rdesc_check_errors(priv, rdesc);
	}

	safexcel_complete(priv, ring);

	if (sreq->nents) {
		dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
		sreq->nents = 0;
	}

	if (sreq->result_dma) {
		dma_unmap_single(priv->dev, sreq->result_dma, sreq->state_sz,
				 DMA_FROM_DEVICE);
		sreq->result_dma = 0;
	}

	if (sreq->cache_dma) {
		dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
				 DMA_TO_DEVICE);
		sreq->cache_dma = 0;
		sreq->cache_sz = 0;
	}

	if (sreq->finish) {
		if (sreq->hmac &&
		    (sreq->digest != CONTEXT_CONTROL_DIGEST_HMAC)) {
			/* Faking HMAC using hash - need to do outer hash */
			memcpy(sreq->cache, sreq->state,
			       crypto_ahash_digestsize(ahash));

			memcpy(sreq->state, ctx->opad, sreq->state_sz);

			sreq->len = sreq->block_sz +
				    crypto_ahash_digestsize(ahash);
			sreq->processed = sreq->block_sz;
			sreq->hmac = 0;

			ctx->base.needs_inv = true;
			areq->nbytes = 0;
			safexcel_ahash_enqueue(areq);

			*should_complete = false; /* Not done yet */
			return 1;
		}

		memcpy(areq->result, sreq->state,
		       crypto_ahash_digestsize(ahash));
	}

	cache_len = safexcel_queued_len(sreq);
	if (cache_len)
		memcpy(sreq->cache, sreq->cache_next, cache_len);

	*should_complete = true;

	return 1;
}
static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
				   int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
	struct safexcel_result_desc *rdesc;
	struct scatterlist *sg;
	int i, extra = 0, n_cdesc = 0, ret = 0;
	u64 queued, len, cache_len;

	queued = len = safexcel_queued_len(req);
	if (queued <= HASH_CACHE_SIZE)
		cache_len = queued;
	else
		cache_len = queued - areq->nbytes;

	if (!req->finish && !req->last_req) {
		/* If this is not the last request and the queued data does not
		 * fit into full cache blocks, cache it for the next send call.
		 */
		extra = queued & (HASH_CACHE_SIZE - 1);

		/* If this is not the last request and the queued data
		 * is a multiple of a block, cache the last one for now.
		 */
		if (!extra)
			extra = HASH_CACHE_SIZE;

		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
				   req->cache_next, extra,
				   areq->nbytes - extra);

		queued -= extra;
		len -= extra;

		if (!queued) {
			*commands = 0;
			*results = 0;
			return 0;
		}
	}

	/* Add a command descriptor for the cached data, if any */
	if (cache_len) {
		req->cache_dma = dma_map_single(priv->dev, req->cache,
						cache_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dev, req->cache_dma))
			return -EINVAL;

		req->cache_sz = cache_len;
		first_cdesc = safexcel_add_cdesc(priv, ring, 1,
						 (cache_len == len),
						 req->cache_dma, cache_len, len,
						 ctx->base.ctxr_dma);
		if (IS_ERR(first_cdesc)) {
			ret = PTR_ERR(first_cdesc);
			goto unmap_cache;
		}
		n_cdesc++;

		queued -= cache_len;
		if (!queued)
			goto send_command;
	}

	/* Skip descriptor generation for zero-length requests */
	if (!areq->nbytes)
		goto send_command;

	/* Now handle the current ahash request buffer(s) */
	req->nents = dma_map_sg(priv->dev, areq->src,
				sg_nents_for_len(areq->src,
						 areq->nbytes),
				DMA_TO_DEVICE);
	if (!req->nents) {
		ret = -ENOMEM;
		goto cdesc_rollback;
	}

	for_each_sg(areq->src, sg, req->nents, i) {
		int sglen = sg_dma_len(sg);

		/* Do not overflow the request */
		if (queued < sglen)
			sglen = queued;

		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
					   !(queued - sglen),
					   sg_dma_address(sg),
					   sglen, len, ctx->base.ctxr_dma);
		if (IS_ERR(cdesc)) {
			ret = PTR_ERR(cdesc);
			goto unmap_sg;
		}
		n_cdesc++;

		if (n_cdesc == 1)
			first_cdesc = cdesc;

		queued -= sglen;
		if (!queued)
			break;
	}

send_command:
	/* Setup the context options */
	safexcel_context_control(ctx, req, first_cdesc);

	/* Add the token */
	safexcel_hash_token(first_cdesc, len, req->state_sz);

	req->result_dma = dma_map_single(priv->dev, req->state, req->state_sz,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->dev, req->result_dma)) {
		ret = -EINVAL;
		goto unmap_sg;
	}

	/* Add a result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
				   req->state_sz);
	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto unmap_result;
	}

	safexcel_rdr_req_set(priv, ring, rdesc, &areq->base);

	req->processed += len;

	*commands = n_cdesc;
	*results = 1;
	return 0;

unmap_result:
	dma_unmap_single(priv->dev, req->result_dma, req->state_sz,
			 DMA_FROM_DEVICE);
unmap_sg:
	dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
cdesc_rollback:
	for (i = 0; i < n_cdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
unmap_cache:
	if (req->cache_dma) {
		dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
				 DMA_TO_DEVICE);
		req->cache_dma = 0;
		req->cache_sz = 0;
	}

	return ret;
}
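
/*
 * Result path for a context invalidation request: free the context
 * record when exiting, otherwise move the request to a fresh ring and
 * enqueue it again.
 */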
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
				      int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
	int enq_ret;

	*ret = 0;

	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
	if (IS_ERR(rdesc)) {
		dev_err(priv->dev,
			"hash: invalidate: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else {
		*ret = safexcel_rdesc_check_errors(priv, rdesc);
	}

	safexcel_complete(priv, ring);

	if (ctx->base.exit_inv) {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);

		*should_complete = true;
		return 1;
	}

	ring = safexcel_select_ring(priv);
	ctx->base.ring = ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (enq_ret != -EINPROGRESS)
		*ret = enq_ret;

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	*should_complete = false;

	return 1;
}
static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
				  struct crypto_async_request *async,
				  bool *should_complete, int *ret)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	int err;

	BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && req->needs_inv);

	if (req->needs_inv) {
		req->needs_inv = false;
		err = safexcel_handle_inv_result(priv, ring, async,
						 should_complete, ret);
	} else {
		err = safexcel_handle_req_result(priv, ring, async,
						 should_complete, ret);
	}

	return err;
}
static int safexcel_ahash_send_inv(struct crypto_async_request *async,
				   int ring, int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	int ret;

	ret = safexcel_invalidate_cache(async, ctx->priv,
					ctx->base.ctxr_dma, ring);
	if (unlikely(ret))
		return ret;

	*commands = 1;
	*results = 1;

	return 0;
}
static int safexcel_ahash_send(struct crypto_async_request *async,
			       int ring, int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	int ret;

	if (req->needs_inv)
		ret = safexcel_ahash_send_inv(async, ring, commands, results);
	else
		ret = safexcel_ahash_send_req(async, ring, commands, results);

	return ret;
}
static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	EIP197_REQUEST_ON_STACK(req, ahash, EIP197_AHASH_REQ_SIZE);
	struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
	struct safexcel_inv_result result = {};
	int ring = ctx->base.ring;

	memset(req, 0, EIP197_AHASH_REQ_SIZE);

	/* create invalidation request */
	init_completion(&result.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   safexcel_inv_complete, &result);

	ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
	ctx = crypto_tfm_ctx(req->base.tfm);
	ctx->base.exit_inv = true;
	rctx->needs_inv = true;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	wait_for_completion(&result.completion);

	if (result.error) {
		dev_warn(priv->dev, "hash: completion error (%d)\n",
			 result.error);
		return result.error;
	}

	return 0;
}
/* safexcel_ahash_cache: cache data until at least one request can be sent to
 * the engine, i.e. when there is at least one block size worth of data in
 * the pipe.
 */
static int safexcel_ahash_cache(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	u64 cache_len;

	/* cache_len: everything accepted by the driver but not sent yet,
	 * tot sz handled by update() - last req sz - tot sz handled by send()
	 */
	cache_len = safexcel_queued_len(req);

	/*
	 * If there aren't enough bytes to proceed (less than a
	 * block size), cache the data until we have enough.
	 */
	if (cache_len + areq->nbytes <= HASH_CACHE_SIZE) {
		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
				   req->cache + cache_len,
				   areq->nbytes, 0);
		return 0;
	}

	/* We couldn't cache all the data */
	return -E2BIG;
}
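
/*
 * Queue the request on the ring bound to its context, allocating a
 * context record on first use and flagging an invalidation when the
 * cached hardware context no longer matches the request state.
 */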
static int safexcel_ahash_enqueue(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret, ring;

	req->needs_inv = false;

	if (ctx->base.ctxr) {
		if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv &&
		    req->processed &&
		    (/* invalidate for basic hash continuation finish */
		     (req->finish &&
		      (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)) ||
		     /* invalidate if (i)digest changed */
		     memcmp(ctx->base.ctxr->data, req->state, req->state_sz) ||
		     /* invalidate for HMAC continuation finish */
		     (req->finish && (req->processed != req->block_sz)) ||
		     /* invalidate for HMAC finish with odigest changed */
		     (req->finish &&
		      memcmp(ctx->base.ctxr->data + (req->state_sz >> 2),
			     ctx->opad, req->state_sz))))
			/*
			 * We're still setting needs_inv here, even though it is
			 * cleared right away, because the needs_inv flag can be
			 * set in other functions and we want to keep the same
			 * logic.
			 */
			ctx->base.needs_inv = true;

		if (ctx->base.needs_inv) {
			ctx->base.needs_inv = false;
			req->needs_inv = true;
		}
	} else {
		ctx->base.ring = safexcel_select_ring(priv);
		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
						 EIP197_GFP_FLAGS(areq->base),
						 &ctx->base.ctxr_dma);
		if (!ctx->base.ctxr)
			return -ENOMEM;
	}

	ring = ctx->base.ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return ret;
}
static int safexcel_ahash_update(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	int ret;

	/* If the request is 0 length, do nothing */
	if (!areq->nbytes)
		return 0;

	/* Add request to the cache if it fits */
	ret = safexcel_ahash_cache(areq);

	/* Update total request length */
	req->len += areq->nbytes;

	/* If not all data could fit into the cache, go process the excess.
	 * Also go process immediately for an HMAC IV precompute, which
	 * will never be finished at all, but needs to be processed anyway.
	 */
	if ((ret && !req->finish) || req->last_req)
		return safexcel_ahash_enqueue(areq);

	return 0;
}
static int safexcel_ahash_final(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

	req->finish = true;

	if (unlikely(!req->len && !areq->nbytes)) {
		/*
		 * If we have an overall 0 length *hash* request:
		 * The HW cannot do 0 length hash, so we provide the correct
		 * result directly here.
		 */
		if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
			memcpy(areq->result, md5_zero_message_hash,
			       MD5_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
			memcpy(areq->result, sha1_zero_message_hash,
			       SHA1_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
			memcpy(areq->result, sha224_zero_message_hash,
			       SHA224_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
			memcpy(areq->result, sha256_zero_message_hash,
			       SHA256_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384)
			memcpy(areq->result, sha384_zero_message_hash,
			       SHA384_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
			memcpy(areq->result, sha512_zero_message_hash,
			       SHA512_DIGEST_SIZE);

		return 0;
	} else if (unlikely(req->hmac &&
			    (req->len == req->block_sz) &&
			    !areq->nbytes)) {
		/*
		 * If we have an overall 0 length *HMAC* request:
		 * For HMAC, we need to finalize the inner digest
		 * and then perform the outer hash.
		 */

		/* generate pad block in the cache */
		/* start with a hash block of all zeroes */
		memset(req->cache, 0, req->block_sz);
		/* set the first byte to 0x80 to 'append a 1 bit' */
		req->cache[0] = 0x80;
		/* add the length in bits in the last 2 bytes */
		if (req->len_is_le) {
			/* Little endian length word (e.g. MD5) */
			req->cache[req->block_sz - 8] = (req->block_sz << 3) &
							255;
			req->cache[req->block_sz - 7] = (req->block_sz >> 5);
		} else {
			/* Big endian length word (e.g. any SHA) */
			req->cache[req->block_sz - 2] = (req->block_sz >> 5);
			req->cache[req->block_sz - 1] = (req->block_sz << 3) &
							255;
		}
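		/*
		 * Example: for a 64 byte block (SHA-1/SHA-256) this leaves
		 * cache[0] = 0x80 and cache[62..63] = 0x02 0x00, i.e. a big
		 * endian length word of 512 bits - exactly one block.
		 */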

		req->len += req->block_sz; /* plus 1 hash block */

		/* Set special zero-length HMAC flag */
		req->hmac_zlen = true;

		/* Finalize HMAC */
		req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	} else if (req->hmac) {
		/* Finalize HMAC */
		req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	}

	return safexcel_ahash_enqueue(areq);
}
static int safexcel_ahash_finup(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	req->finish = true;

	safexcel_ahash_update(areq);
	return safexcel_ahash_final(areq);
}

static int safexcel_ahash_export(struct ahash_request *areq, void *out)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_export_state *export = out;

	export->len = req->len;
	export->processed = req->processed;

	export->digest = req->digest;

	memcpy(export->state, req->state, req->state_sz);
	memcpy(export->cache, req->cache, HASH_CACHE_SIZE);

	return 0;
}

static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	const struct safexcel_ahash_export_state *export = in;
	int ret;

	ret = crypto_ahash_init(areq);
	if (ret)
		return ret;

	req->len = export->len;
	req->processed = export->processed;

	req->digest = export->digest;

	memcpy(req->cache, export->cache, HASH_CACHE_SIZE);
	memcpy(req->state, export->state, req->state_sz);

	return 0;
}
static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_alg_template *tmpl =
		container_of(__crypto_ahash_alg(tfm->__crt_alg),
			     struct safexcel_alg_template, alg.ahash);

	ctx->priv = tmpl->priv;
	ctx->base.send = safexcel_ahash_send;
	ctx->base.handle_result = safexcel_handle_result;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct safexcel_ahash_req));

	return 0;
}
static int safexcel_sha1_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA1_DIGEST_SIZE;
	req->block_sz = SHA1_BLOCK_SIZE;

	return 0;
}

static int safexcel_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha1_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}
static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	/* context not allocated, skip invalidation */
	if (!ctx->base.ctxr)
		return;

	if (priv->flags & EIP197_TRC_CACHE) {
		ret = safexcel_ahash_exit_inv(tfm);
		if (ret)
			dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
	} else {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);
	}
}
struct safexcel_alg_template safexcel_alg_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA1,
	.alg.ahash = {
		.init = safexcel_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha1_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "safexcel-sha1",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
static int safexcel_hmac_sha1_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Start from ipad precompute */
	memcpy(req->state, ctx->ipad, SHA1_DIGEST_SIZE);
	/* Already processed the key^ipad part now! */
	req->len = SHA1_BLOCK_SIZE;
	req->processed = SHA1_BLOCK_SIZE;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA1_DIGEST_SIZE;
	req->block_sz = SHA1_BLOCK_SIZE;
	req->hmac = true;

	return 0;
}

static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha1_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}
struct safexcel_ahash_result {
	struct completion completion;
	int error;
};

static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}
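
/*
 * Derive the HMAC ipad/opad blocks from the key: hash keys longer than a
 * block down to digest size, zero pad to the block size, then XOR with
 * the ipad/opad constants.
 */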
static int safexcel_hmac_init_pad(struct ahash_request *areq,
				  unsigned int blocksize, const u8 *key,
				  unsigned int keylen, u8 *ipad, u8 *opad)
{
	struct safexcel_ahash_result result;
	struct scatterlist sg;
	int ret, i;
	u8 *keydup;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		keydup = kmemdup(key, keylen, GFP_KERNEL);
		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   safexcel_ahash_complete, &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(areq, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(areq);
		if (ret == -EINPROGRESS || ret == -EBUSY) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Avoid leaking */
		memzero_explicit(keydup, keylen);
		kfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	return 0;
}
static int safexcel_hmac_init_iv(struct ahash_request *areq,
				 unsigned int blocksize, u8 *pad, void *state)
{
	struct safexcel_ahash_result result;
	struct safexcel_ahash_req *req;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   safexcel_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(areq, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(areq);
	if (ret)
		return ret;

	req = ahash_request_ctx(areq);
	req->hmac = true;
	req->last_req = true;

	ret = crypto_ahash_update(areq);
	if (ret && ret != -EINPROGRESS && ret != -EBUSY)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	return crypto_ahash_export(areq, state);
}
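
/*
 * Precompute the inner (key ^ ipad) and outer (key ^ opad) partial hash
 * states for HMAC by running one block through the ahash transform named
 * by @alg, and export them into istate/ostate.
 */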
int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
			 void *istate, void *ostate)
{
	struct ahash_request *areq;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad, *opad;
	int ret;

	tfm = crypto_alloc_ahash(alg, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	areq = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!areq) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);
	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	ipad = kcalloc(2, blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_request;
	}

	opad = ipad + blocksize;

	ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);
	if (ret)
		goto free_ipad;

	ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);
	if (ret)
		goto free_ipad;

	ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);

free_ipad:
	kfree(ipad);
free_request:
	ahash_request_free(areq);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}
static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen, const char *alg,
				    unsigned int state_sz)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_ahash_export_state istate, ostate;
	int ret;

	ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr &&
	    (memcmp(ctx->ipad, istate.state, state_sz) ||
	     memcmp(ctx->opad, ostate.state, state_sz)))
		ctx->base.needs_inv = true;

	memcpy(ctx->ipad, &istate.state, state_sz);
	memcpy(ctx->opad, &ostate.state, state_sz);

	return 0;
}
static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha1",
					SHA1_DIGEST_SIZE);
}
struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA1,
	.alg.ahash = {
		.init = safexcel_hmac_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha1_digest,
		.setkey = safexcel_hmac_sha1_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "safexcel-hmac-sha1",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
static int safexcel_sha256_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;
	req->block_sz = SHA256_BLOCK_SIZE;

	return 0;
}

static int safexcel_sha256_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha256_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_sha256 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_256,
	.alg.ahash = {
		.init = safexcel_sha256_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha256_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "safexcel-sha256",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
static int safexcel_sha224_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;
	req->block_sz = SHA256_BLOCK_SIZE;

	return 0;
}

static int safexcel_sha224_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha224_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_sha224 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_256,
	.alg.ahash = {
		.init = safexcel_sha224_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha224_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "safexcel-sha224",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha224",
					SHA256_DIGEST_SIZE);
}

static int safexcel_hmac_sha224_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Start from ipad precompute */
	memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE);
	/* Already processed the key^ipad part now! */
	req->len = SHA256_BLOCK_SIZE;
	req->processed = SHA256_BLOCK_SIZE;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;
	req->block_sz = SHA256_BLOCK_SIZE;
	req->hmac = true;

	return 0;
}

static int safexcel_hmac_sha224_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha224_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_256,
	.alg.ahash = {
		.init = safexcel_hmac_sha224_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha224_digest,
		.setkey = safexcel_hmac_sha224_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "safexcel-hmac-sha224",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha256",
					SHA256_DIGEST_SIZE);
}

static int safexcel_hmac_sha256_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Start from ipad precompute */
	memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE);
	/* Already processed the key^ipad part now! */
	req->len = SHA256_BLOCK_SIZE;
	req->processed = SHA256_BLOCK_SIZE;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;
	req->block_sz = SHA256_BLOCK_SIZE;
	req->hmac = true;

	return 0;
}

static int safexcel_hmac_sha256_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha256_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_256,
	.alg.ahash = {
		.init = safexcel_hmac_sha256_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha256_digest,
		.setkey = safexcel_hmac_sha256_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "safexcel-hmac-sha256",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
static int safexcel_sha512_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA512_DIGEST_SIZE;
	req->block_sz = SHA512_BLOCK_SIZE;

	return 0;
}

static int safexcel_sha512_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha512_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_sha512 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_512,
	.alg.ahash = {
		.init = safexcel_sha512_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha512_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA512_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha512",
				.cra_driver_name = "safexcel-sha512",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
static int safexcel_sha384_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA512_DIGEST_SIZE;
	req->block_sz = SHA512_BLOCK_SIZE;

	return 0;
}

static int safexcel_sha384_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha384_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_sha384 = {
. type = SAFEXCEL_ALG_TYPE_AHASH ,
2019-08-30 10:40:54 +03:00
. algo_mask = SAFEXCEL_ALG_SHA2_512 ,
2018-05-29 15:13:50 +03:00
. alg . ahash = {
. init = safexcel_sha384_init ,
. update = safexcel_ahash_update ,
. final = safexcel_ahash_final ,
. finup = safexcel_ahash_finup ,
. digest = safexcel_sha384_digest ,
. export = safexcel_ahash_export ,
. import = safexcel_ahash_import ,
. halg = {
. digestsize = SHA384_DIGEST_SIZE ,
. statesize = sizeof ( struct safexcel_ahash_export_state ) ,
. base = {
. cra_name = " sha384 " ,
. cra_driver_name = " safexcel-sha384 " ,
2019-08-30 10:41:47 +03:00
. cra_priority = SAFEXCEL_CRA_PRIORITY ,
2018-05-29 15:13:50 +03:00
. cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY ,
. cra_blocksize = SHA384_BLOCK_SIZE ,
. cra_ctxsize = sizeof ( struct safexcel_ahash_ctx ) ,
. cra_init = safexcel_ahash_cra_init ,
. cra_exit = safexcel_ahash_cra_exit ,
. cra_module = THIS_MODULE ,
} ,
} ,
} ,
} ;
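
/*
 * HMAC-SHA512: setkey precomputes the ipad/opad hash states using the
 * plain "safexcel-sha512" transform registered above.
 */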
static int safexcel_hmac_sha512_setkey(struct crypto_ahash *tfm, const u8 *key,
					unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha512",
					SHA512_DIGEST_SIZE);
}
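
/*
 * For HMAC the request does not start from scratch: the state is seeded
 * with the precomputed hash of key^ipad, so len/processed already account
 * for that first block.
 */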
static int safexcel_hmac_sha512_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Start from ipad precompute */
	memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE);
	/* Already processed the key^ipad part now! */
	req->len = SHA512_BLOCK_SIZE;
	req->processed = SHA512_BLOCK_SIZE;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA512_DIGEST_SIZE;
	req->block_sz = SHA512_BLOCK_SIZE;
	req->hmac = true;

	return 0;
}

static int safexcel_hmac_sha512_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha512_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_hmac_sha512 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_512,
	.alg.ahash = {
		.init = safexcel_hmac_sha512_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha512_digest,
		.setkey = safexcel_hmac_sha512_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA512_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "safexcel-hmac-sha512",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
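
/*
 * HMAC-SHA384 follows the same scheme, with the ipad/opad precompute done
 * by the plain "safexcel-sha384" transform and the SHA-512 sized state.
 */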
static int safexcel_hmac_sha384_setkey(struct crypto_ahash *tfm, const u8 *key,
					unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha384",
					SHA512_DIGEST_SIZE);
}

static int safexcel_hmac_sha384_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Start from ipad precompute */
	memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE);
	/* Already processed the key^ipad part now! */
	req->len = SHA512_BLOCK_SIZE;
	req->processed = SHA512_BLOCK_SIZE;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA512_DIGEST_SIZE;
	req->block_sz = SHA512_BLOCK_SIZE;
	req->hmac = true;

	return 0;
}

static int safexcel_hmac_sha384_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha384_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_hmac_sha384 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_512,
	.alg.ahash = {
		.init = safexcel_hmac_sha384_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha384_digest,
		.setkey = safexcel_hmac_sha384_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA384_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "safexcel-hmac-sha384",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
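
/*
 * MD5. <crypto/md5.h> only provides the block size in bytes as
 * MD5_HMAC_BLOCK_SIZE, which is why that constant is used for the plain
 * hash as well.
 */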
static int safexcel_md5_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = MD5_DIGEST_SIZE;
	req->block_sz = MD5_HMAC_BLOCK_SIZE;

	return 0;
}

static int safexcel_md5_digest(struct ahash_request *areq)
{
	int ret = safexcel_md5_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_md5 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_MD5,
	.alg.ahash = {
		.init = safexcel_md5_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_md5_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "safexcel-md5",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
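
/*
 * HMAC-MD5. Unlike the SHA family, MD5 pads with a little-endian length
 * field, which the len_is_le flag tells the finalization code about.
 */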
static int safexcel_hmac_md5_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Start from ipad precompute */
	memcpy(req->state, ctx->ipad, MD5_DIGEST_SIZE);
	/* Already processed the key^ipad part now! */
	req->len = MD5_HMAC_BLOCK_SIZE;
	req->processed = MD5_HMAC_BLOCK_SIZE;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = MD5_DIGEST_SIZE;
	req->block_sz = MD5_HMAC_BLOCK_SIZE;
	req->len_is_le = true; /* MD5 is little endian! ... */
	req->hmac = true;

	return 0;
}

static int safexcel_hmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-md5",
					MD5_DIGEST_SIZE);
}

static int safexcel_hmac_md5_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_md5_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_hmac_md5 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_MD5,
	.alg.ahash = {
		.init = safexcel_hmac_md5_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_md5_digest,
		.setkey = safexcel_hmac_md5_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "safexcel-hmac-md5",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
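
/*
 * Illustrative sketch only (not part of the driver): how a kernel-side user
 * could exercise one of the transforms registered above through the generic
 * ahash API. The function name and buffers are invented for this example,
 * it assumes <crypto/hash.h> and <linux/scatterlist.h> are available, and it
 * omits the async-completion handling a real caller would need when the
 * request returns -EINPROGRESS.
 */
static int __maybe_unused example_hmac_sha512_digest(const u8 *key,
						     unsigned int keylen,
						     const u8 *data,
						     unsigned int datalen,
						     u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int ret;

	/* The crypto core picks "safexcel-hmac-sha512" if it wins on cra_priority. */
	tfm = crypto_alloc_ahash("hmac(sha512)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ahash_setkey(tfm, key, keylen);
	if (ret)
		goto free_tfm;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_tfm;
	}

	/* 'data' must be a linear, DMA-able kernel buffer for sg_init_one(). */
	sg_init_one(&sg, data, datalen);
	ahash_request_set_callback(req, 0, NULL, NULL);
	ahash_request_set_crypt(req, &sg, out, datalen);

	ret = crypto_ahash_digest(req);

	ahash_request_free(req);
free_tfm:
	crypto_free_ahash(tfm);
	return ret;
}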