// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/internal/des.h>
#include <crypto/sha.h>
#include <crypto/xts.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

enum safexcel_cipher_direction {
	SAFEXCEL_ENCRYPT,
	SAFEXCEL_DECRYPT,
};

enum safexcel_cipher_alg {
	SAFEXCEL_DES,
	SAFEXCEL_3DES,
	SAFEXCEL_AES,
};

struct safexcel_cipher_ctx {
	struct safexcel_context base;
	struct safexcel_crypto_priv *priv;

	u32 mode;
	enum safexcel_cipher_alg alg;
	bool aead;

	__le32 key[16];
	u32 nonce;
	unsigned int key_len, xts;

	/* All the below is AEAD specific */
	u32 hash_alg;
	u32 state_sz;
	u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
	u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
};

struct safexcel_cipher_req {
	enum safexcel_cipher_direction direction;
	/* Number of result descriptors associated to the request */
	unsigned int rdescs;
	bool needs_inv;
	int nr_src, nr_dst;
};
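
/*
 * Fill in the context-control token area of the first command descriptor:
 * for block modes the IV is copied in directly, while CTR_LOAD mode builds
 * the 32-bit nonce, 64-bit IV and 32-bit counter (starting at 1) layout.
 * ECB needs no IV material at all.
 */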
static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
				  struct safexcel_command_desc *cdesc)
{
	u32 block_sz = 0;

	if (ctx->mode != CONTEXT_CONTROL_CRYPTO_MODE_ECB) {
		switch (ctx->alg) {
		case SAFEXCEL_DES:
			block_sz = DES_BLOCK_SIZE;
			cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
			break;
		case SAFEXCEL_3DES:
			block_sz = DES3_EDE_BLOCK_SIZE;
			cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
			break;
		case SAFEXCEL_AES:
			block_sz = AES_BLOCK_SIZE;
			cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
			break;
		}

		if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) {
			/* 32 bit nonce */
			cdesc->control_data.token[0] = ctx->nonce;
			/* 64 bit IV part */
			memcpy(&cdesc->control_data.token[1], iv, 8);
			/* 32 bit counter, start at 1 (big endian!) */
			cdesc->control_data.token[3] = cpu_to_be32(1);
		} else {
			memcpy(cdesc->control_data.token, iv, block_sz);
		}
	}
}
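
/*
 * Build the instruction sequence for a plain skcipher request: a single
 * DIRECTION instruction covering the whole payload, placed after the
 * worst-case 4-dword IV area filled in by safexcel_cipher_token().
 */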
static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
				    struct safexcel_command_desc *cdesc,
				    u32 length)
{
	struct safexcel_token *token;

	safexcel_cipher_token(ctx, iv, cdesc);

	/* skip over worst case IV of 4 dwords, no need to be exact */
	token = (struct safexcel_token *)(cdesc->control_data.token + 4);

	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
	token[0].packet_length = length;
	token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET |
			EIP197_TOKEN_STAT_LAST_HASH;
	token[0].instructions = EIP197_TOKEN_INS_LAST |
				EIP197_TOKEN_INS_TYPE_CRYPTO |
				EIP197_TOKEN_INS_TYPE_OUTPUT;
}
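
/*
 * Build the AEAD instruction sequence: hash-only handling of the associated
 * data, crypto+hash of the payload, and digest insertion on encrypt or
 * digest retrieve+verify on decrypt. The trailing instructions are aligned
 * to the end of the token area.
 */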
static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
				struct safexcel_command_desc *cdesc,
				enum safexcel_cipher_direction direction,
				u32 cryptlen, u32 assoclen, u32 digestsize)
{
	struct safexcel_token *token;

	safexcel_cipher_token(ctx, iv, cdesc);

	if (direction == SAFEXCEL_DECRYPT)
		cryptlen -= digestsize;

	if (direction == SAFEXCEL_ENCRYPT) {
		/* align end of instruction sequence to end of token */
		token = (struct safexcel_token *)(cdesc->control_data.token +
			 EIP197_MAX_TOKENS - 3);

		token[2].opcode = EIP197_TOKEN_OPCODE_INSERT;
		token[2].packet_length = digestsize;
		token[2].stat = EIP197_TOKEN_STAT_LAST_HASH |
				EIP197_TOKEN_STAT_LAST_PACKET;
		token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
					EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
	} else {
		/* align end of instruction sequence to end of token */
		token = (struct safexcel_token *)(cdesc->control_data.token +
			 EIP197_MAX_TOKENS - 4);

		token[2].opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
		token[2].packet_length = digestsize;
		token[2].stat = EIP197_TOKEN_STAT_LAST_HASH |
				EIP197_TOKEN_STAT_LAST_PACKET;
		token[2].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;

		token[3].opcode = EIP197_TOKEN_OPCODE_VERIFY;
		token[3].packet_length = digestsize |
					 EIP197_TOKEN_HASH_RESULT_VERIFY;
		token[3].stat = EIP197_TOKEN_STAT_LAST_HASH |
			        EIP197_TOKEN_STAT_LAST_PACKET;
		token[3].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
	}

	if (unlikely(!cryptlen)) {
		token[1].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
		token[1].packet_length = assoclen;
		token[1].stat = EIP197_TOKEN_STAT_LAST_HASH;
		token[1].instructions = EIP197_TOKEN_INS_LAST |
					EIP197_TOKEN_INS_TYPE_HASH;
	} else {
		if (likely(assoclen)) {
			token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
			token[0].packet_length = assoclen;
			token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;
		}

		token[1].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
		token[1].packet_length = cryptlen;
		token[1].stat = EIP197_TOKEN_STAT_LAST_HASH;
		token[1].instructions = EIP197_TOKEN_INS_LAST |
					EIP197_TOKEN_INS_TYPE_CRYPTO |
					EIP197_TOKEN_INS_TYPE_HASH |
					EIP197_TOKEN_INS_TYPE_OUTPUT;
	}
}
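
/*
 * skcipher AES setkey: expand the key, flag a context record invalidation
 * if a cached context exists with different key material, then store the
 * key words in little-endian form in the transform context.
 */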
static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
					const u8 *key, unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct crypto_aes_ctx aes;
	int ret, i;

	ret = aes_expandkey(&aes, key, len);
	if (ret) {
		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
		for (i = 0; i < len / sizeof(u32); i++) {
			if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
				ctx->base.needs_inv = true;
				break;
			}
		}
	}

	for (i = 0; i < len / sizeof(u32); i++)
		ctx->key[i] = cpu_to_le32(aes.key_enc[i]);

	ctx->key_len = len;

	memzero_explicit(&aes, sizeof(aes));
	return 0;
}
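
/*
 * AEAD (authenc) setkey: split the authenc blob into cipher and auth keys,
 * strip the RFC3686 nonce in CTR mode, validate the cipher key, precompute
 * the HMAC ipad/opad digests for the selected hash, and invalidate any
 * cached context whose keys changed.
 */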
static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
				unsigned int len)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_ahash_export_state istate, ostate;
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct crypto_authenc_keys keys;
	struct crypto_aes_ctx aes;
	int err = -EINVAL;

	if (crypto_authenc_extractkeys(&keys, key, len) != 0)
		goto badkey;

	if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) {
		/* Minimum keysize is minimum AES key size + nonce size */
		if (keys.enckeylen < (AES_MIN_KEY_SIZE +
				      CTR_RFC3686_NONCE_SIZE))
			goto badkey;
		/* last 4 bytes of key are the nonce! */
		ctx->nonce = *(u32 *)(keys.enckey + keys.enckeylen -
				      CTR_RFC3686_NONCE_SIZE);
		/* exclude the nonce here */
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}

	/* Encryption key */
	switch (ctx->alg) {
	case SAFEXCEL_3DES:
		err = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen);
		if (unlikely(err))
			goto badkey_expflags;
		break;
	case SAFEXCEL_AES:
		err = aes_expandkey(&aes, keys.enckey, keys.enckeylen);
		if (unlikely(err))
			goto badkey;
		break;
	default:
		dev_err(priv->dev, "aead: unsupported cipher algorithm\n");
		goto badkey;
	}

	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
	    memcmp(ctx->key, keys.enckey, keys.enckeylen))
		ctx->base.needs_inv = true;

	/* Auth key */
	switch (ctx->hash_alg) {
	case CONTEXT_CONTROL_CRYPTO_ALG_SHA1:
		if (safexcel_hmac_setkey("safexcel-sha1", keys.authkey,
					 keys.authkeylen, &istate, &ostate))
			goto badkey;
		break;
	case CONTEXT_CONTROL_CRYPTO_ALG_SHA224:
		if (safexcel_hmac_setkey("safexcel-sha224", keys.authkey,
					 keys.authkeylen, &istate, &ostate))
			goto badkey;
		break;
	case CONTEXT_CONTROL_CRYPTO_ALG_SHA256:
		if (safexcel_hmac_setkey("safexcel-sha256", keys.authkey,
					 keys.authkeylen, &istate, &ostate))
			goto badkey;
		break;
	case CONTEXT_CONTROL_CRYPTO_ALG_SHA384:
		if (safexcel_hmac_setkey("safexcel-sha384", keys.authkey,
					 keys.authkeylen, &istate, &ostate))
			goto badkey;
		break;
	case CONTEXT_CONTROL_CRYPTO_ALG_SHA512:
		if (safexcel_hmac_setkey("safexcel-sha512", keys.authkey,
					 keys.authkeylen, &istate, &ostate))
			goto badkey;
		break;
	default:
		dev_err(priv->dev, "aead: unsupported hash algorithm\n");
		goto badkey;
	}

	crypto_aead_set_flags(ctfm, crypto_aead_get_flags(ctfm) &
				    CRYPTO_TFM_RES_MASK);

	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
	    (memcmp(ctx->ipad, istate.state, ctx->state_sz) ||
	     memcmp(ctx->opad, ostate.state, ctx->state_sz)))
		ctx->base.needs_inv = true;

	/* Now copy the keys into the context */
	memcpy(ctx->key, keys.enckey, keys.enckeylen);
	ctx->key_len = keys.enckeylen;

	memcpy(ctx->ipad, &istate.state, ctx->state_sz);
	memcpy(ctx->opad, &ostate.state, ctx->state_sz);

	memzero_explicit(&keys, sizeof(keys));
	return 0;

badkey:
	crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
badkey_expflags:
	memzero_explicit(&keys, sizeof(keys));
	return err;
}
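
/*
 * Program the per-request context-control words: operation type (crypto
 * only or crypto+hash), direction, cipher algorithm and key size, and the
 * total context record size including the HMAC ipad/opad digests for AEAD
 * transforms.
 */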
static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
				    struct crypto_async_request *async,
				    struct safexcel_cipher_req *sreq,
				    struct safexcel_command_desc *cdesc)
{
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ctrl_size;

	if (ctx->aead) {
		if (sreq->direction == SAFEXCEL_ENCRYPT)
			cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT;
		else
			cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN;
	} else {
		cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;

		/* The decryption control type is a combination of the
		 * encryption type and CONTEXT_CONTROL_TYPE_NULL_IN, for all
		 * types.
		 */
		if (sreq->direction == SAFEXCEL_DECRYPT)
			cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_NULL_IN;
	}

	cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN;
	cdesc->control_data.control1 |= ctx->mode;

	if (ctx->aead)
		cdesc->control_data.control0 |= CONTEXT_CONTROL_DIGEST_HMAC |
						ctx->hash_alg;

	if (ctx->alg == SAFEXCEL_DES) {
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_DES;
	} else if (ctx->alg == SAFEXCEL_3DES) {
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_3DES;
	} else if (ctx->alg == SAFEXCEL_AES) {
		switch (ctx->key_len >> ctx->xts) {
		case AES_KEYSIZE_128:
			cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
			break;
		case AES_KEYSIZE_192:
			cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
			break;
		case AES_KEYSIZE_256:
			cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
			break;
		default:
			dev_err(priv->dev, "aes keysize not supported: %u\n",
				ctx->key_len >> ctx->xts);
			return -EINVAL;
		}
	}

	ctrl_size = ctx->key_len / sizeof(u32);
	if (ctx->aead)
		/* Take in account the ipad+opad digests */
		ctrl_size += ctx->state_sz / sizeof(u32) * 2;

	cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size);

	return 0;
}
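
/*
 * Completion path for regular (non-invalidation) requests: consume the
 * result descriptors, check them for errors, unmap the DMA buffers and,
 * for CBC encryption, copy the last output block back into req->iv.
 */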
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
				      struct crypto_async_request *async,
				      struct scatterlist *src,
				      struct scatterlist *dst,
				      unsigned int cryptlen,
				      struct safexcel_cipher_req *sreq,
				      bool *should_complete, int *ret)
{
	struct skcipher_request *areq = skcipher_request_cast(async);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(areq);
	struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct safexcel_result_desc *rdesc;
	int ndesc = 0;

	*ret = 0;

	if (unlikely(!sreq->rdescs))
		return 0;

	while (sreq->rdescs--) {
		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
		if (IS_ERR(rdesc)) {
			dev_err(priv->dev,
				"cipher: result: could not retrieve the result descriptor\n");
			*ret = PTR_ERR(rdesc);
			break;
		}

		if (likely(!*ret))
			*ret = safexcel_rdesc_check_errors(priv, rdesc);

		ndesc++;
	}

	safexcel_complete(priv, ring);

	if (src == dst) {
		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
		dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
	}

	/*
	 * Update IV in req from last crypto output word for CBC modes
	 */
	if ((!ctx->aead) && (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) &&
	    (sreq->direction == SAFEXCEL_ENCRYPT)) {
		/* For encrypt take the last output word */
		sg_pcopy_to_buffer(dst, sreq->nr_dst, areq->iv,
				   crypto_skcipher_ivsize(skcipher),
				   (cryptlen -
				    crypto_skcipher_ivsize(skcipher)));
	}

	*should_complete = true;

	return ndesc;
}
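
/*
 * Build the command and result descriptor chains for one request: map the
 * source/destination scatterlists, copy key and ipad/opad material into the
 * context record, emit one command descriptor per source segment with the
 * token on the first one, and one result descriptor per destination segment
 * (skipping the AAD area). Zero-length input and AAD-only decrypt get dummy
 * descriptors, since the engine always needs at least one of each.
 */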
static int safexcel_send_req(struct crypto_async_request *base, int ring,
			     struct safexcel_cipher_req *sreq,
			     struct scatterlist *src, struct scatterlist *dst,
			     unsigned int cryptlen, unsigned int assoclen,
			     unsigned int digestsize, u8 *iv, int *commands,
			     int *results)
{
	struct skcipher_request *areq = skcipher_request_cast(base);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(areq);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_command_desc *cdesc;
	struct safexcel_command_desc *first_cdesc = NULL;
	struct safexcel_result_desc *rdesc, *first_rdesc = NULL;
	struct scatterlist *sg;
	unsigned int totlen;
	unsigned int totlen_src = cryptlen + assoclen;
	unsigned int totlen_dst = totlen_src;
	int n_cdesc = 0, n_rdesc = 0;
	int queued, i, ret = 0;
	bool first = true;

	sreq->nr_src = sg_nents_for_len(src, totlen_src);

	if (ctx->aead) {
		/*
		 * AEAD has auth tag appended to output for encrypt and
		 * removed from the output for decrypt!
		 */
		if (sreq->direction == SAFEXCEL_DECRYPT)
			totlen_dst -= digestsize;
		else
			totlen_dst += digestsize;

		memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32),
		       ctx->ipad, ctx->state_sz);
		memcpy(ctx->base.ctxr->data + (ctx->key_len + ctx->state_sz) /
		       sizeof(u32),
		       ctx->opad, ctx->state_sz);
	} else if ((ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) &&
		   (sreq->direction == SAFEXCEL_DECRYPT)) {
		/*
		 * Save IV from last crypto input word for CBC modes in decrypt
		 * direction. Need to do this first in case of inplace operation
		 * as it will be overwritten.
		 */
		sg_pcopy_to_buffer(src, sreq->nr_src, areq->iv,
				   crypto_skcipher_ivsize(skcipher),
				   (totlen_src -
				    crypto_skcipher_ivsize(skcipher)));
	}

	sreq->nr_dst = sg_nents_for_len(dst, totlen_dst);

	/*
	 * Remember actual input length, source buffer length may be
	 * updated in case of inline operation below.
	 */
	totlen = totlen_src;
	queued = totlen_src;

	if (src == dst) {
		sreq->nr_src = max(sreq->nr_src, sreq->nr_dst);
		sreq->nr_dst = sreq->nr_src;
		if (unlikely((totlen_src || totlen_dst) &&
			     (sreq->nr_src <= 0))) {
			dev_err(priv->dev, "In-place buffer not large enough (need %d bytes)!",
				max(totlen_src, totlen_dst));
			return -EINVAL;
		}
		dma_map_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
	} else {
		if (unlikely(totlen_src && (sreq->nr_src <= 0))) {
			dev_err(priv->dev, "Source buffer not large enough (need %d bytes)!",
				totlen_src);
			return -EINVAL;
		}
		dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);

		if (unlikely(totlen_dst && (sreq->nr_dst <= 0))) {
			dev_err(priv->dev, "Dest buffer not large enough (need %d bytes)!",
				totlen_dst);
			dma_unmap_sg(priv->dev, src, sreq->nr_src,
				     DMA_TO_DEVICE);
			return -EINVAL;
		}
		dma_map_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
	}

	memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);

	/* The EIP cannot deal with zero length input packets! */
	if (totlen == 0)
		totlen = 1;

	/* command descriptors */
	for_each_sg(src, sg, sreq->nr_src, i) {
		int len = sg_dma_len(sg);

		/* Do not overflow the request */
		if (queued - len < 0)
			len = queued;

		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
					   !(queued - len),
					   sg_dma_address(sg), len, totlen,
					   ctx->base.ctxr_dma);
		if (IS_ERR(cdesc)) {
			/* No space left in the command descriptor ring */
			ret = PTR_ERR(cdesc);
			goto cdesc_rollback;
		}
		n_cdesc++;

		if (n_cdesc == 1) {
			first_cdesc = cdesc;
		}

		queued -= len;
		if (!queued)
			break;
	}

	if (unlikely(!n_cdesc)) {
		/*
		 * Special case: zero length input buffer.
		 * The engine always needs the 1st command descriptor, however!
		 */
		first_cdesc = safexcel_add_cdesc(priv, ring, 1, 1, 0, 0, totlen,
						 ctx->base.ctxr_dma);
		n_cdesc = 1;
	}

	/* Add context control words and token to first command descriptor */
	safexcel_context_control(ctx, base, sreq, first_cdesc);
	if (ctx->aead)
		safexcel_aead_token(ctx, iv, first_cdesc,
				    sreq->direction, cryptlen,
				    assoclen, digestsize);
	else
		safexcel_skcipher_token(ctx, iv, first_cdesc,
					cryptlen);

	/* result descriptors */
	for_each_sg(dst, sg, sreq->nr_dst, i) {
		bool last = (i == sreq->nr_dst - 1);
		u32 len = sg_dma_len(sg);

		/* only allow the part of the buffer we know we need */
		if (len > totlen_dst)
			len = totlen_dst;
		if (unlikely(!len))
			break;
		totlen_dst -= len;

		/* skip over AAD space in buffer - not written */
		if (assoclen) {
			if (assoclen >= len) {
				assoclen -= len;
				continue;
			}
			rdesc = safexcel_add_rdesc(priv, ring, first, last,
						   sg_dma_address(sg) +
						   assoclen,
						   len - assoclen);
			assoclen = 0;
		} else {
			rdesc = safexcel_add_rdesc(priv, ring, first, last,
						   sg_dma_address(sg),
						   len);
		}
		if (IS_ERR(rdesc)) {
			/* No space left in the result descriptor ring */
			ret = PTR_ERR(rdesc);
			goto rdesc_rollback;
		}
		if (first) {
			first_rdesc = rdesc;
			first = false;
		}
		n_rdesc++;
	}

	if (unlikely(first)) {
		/*
		 * Special case: AEAD decrypt with only AAD data.
		 * In this case there is NO output data from the engine,
		 * but the engine still needs a result descriptor!
		 * Create a dummy one just for catching the result token.
		 */
		rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
		if (IS_ERR(rdesc)) {
			/* No space left in the result descriptor ring */
			ret = PTR_ERR(rdesc);
			goto rdesc_rollback;
		}
		first_rdesc = rdesc;
		n_rdesc = 1;
	}

	safexcel_rdr_req_set(priv, ring, first_rdesc, base);

	*commands = n_cdesc;
	*results = n_rdesc;
	return 0;

rdesc_rollback:
	for (i = 0; i < n_rdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].rdr);
cdesc_rollback:
	for (i = 0; i < n_cdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

	if (src == dst) {
		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
		dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
	}

	return ret;
}
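
/*
 * Completion path for context invalidation requests: drain the result
 * descriptors and either free the context record (on transform exit) or
 * re-queue the original request on a freshly selected ring.
 */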
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
				      int ring,
				      struct crypto_async_request *base,
				      struct safexcel_cipher_req *sreq,
				      bool *should_complete, int *ret)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
	struct safexcel_result_desc *rdesc;
	int ndesc = 0, enq_ret;

	*ret = 0;

	if (unlikely(!sreq->rdescs))
		return 0;

	while (sreq->rdescs--) {
		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
		if (IS_ERR(rdesc)) {
			dev_err(priv->dev,
				"cipher: invalidate: could not retrieve the result descriptor\n");
			*ret = PTR_ERR(rdesc);
			break;
		}

		if (likely(!*ret))
			*ret = safexcel_rdesc_check_errors(priv, rdesc);

		ndesc++;
	}

	safexcel_complete(priv, ring);

	if (ctx->base.exit_inv) {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);

		*should_complete = true;

		return ndesc;
	}

	ring = safexcel_select_ring(priv);
	ctx->base.ring = ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (enq_ret != -EINPROGRESS)
		*ret = enq_ret;

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	*should_complete = false;

	return ndesc;
}

static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv,
					   int ring,
					   struct crypto_async_request *async,
					   bool *should_complete, int *ret)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	int err;

	if (sreq->needs_inv) {
		sreq->needs_inv = false;
		err = safexcel_handle_inv_result(priv, ring, async, sreq,
						 should_complete, ret);
	} else {
		err = safexcel_handle_req_result(priv, ring, async, req->src,
						 req->dst, req->cryptlen, sreq,
						 should_complete, ret);
	}

	return err;
}

static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv,
				       int ring,
				       struct crypto_async_request *async,
				       bool *should_complete, int *ret)
{
	struct aead_request *req = aead_request_cast(async);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct safexcel_cipher_req *sreq = aead_request_ctx(req);
	int err;

	if (sreq->needs_inv) {
		sreq->needs_inv = false;
		err = safexcel_handle_inv_result(priv, ring, async, sreq,
						 should_complete, ret);
	} else {
		err = safexcel_handle_req_result(priv, ring, async, req->src,
						 req->dst,
						 req->cryptlen + crypto_aead_authsize(tfm),
						 sreq, should_complete, ret);
	}

	return err;
}
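
/*
 * Queue a context-record invalidation command for this transform; it always
 * uses exactly one command and one result descriptor.
 */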
static int safexcel_cipher_send_inv(struct crypto_async_request *base,
				    int ring, int *commands, int *results)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring);
	if (unlikely(ret))
		return ret;

	*commands = 1;
	*results = 1;

	return 0;
}

static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
				  int *commands, int *results)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);

	if (sreq->needs_inv) {
		ret = safexcel_cipher_send_inv(async, ring, commands, results);
	} else {
		struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
		u8 input_iv[AES_BLOCK_SIZE];

		/*
		 * Save input IV in case of CBC decrypt mode
		 * Will be overwritten with output IV prior to use!
		 */
		memcpy(input_iv, req->iv, crypto_skcipher_ivsize(skcipher));

		ret = safexcel_send_req(async, ring, sreq, req->src,
					req->dst, req->cryptlen, 0, 0, input_iv,
					commands, results);
	}

	sreq->rdescs = *results;
	return ret;
}

static int safexcel_aead_send(struct crypto_async_request *async, int ring,
			      int *commands, int *results)
{
	struct aead_request *req = aead_request_cast(async);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_cipher_req *sreq = aead_request_ctx(req);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);

	if (sreq->needs_inv)
		ret = safexcel_cipher_send_inv(async, ring, commands, results);
	else
		ret = safexcel_send_req(async, ring, sreq, req->src, req->dst,
					req->cryptlen, req->assoclen,
					crypto_aead_authsize(tfm), req->iv,
					commands, results);

	sreq->rdescs = *results;
	return ret;
}
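
/*
 * Synchronously invalidate the context record of a transform that is being
 * torn down: queue an invalidation request and wait for its completion
 * callback to fire.
 */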
static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm,
				    struct crypto_async_request *base,
				    struct safexcel_cipher_req *sreq,
				    struct safexcel_inv_result *result)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ring = ctx->base.ring;

	init_completion(&result->completion);

	ctx = crypto_tfm_ctx(base->tfm);
	ctx->base.exit_inv = true;
	sreq->needs_inv = true;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	crypto_enqueue_request(&priv->ring[ring].queue, base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	wait_for_completion(&result->completion);

	if (result->error) {
		dev_warn(priv->dev,
			 "cipher: sync: invalidate: completion error %d\n",
			 result->error);
		return result->error;
	}

	return 0;
}

static int safexcel_skcipher_exit_inv(struct crypto_tfm *tfm)
{
	EIP197_REQUEST_ON_STACK(req, skcipher, EIP197_SKCIPHER_REQ_SIZE);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	struct safexcel_inv_result result = {};

	memset(req, 0, sizeof(struct skcipher_request));

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      safexcel_inv_complete, &result);
	skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));

	return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
}

static int safexcel_aead_exit_inv(struct crypto_tfm *tfm)
{
	EIP197_REQUEST_ON_STACK(req, aead, EIP197_AEAD_REQ_SIZE);
	struct safexcel_cipher_req *sreq = aead_request_ctx(req);
	struct safexcel_inv_result result = {};

	memset(req, 0, sizeof(struct aead_request));

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  safexcel_inv_complete, &result);
	aead_request_set_tfm(req, __crypto_aead_cast(tfm));

	return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
}
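
/*
 * Common request entry point: allocate the per-transform context record on
 * first use, decide whether a cache invalidation must precede the request,
 * then enqueue it on the transform's ring and kick the ring worker.
 */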
static int safexcel_queue_req(struct crypto_async_request *base,
			      struct safexcel_cipher_req *sreq,
			      enum safexcel_cipher_direction dir)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret, ring;

	sreq->needs_inv = false;
	sreq->direction = dir;

	if (ctx->base.ctxr) {
		if (priv->flags & EIP197_TRC_CACHE && ctx->base.needs_inv) {
			sreq->needs_inv = true;
			ctx->base.needs_inv = false;
		}
	} else {
		ctx->base.ring = safexcel_select_ring(priv);
		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
						 EIP197_GFP_FLAGS(*base),
						 &ctx->base.ctxr_dma);
		if (!ctx->base.ctxr)
			return -ENOMEM;
	}

	ring = ctx->base.ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	ret = crypto_enqueue_request(&priv->ring[ring].queue, base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return ret;
}

static int safexcel_encrypt(struct skcipher_request *req)
{
	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
				  SAFEXCEL_ENCRYPT);
}

static int safexcel_decrypt(struct skcipher_request *req)
{
	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
				  SAFEXCEL_DECRYPT);
}

static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_alg_template *tmpl =
		container_of(tfm->__crt_alg, struct safexcel_alg_template,
			     alg.skcipher.base);

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct safexcel_cipher_req));

	ctx->priv = tmpl->priv;

	ctx->base.send = safexcel_skcipher_send;
	ctx->base.handle_result = safexcel_skcipher_handle_result;

	return 0;
}
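
/*
 * Common transform teardown: wipe the key material and the context record
 * contents; returns nonzero when no context record was ever allocated, so
 * callers can skip the invalidation step.
 */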
static int safexcel_cipher_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	memzero_explicit(ctx->key, sizeof(ctx->key));

	/* context not allocated, skip invalidation */
	if (!ctx->base.ctxr)
		return -ENOMEM;

	memzero_explicit(ctx->base.ctxr->data, sizeof(ctx->base.ctxr->data));
	return 0;
}

static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	if (safexcel_cipher_cra_exit(tfm))
		return;

	if (priv->flags & EIP197_TRC_CACHE) {
		ret = safexcel_skcipher_exit_inv(tfm);
		if (ret)
			dev_warn(priv->dev, "skcipher: invalidation error %d\n",
				 ret);
	} else {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);
	}
}

static void safexcel_aead_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	if (safexcel_cipher_cra_exit(tfm))
		return;

	if (priv->flags & EIP197_TRC_CACHE) {
		ret = safexcel_aead_exit_inv(tfm);
		if (ret)
			dev_warn(priv->dev, "aead: invalidation error %d\n",
				 ret);
	} else {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);
	}
}

static int safexcel_skcipher_aes_ecb_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	safexcel_skcipher_cra_init(tfm);
	ctx->alg = SAFEXCEL_AES;
	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
	return 0;
}

struct safexcel_alg_template safexcel_alg_ecb_aes = {
	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
	.alg.skcipher = {
		.setkey = safexcel_skcipher_aes_setkey,
		.encrypt = safexcel_encrypt,
		.decrypt = safexcel_decrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "safexcel-ecb-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_skcipher_aes_ecb_cra_init,
			.cra_exit = safexcel_skcipher_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};

static int safexcel_skcipher_aes_cbc_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	safexcel_skcipher_cra_init(tfm);
	ctx->alg = SAFEXCEL_AES;
	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
	return 0;
}

struct safexcel_alg_template safexcel_alg_cbc_aes = {
	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
	.alg.skcipher = {
		.setkey = safexcel_skcipher_aes_setkey,
		.encrypt = safexcel_encrypt,
		.decrypt = safexcel_decrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "safexcel-cbc-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_skcipher_aes_cbc_cra_init,
			.cra_exit = safexcel_skcipher_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};
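
/*
 * rfc3686(ctr(aes)) setkey: the last 4 key bytes are the RFC3686 nonce;
 * strip it, expand the remaining AES key and store it like the other AES
 * setkey paths, invalidating any cached context on key change.
 */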
static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm,
					   const u8 *key, unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct crypto_aes_ctx aes;
	int ret, i;
	unsigned int keylen;

	/* last 4 bytes of key are the nonce! */
	ctx->nonce = *(u32 *)(key + len - CTR_RFC3686_NONCE_SIZE);
	/* exclude the nonce here */
	keylen = len - CTR_RFC3686_NONCE_SIZE;
	ret = aes_expandkey(&aes, key, keylen);
	if (ret) {
		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
		for (i = 0; i < keylen / sizeof(u32); i++) {
			if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
				ctx->base.needs_inv = true;
				break;
			}
		}
	}

	for (i = 0; i < keylen / sizeof(u32); i++)
		ctx->key[i] = cpu_to_le32(aes.key_enc[i]);

	ctx->key_len = keylen;

	memzero_explicit(&aes, sizeof(aes));
	return 0;
}

static int safexcel_skcipher_aes_ctr_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	safexcel_skcipher_cra_init(tfm);
	ctx->alg = SAFEXCEL_AES;
	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
	return 0;
}

struct safexcel_alg_template safexcel_alg_ctr_aes = {
	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
	.alg.skcipher = {
		.setkey = safexcel_skcipher_aesctr_setkey,
		.encrypt = safexcel_encrypt,
		.decrypt = safexcel_decrypt,
		/* Add nonce size */
		.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
		.ivsize = CTR_RFC3686_IV_SIZE,
		.base = {
			.cra_name = "rfc3686(ctr(aes))",
			.cra_driver_name = "safexcel-ctr-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_skcipher_aes_ctr_cra_init,
			.cra_exit = safexcel_skcipher_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};

static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
			       unsigned int len)
{
	struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
	int ret;

	ret = verify_skcipher_des_key(ctfm, key);
	if (ret)
		return ret;

	/* if context exists and key changed, need to invalidate it */
	if (ctx->base.ctxr_dma)
		if (memcmp(ctx->key, key, len))
			ctx->base.needs_inv = true;

	memcpy(ctx->key, key, len);
	ctx->key_len = len;

	return 0;
}

static int safexcel_skcipher_des_cbc_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	safexcel_skcipher_cra_init(tfm);
	ctx->alg = SAFEXCEL_DES;
	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
	return 0;
}

struct safexcel_alg_template safexcel_alg_cbc_des = {
	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
	.alg.skcipher = {
		.setkey = safexcel_des_setkey,
		.encrypt = safexcel_encrypt,
		.decrypt = safexcel_decrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = DES_BLOCK_SIZE,
		.base = {
			.cra_name = "cbc(des)",
			.cra_driver_name = "safexcel-cbc-des",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_skcipher_des_cbc_cra_init,
			.cra_exit = safexcel_skcipher_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};

static int safexcel_skcipher_des_ecb_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	safexcel_skcipher_cra_init(tfm);
	ctx->alg = SAFEXCEL_DES;
	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
	return 0;
}

struct safexcel_alg_template safexcel_alg_ecb_des = {
	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
	.alg.skcipher = {
		.setkey = safexcel_des_setkey,
		.encrypt = safexcel_encrypt,
		.decrypt = safexcel_decrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.base = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "safexcel-ecb-des",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_skcipher_des_ecb_cra_init,
			.cra_exit = safexcel_skcipher_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};

static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
				    const u8 *key, unsigned int len)
{
	struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
	int err;

	err = verify_skcipher_des3_key(ctfm, key);
	if (err)
		return err;

	/* if context exists and key changed, need to invalidate it */
	if (ctx->base.ctxr_dma) {
		if (memcmp(ctx->key, key, len))
			ctx->base.needs_inv = true;
	}

	memcpy(ctx->key, key, len);
	ctx->key_len = len;

	return 0;
}

static int safexcel_skcipher_des3_cbc_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	safexcel_skcipher_cra_init(tfm);
	ctx->alg = SAFEXCEL_3DES;
	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
	return 0;
}

struct safexcel_alg_template safexcel_alg_cbc_des3_ede = {
	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
	.alg.skcipher = {
		.setkey = safexcel_des3_ede_setkey,
		.encrypt = safexcel_encrypt,
		.decrypt = safexcel_decrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.base = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "safexcel-cbc-des3_ede",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_skcipher_des3_cbc_cra_init,
			.cra_exit = safexcel_skcipher_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};

static int safexcel_skcipher_des3_ecb_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	safexcel_skcipher_cra_init(tfm);
	ctx->alg = SAFEXCEL_3DES;
	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
	return 0;
}

struct safexcel_alg_template safexcel_alg_ecb_des3_ede = {
	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
	.alg.skcipher = {
		.setkey = safexcel_des3_ede_setkey,
		.encrypt = safexcel_encrypt,
		.decrypt = safexcel_decrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.base = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "safexcel-ecb-des3_ede",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_skcipher_des3_ecb_cra_init,
			.cra_exit = safexcel_skcipher_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};

static int safexcel_aead_encrypt(struct aead_request *req)
{
	struct safexcel_cipher_req *creq = aead_request_ctx(req);

	return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT);
}

static int safexcel_aead_decrypt(struct aead_request *req)
{
	struct safexcel_cipher_req *creq = aead_request_ctx(req);

	return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT);
}

static int safexcel_aead_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_alg_template *tmpl =
		container_of(tfm->__crt_alg, struct safexcel_alg_template,
			     alg.aead.base);

	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
				sizeof(struct safexcel_cipher_req));

	ctx->priv = tmpl->priv;

	ctx->alg = SAFEXCEL_AES; /* default */
	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; /* default */
	ctx->aead = true;
	ctx->base.send = safexcel_aead_send;
	ctx->base.handle_result = safexcel_aead_handle_result;
	return 0;
}
2018-05-14 16:11:04 +03:00
static int safexcel_aead_sha1_cra_init ( struct crypto_tfm * tfm )
{
struct safexcel_cipher_ctx * ctx = crypto_tfm_ctx ( tfm ) ;
safexcel_aead_cra_init ( tfm ) ;
2018-06-28 18:21:55 +03:00
ctx - > hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1 ;
2018-05-14 16:11:04 +03:00
ctx - > state_sz = SHA1_DIGEST_SIZE ;
return 0 ;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = {
. type = SAFEXCEL_ALG_TYPE_AEAD ,
. alg . aead = {
2019-07-05 09:49:22 +03:00
. setkey = safexcel_aead_setkey ,
2019-08-30 10:40:52 +03:00
. encrypt = safexcel_aead_encrypt ,
. decrypt = safexcel_aead_decrypt ,
2018-05-14 16:11:04 +03:00
. ivsize = AES_BLOCK_SIZE ,
. maxauthsize = SHA1_DIGEST_SIZE ,
. base = {
. cra_name = " authenc(hmac(sha1),cbc(aes)) " ,
. cra_driver_name = " safexcel-authenc-hmac-sha1-cbc-aes " ,
. cra_priority = 300 ,
2018-07-01 01:16:14 +03:00
. cra_flags = CRYPTO_ALG_ASYNC |
2018-05-14 16:11:04 +03:00
CRYPTO_ALG_KERN_DRIVER_ONLY ,
. cra_blocksize = AES_BLOCK_SIZE ,
. cra_ctxsize = sizeof ( struct safexcel_cipher_ctx ) ,
. cra_alignmask = 0 ,
. cra_init = safexcel_aead_sha1_cra_init ,
. cra_exit = safexcel_aead_cra_exit ,
. cra_module = THIS_MODULE ,
} ,
} ,
} ;
2018-05-14 16:11:02 +03:00
static int safexcel_aead_sha256_cra_init ( struct crypto_tfm * tfm )
{
struct safexcel_cipher_ctx * ctx = crypto_tfm_ctx ( tfm ) ;
safexcel_aead_cra_init ( tfm ) ;
2018-06-28 18:21:55 +03:00
ctx - > hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256 ;
2018-05-14 16:11:02 +03:00
ctx - > state_sz = SHA256_DIGEST_SIZE ;
return 0 ;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = {
. type = SAFEXCEL_ALG_TYPE_AEAD ,
. alg . aead = {
2019-07-05 09:49:22 +03:00
. setkey = safexcel_aead_setkey ,
2019-08-30 10:40:52 +03:00
. encrypt = safexcel_aead_encrypt ,
. decrypt = safexcel_aead_decrypt ,
2018-05-14 16:11:02 +03:00
. ivsize = AES_BLOCK_SIZE ,
. maxauthsize = SHA256_DIGEST_SIZE ,
. base = {
. cra_name = " authenc(hmac(sha256),cbc(aes)) " ,
. cra_driver_name = " safexcel-authenc-hmac-sha256-cbc-aes " ,
. cra_priority = 300 ,
2018-07-01 01:16:14 +03:00
. cra_flags = CRYPTO_ALG_ASYNC |
2018-05-14 16:11:02 +03:00
CRYPTO_ALG_KERN_DRIVER_ONLY ,
. cra_blocksize = AES_BLOCK_SIZE ,
. cra_ctxsize = sizeof ( struct safexcel_cipher_ctx ) ,
. cra_alignmask = 0 ,
. cra_init = safexcel_aead_sha256_cra_init ,
. cra_exit = safexcel_aead_cra_exit ,
. cra_module = THIS_MODULE ,
} ,
} ,
} ;
2018-05-14 16:11:03 +03:00
static int safexcel_aead_sha224_cra_init ( struct crypto_tfm * tfm )
{
struct safexcel_cipher_ctx * ctx = crypto_tfm_ctx ( tfm ) ;
safexcel_aead_cra_init ( tfm ) ;
2018-06-28 18:21:55 +03:00
ctx - > hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ;
2018-05-14 16:11:03 +03:00
ctx - > state_sz = SHA256_DIGEST_SIZE ;
return 0 ;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = {
. type = SAFEXCEL_ALG_TYPE_AEAD ,
. alg . aead = {
2019-07-05 09:49:22 +03:00
. setkey = safexcel_aead_setkey ,
2019-08-30 10:40:52 +03:00
. encrypt = safexcel_aead_encrypt ,
. decrypt = safexcel_aead_decrypt ,
2018-05-14 16:11:03 +03:00
. ivsize = AES_BLOCK_SIZE ,
. maxauthsize = SHA224_DIGEST_SIZE ,
. base = {
. cra_name = " authenc(hmac(sha224),cbc(aes)) " ,
. cra_driver_name = " safexcel-authenc-hmac-sha224-cbc-aes " ,
. cra_priority = 300 ,
2018-07-01 01:16:14 +03:00
. cra_flags = CRYPTO_ALG_ASYNC |
2018-05-14 16:11:03 +03:00
CRYPTO_ALG_KERN_DRIVER_ONLY ,
. cra_blocksize = AES_BLOCK_SIZE ,
. cra_ctxsize = sizeof ( struct safexcel_cipher_ctx ) ,
. cra_alignmask = 0 ,
. cra_init = safexcel_aead_sha224_cra_init ,
. cra_exit = safexcel_aead_cra_exit ,
. cra_module = THIS_MODULE ,
} ,
} ,
} ;
static int safexcel_aead_sha512_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	safexcel_aead_cra_init(tfm);
	ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
	ctx->state_sz = SHA512_DIGEST_SIZE;
	return 0;
}

struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = {
	.type = SAFEXCEL_ALG_TYPE_AEAD,
	.alg.aead = {
		.setkey = safexcel_aead_setkey,
		.encrypt = safexcel_aead_encrypt,
		.decrypt = safexcel_aead_decrypt,
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA512_DIGEST_SIZE,
		.base = {
			.cra_name = "authenc(hmac(sha512),cbc(aes))",
			.cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_aead_sha512_cra_init,
			.cra_exit = safexcel_aead_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};
static int safexcel_aead_sha384_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	safexcel_aead_cra_init(tfm);
	ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
	ctx->state_sz = SHA512_DIGEST_SIZE;
	return 0;
}

struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = {
	.type = SAFEXCEL_ALG_TYPE_AEAD,
	.alg.aead = {
		.setkey = safexcel_aead_setkey,
		.encrypt = safexcel_aead_encrypt,
		.decrypt = safexcel_aead_decrypt,
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA384_DIGEST_SIZE,
		.base = {
			.cra_name = "authenc(hmac(sha384),cbc(aes))",
			.cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_aead_sha384_cra_init,
			.cra_exit = safexcel_aead_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};
static int safexcel_aead_sha1_des3_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	safexcel_aead_sha1_cra_init(tfm);
	ctx->alg = SAFEXCEL_3DES; /* override default */
	return 0;
}

struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede = {
	.type = SAFEXCEL_ALG_TYPE_AEAD,
	.alg.aead = {
		.setkey = safexcel_aead_setkey,
		.encrypt = safexcel_aead_encrypt,
		.decrypt = safexcel_aead_decrypt,
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE,
		.base = {
			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
			.cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-des3_ede",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_aead_sha1_des3_cra_init,
			.cra_exit = safexcel_aead_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};
static int safexcel_aead_sha1_ctr_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	safexcel_aead_sha1_cra_init(tfm);
	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */
	return 0;
}

struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes = {
	.type = SAFEXCEL_ALG_TYPE_AEAD,
	.alg.aead = {
		.setkey = safexcel_aead_setkey,
		.encrypt = safexcel_aead_encrypt,
		.decrypt = safexcel_aead_decrypt,
		.ivsize = CTR_RFC3686_IV_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE,
		.base = {
			.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
			.cra_driver_name = "safexcel-authenc-hmac-sha1-ctr-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_aead_sha1_ctr_cra_init,
			.cra_exit = safexcel_aead_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};
static int safexcel_aead_sha256_ctr_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	safexcel_aead_sha256_cra_init(tfm);
	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */
	return 0;
}

struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes = {
	.type = SAFEXCEL_ALG_TYPE_AEAD,
	.alg.aead = {
		.setkey = safexcel_aead_setkey,
		.encrypt = safexcel_aead_encrypt,
		.decrypt = safexcel_aead_decrypt,
		.ivsize = CTR_RFC3686_IV_SIZE,
		.maxauthsize = SHA256_DIGEST_SIZE,
		.base = {
			.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
			.cra_driver_name = "safexcel-authenc-hmac-sha256-ctr-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_aead_sha256_ctr_cra_init,
			.cra_exit = safexcel_aead_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};
static int safexcel_aead_sha224_ctr_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	safexcel_aead_sha224_cra_init(tfm);
	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */
	return 0;
}

struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes = {
	.type = SAFEXCEL_ALG_TYPE_AEAD,
	.alg.aead = {
		.setkey = safexcel_aead_setkey,
		.encrypt = safexcel_aead_encrypt,
		.decrypt = safexcel_aead_decrypt,
		.ivsize = CTR_RFC3686_IV_SIZE,
		.maxauthsize = SHA224_DIGEST_SIZE,
		.base = {
			.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
			.cra_driver_name = "safexcel-authenc-hmac-sha224-ctr-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_aead_sha224_ctr_cra_init,
			.cra_exit = safexcel_aead_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};
static int safexcel_aead_sha512_ctr_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	safexcel_aead_sha512_cra_init(tfm);
	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */
	return 0;
}

struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes = {
	.type = SAFEXCEL_ALG_TYPE_AEAD,
	.alg.aead = {
		.setkey = safexcel_aead_setkey,
		.encrypt = safexcel_aead_encrypt,
		.decrypt = safexcel_aead_decrypt,
		.ivsize = CTR_RFC3686_IV_SIZE,
		.maxauthsize = SHA512_DIGEST_SIZE,
		.base = {
			.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
			.cra_driver_name = "safexcel-authenc-hmac-sha512-ctr-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_aead_sha512_ctr_cra_init,
			.cra_exit = safexcel_aead_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};
static int safexcel_aead_sha384_ctr_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	safexcel_aead_sha384_cra_init(tfm);
	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */
	return 0;
}

struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_ctr_aes = {
	.type = SAFEXCEL_ALG_TYPE_AEAD,
	.alg.aead = {
		.setkey = safexcel_aead_setkey,
		.encrypt = safexcel_aead_encrypt,
		.decrypt = safexcel_aead_decrypt,
		.ivsize = CTR_RFC3686_IV_SIZE,
		.maxauthsize = SHA384_DIGEST_SIZE,
		.base = {
			.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
			.cra_driver_name = "safexcel-authenc-hmac-sha384-ctr-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_aead_sha384_ctr_cra_init,
			.cra_exit = safexcel_aead_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};
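
/*
 * Editorial sketch (not part of the driver): the authenc() templates above
 * accept a single packed key blob that the AEAD setkey is expected to split
 * into an authentication key and an encryption key; the kernel provides
 * crypto_authenc_extractkeys() for the unpacking side. The helper below is a
 * hypothetical illustration of how a caller might build such a blob. The
 * name pack_authenc_key() and its parameters are assumptions, and the RTA_*
 * macros would need <linux/rtnetlink.h>; kept under #if 0 so it is never
 * compiled.
 */
#if 0
static int pack_authenc_key(u8 *buf, unsigned int buflen,
			    const u8 *authkey, unsigned int authkeylen,
			    const u8 *enckey, unsigned int enckeylen)
{
	struct rtattr *rta = (struct rtattr *)buf;
	struct crypto_authenc_key_param *param;

	if (buflen < RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen)
		return -EINVAL;

	/* rtattr header carrying the encryption key length (big endian) */
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);

	/* authentication key first, then the cipher key */
	memcpy(buf + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
	memcpy(buf + RTA_SPACE(sizeof(*param)) + authkeylen, enckey, enckeylen);

	return 0;
}
#endif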
static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm,
					   const u8 *key, unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct crypto_aes_ctx aes;
	int ret, i;
	unsigned int keylen;

	/* Check for illegal XTS keys */
	ret = xts_verify_key(ctfm, key, len);
	if (ret)
		return ret;

	/* Only half of the key data is cipher key */
	keylen = (len >> 1);
	ret = aes_expandkey(&aes, key, keylen);
	if (ret) {
		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
		for (i = 0; i < keylen / sizeof(u32); i++) {
			if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
				ctx->base.needs_inv = true;
				break;
			}
		}
	}

	for (i = 0; i < keylen / sizeof(u32); i++)
		ctx->key[i] = cpu_to_le32(aes.key_enc[i]);

	/* The other half is the tweak key */
	ret = aes_expandkey(&aes, (u8 *)(key + keylen), keylen);
	if (ret) {
		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
		for (i = 0; i < keylen / sizeof(u32); i++) {
			if (ctx->key[i + keylen / sizeof(u32)] !=
			    cpu_to_le32(aes.key_enc[i])) {
				ctx->base.needs_inv = true;
				break;
			}
		}
	}

	for (i = 0; i < keylen / sizeof(u32); i++)
		ctx->key[i + keylen / sizeof(u32)] =
			cpu_to_le32(aes.key_enc[i]);

	ctx->key_len = keylen << 1;

	memzero_explicit(&aes, sizeof(aes));
	return 0;
}
static int safexcel_skcipher_aes_xts_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	safexcel_skcipher_cra_init(tfm);
	ctx->alg = SAFEXCEL_AES;
	ctx->xts = 1;
	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XTS;
	return 0;
}

static int safexcel_encrypt_xts(struct skcipher_request *req)
{
	/* XTS requires at least one full AES block of input */
	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;
	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
				  SAFEXCEL_ENCRYPT);
}

static int safexcel_decrypt_xts(struct skcipher_request *req)
{
	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;
	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
				  SAFEXCEL_DECRYPT);
}
struct safexcel_alg_template safexcel_alg_xts_aes = {
	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
	.alg.skcipher = {
		.setkey = safexcel_skcipher_aesxts_setkey,
		.encrypt = safexcel_encrypt_xts,
		.decrypt = safexcel_decrypt_xts,
		/* XTS actually uses 2 AES keys glued together */
		.min_keysize = AES_MIN_KEY_SIZE * 2,
		.max_keysize = AES_MAX_KEY_SIZE * 2,
		.ivsize = XTS_BLOCK_SIZE,
		.base = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "safexcel-xts-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = XTS_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_skcipher_aes_xts_cra_init,
			.cra_exit = safexcel_skcipher_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};
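
/*
 * Editorial sketch (not part of the driver): minimal kernel-side usage of
 * the xts(aes) skcipher registered above, assuming the caller already has
 * src/dst scatterlists, an IV and a combined key twice the AES key size
 * (cipher key followed by tweak key). Function and parameter names are
 * hypothetical and error handling is abbreviated; kept under #if 0 so it is
 * never compiled.
 */
#if 0
static int xts_aes_usage_sketch(const u8 *key, unsigned int keylen,
				struct scatterlist *src,
				struct scatterlist *dst,
				unsigned int len, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* keylen covers both halves: cipher key + tweak key */
	ret = crypto_skcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, len, iv);

	/* wait synchronously for the asynchronous hardware completion */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}
#endif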