/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 * Copyright 2016-2018 NXP
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |      |------------->|  (PDB)      |
 * ---------------      |              |  (hashKey)  |
 *       .              |              |  (cipherKey)|
 *       .              |    |-------->|  (operation)|
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
# include "compat.h"
# include "regs.h"
# include "intern.h"
# include "desc_constr.h"
# include "jr.h"
# include "error.h"
2012-06-22 19:48:46 -05:00
# include "sg_sw_sec4.h"
2012-06-22 19:48:45 -05:00
# include "key_gen.h"
2016-11-22 15:44:09 +02:00
# include "caamalg_desc.h"
2011-03-13 16:54:26 +08:00
/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)

/* Job descriptor I/O overhead on top of a shared descriptor, in bytes */
#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)

/* Maximum shared-descriptor size that still leaves room for the job desc */
#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
2016-09-22 11:57:58 +03:00
2015-07-30 17:53:17 +08:00
/*
 * caam_alg_entry - per-algorithm CAAM parameters
 * @class1_alg_type: CLASS1 (cipher) OP_ALG type/mode bits
 * @class2_alg_type: CLASS2 (authentication) OP_ALG type/mode bits
 * @rfc3686: true for RFC3686 (CTR with nonce) algorithm variants
 * @geniv: true when the driver generates the IV (givencrypt path)
 */
struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
};
/* AEAD algorithm template plus its CAAM-specific parameters */
struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;	/* set once the alg is registered with crypto API */
};
2018-08-06 15:43:59 +03:00
/* skcipher algorithm template plus its CAAM-specific parameters */
struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;	/* set once the alg is registered with crypto API */
};
2011-03-13 16:54:26 +08:00
/*
* per - session context
*/
struct caam_ctx {
2011-07-15 11:21:42 +08:00
u32 sh_desc_enc [ DESC_MAX_USED_LEN ] ;
u32 sh_desc_dec [ DESC_MAX_USED_LEN ] ;
2017-02-10 14:07:22 +02:00
u8 key [ CAAM_MAX_KEY_SIZE ] ;
2011-07-15 11:21:42 +08:00
dma_addr_t sh_desc_enc_dma ;
dma_addr_t sh_desc_dec_dma ;
2011-07-15 11:21:41 +08:00
dma_addr_t key_dma ;
2017-12-19 12:16:07 +02:00
enum dma_data_direction dir ;
2017-02-10 14:07:22 +02:00
struct device * jrdev ;
2016-11-22 15:44:04 +02:00
struct alginfo adata ;
struct alginfo cdata ;
2011-03-13 16:54:26 +08:00
unsigned int authsize ;
} ;
2014-03-14 17:46:52 +02:00
static int aead_null_set_sh_desc ( struct crypto_aead * aead )
{
struct caam_ctx * ctx = crypto_aead_ctx ( aead ) ;
struct device * jrdev = ctx - > jrdev ;
2017-12-19 12:16:07 +02:00
struct caam_drv_private * ctrlpriv = dev_get_drvdata ( jrdev - > parent ) ;
2014-03-14 17:46:52 +02:00
u32 * desc ;
2016-11-22 15:44:06 +02:00
int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
ctx - > adata . keylen_pad ;
2014-03-14 17:46:52 +02:00
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64 - word Descriptor h / w Buffer
*/
2016-11-22 15:44:06 +02:00
if ( rem_bytes > = DESC_AEAD_NULL_ENC_LEN ) {
2016-11-22 15:44:04 +02:00
ctx - > adata . key_inline = true ;
2016-11-30 22:01:59 +01:00
ctx - > adata . key_virt = ctx - > key ;
2016-11-22 15:44:04 +02:00
} else {
ctx - > adata . key_inline = false ;
2016-11-30 22:01:59 +01:00
ctx - > adata . key_dma = ctx - > key_dma ;
2016-11-22 15:44:04 +02:00
}
2014-03-14 17:46:52 +02:00
2015-07-30 17:53:17 +08:00
/* aead_encrypt shared descriptor */
2014-03-14 17:46:52 +02:00
desc = ctx - > sh_desc_enc ;
2017-12-19 12:16:07 +02:00
cnstr_shdsc_aead_null_encap ( desc , & ctx - > adata , ctx - > authsize ,
ctrlpriv - > era ) ;
2017-02-10 14:07:22 +02:00
dma_sync_single_for_device ( jrdev , ctx - > sh_desc_enc_dma ,
2017-12-19 12:16:07 +02:00
desc_bytes ( desc ) , ctx - > dir ) ;
2014-03-14 17:46:52 +02:00
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64 - word Descriptor h / w Buffer
*/
2016-11-22 15:44:06 +02:00
if ( rem_bytes > = DESC_AEAD_NULL_DEC_LEN ) {
2016-11-22 15:44:04 +02:00
ctx - > adata . key_inline = true ;
2016-11-30 22:01:59 +01:00
ctx - > adata . key_virt = ctx - > key ;
2016-11-22 15:44:04 +02:00
} else {
ctx - > adata . key_inline = false ;
2016-11-30 22:01:59 +01:00
ctx - > adata . key_dma = ctx - > key_dma ;
2016-11-22 15:44:04 +02:00
}
2014-03-14 17:46:52 +02:00
2015-07-30 17:53:17 +08:00
/* aead_decrypt shared descriptor */
2016-11-22 15:44:09 +02:00
desc = ctx - > sh_desc_dec ;
2017-12-19 12:16:07 +02:00
cnstr_shdsc_aead_null_decap ( desc , & ctx - > adata , ctx - > authsize ,
ctrlpriv - > era ) ;
2017-02-10 14:07:22 +02:00
dma_sync_single_for_device ( jrdev , ctx - > sh_desc_dec_dma ,
2017-12-19 12:16:07 +02:00
desc_bytes ( desc ) , ctx - > dir ) ;
2014-03-14 17:46:52 +02:00
return 0 ;
}
2011-07-15 11:21:42 +08:00
static int aead_set_sh_desc ( struct crypto_aead * aead )
{
2015-07-30 17:53:17 +08:00
struct caam_aead_alg * alg = container_of ( crypto_aead_alg ( aead ) ,
struct caam_aead_alg , aead ) ;
2015-05-11 17:47:50 +08:00
unsigned int ivsize = crypto_aead_ivsize ( aead ) ;
2011-07-15 11:21:42 +08:00
struct caam_ctx * ctx = crypto_aead_ctx ( aead ) ;
struct device * jrdev = ctx - > jrdev ;
2017-12-19 12:16:07 +02:00
struct caam_drv_private * ctrlpriv = dev_get_drvdata ( jrdev - > parent ) ;
2014-10-31 12:45:37 +02:00
u32 ctx1_iv_off = 0 ;
2016-11-22 15:44:09 +02:00
u32 * desc , * nonce = NULL ;
2016-11-22 15:44:06 +02:00
u32 inl_mask ;
unsigned int data_len [ 2 ] ;
2016-11-22 15:44:04 +02:00
const bool ctr_mode = ( ( ctx - > cdata . algtype & OP_ALG_AAI_MASK ) = =
2014-10-31 12:45:37 +02:00
OP_ALG_AAI_CTR_MOD128 ) ;
2015-07-30 17:53:17 +08:00
const bool is_rfc3686 = alg - > caam . rfc3686 ;
2011-07-15 11:21:42 +08:00
2016-08-04 20:02:47 +03:00
if ( ! ctx - > authsize )
return 0 ;
2014-03-14 17:46:52 +02:00
/* NULL encryption / decryption */
2016-11-22 15:44:04 +02:00
if ( ! ctx - > cdata . keylen )
2014-03-14 17:46:52 +02:00
return aead_null_set_sh_desc ( aead ) ;
2014-10-31 12:45:37 +02:00
/*
* AES - CTR needs to load IV in CONTEXT1 reg
* at an offset of 128 bits ( 16 bytes )
* CONTEXT1 [ 255 : 128 ] = IV
*/
if ( ctr_mode )
ctx1_iv_off = 16 ;
/*
* RFC3686 specific :
* CONTEXT1 [ 255 : 128 ] = { NONCE , IV , COUNTER }
*/
2016-11-22 15:44:09 +02:00
if ( is_rfc3686 ) {
2014-10-31 12:45:37 +02:00
ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE ;
2016-11-22 15:44:09 +02:00
nonce = ( u32 * ) ( ( void * ) ctx - > key + ctx - > adata . keylen_pad +
ctx - > cdata . keylen - CTR_RFC3686_NONCE_SIZE ) ;
}
2014-10-31 12:45:37 +02:00
2016-11-22 15:44:06 +02:00
data_len [ 0 ] = ctx - > adata . keylen_pad ;
data_len [ 1 ] = ctx - > cdata . keylen ;
2015-07-30 17:53:17 +08:00
if ( alg - > caam . geniv )
goto skip_enc ;
2011-07-15 11:21:42 +08:00
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64 - word Descriptor h / w Buffer
*/
2016-11-22 15:44:06 +02:00
if ( desc_inline_query ( DESC_AEAD_ENC_LEN +
( is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0 ) ,
AUTHENC_DESC_JOB_IO_LEN , data_len , & inl_mask ,
ARRAY_SIZE ( data_len ) ) < 0 )
return - EINVAL ;
if ( inl_mask & 1 )
2016-11-30 22:01:59 +01:00
ctx - > adata . key_virt = ctx - > key ;
2016-11-22 15:44:06 +02:00
else
2016-11-30 22:01:59 +01:00
ctx - > adata . key_dma = ctx - > key_dma ;
2016-11-22 15:44:06 +02:00
if ( inl_mask & 2 )
2016-11-30 22:01:59 +01:00
ctx - > cdata . key_virt = ctx - > key + ctx - > adata . keylen_pad ;
2016-11-22 15:44:06 +02:00
else
2016-11-30 22:01:59 +01:00
ctx - > cdata . key_dma = ctx - > key_dma + ctx - > adata . keylen_pad ;
2016-11-22 15:44:06 +02:00
ctx - > adata . key_inline = ! ! ( inl_mask & 1 ) ;
ctx - > cdata . key_inline = ! ! ( inl_mask & 2 ) ;
2011-07-15 11:21:42 +08:00
2015-07-30 17:53:17 +08:00
/* aead_encrypt shared descriptor */
2011-07-15 11:21:42 +08:00
desc = ctx - > sh_desc_enc ;
crypto: caam/qi - add ablkcipher and authenc algorithms
Add support to submit ablkcipher and authenc algorithms
via the QI backend:
-ablkcipher:
cbc({aes,des,des3_ede})
ctr(aes), rfc3686(ctr(aes))
xts(aes)
-authenc:
authenc(hmac(md5),cbc({aes,des,des3_ede}))
authenc(hmac(sha*),cbc({aes,des,des3_ede}))
caam/qi being a new driver, let's wait some time to settle down without
interfering with existing caam/jr driver.
Accordingly, for now all caam/qi algorithms (caamalg_qi module) are
marked to be of lower priority than caam/jr ones (caamalg module).
Signed-off-by: Vakul Garg <vakul.garg@nxp.com>
Signed-off-by: Alex Porosanu <alexandru.porosanu@nxp.com>
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2017-03-17 12:06:02 +02:00
cnstr_shdsc_aead_encap ( desc , & ctx - > cdata , & ctx - > adata , ivsize ,
ctx - > authsize , is_rfc3686 , nonce , ctx1_iv_off ,
2017-12-19 12:16:07 +02:00
false , ctrlpriv - > era ) ;
2017-02-10 14:07:22 +02:00
dma_sync_single_for_device ( jrdev , ctx - > sh_desc_enc_dma ,
2017-12-19 12:16:07 +02:00
desc_bytes ( desc ) , ctx - > dir ) ;
2011-07-15 11:21:42 +08:00
2015-07-30 17:53:17 +08:00
skip_enc :
2011-07-15 11:21:42 +08:00
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64 - word Descriptor h / w Buffer
*/
2016-11-22 15:44:06 +02:00
if ( desc_inline_query ( DESC_AEAD_DEC_LEN +
( is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0 ) ,
AUTHENC_DESC_JOB_IO_LEN , data_len , & inl_mask ,
ARRAY_SIZE ( data_len ) ) < 0 )
return - EINVAL ;
if ( inl_mask & 1 )
2016-11-30 22:01:59 +01:00
ctx - > adata . key_virt = ctx - > key ;
2016-11-22 15:44:06 +02:00
else
2016-11-30 22:01:59 +01:00
ctx - > adata . key_dma = ctx - > key_dma ;
2016-11-22 15:44:06 +02:00
if ( inl_mask & 2 )
2016-11-30 22:01:59 +01:00
ctx - > cdata . key_virt = ctx - > key + ctx - > adata . keylen_pad ;
2016-11-22 15:44:06 +02:00
else
2016-11-30 22:01:59 +01:00
ctx - > cdata . key_dma = ctx - > key_dma + ctx - > adata . keylen_pad ;
2016-11-22 15:44:06 +02:00
ctx - > adata . key_inline = ! ! ( inl_mask & 1 ) ;
ctx - > cdata . key_inline = ! ! ( inl_mask & 2 ) ;
2011-07-15 11:21:42 +08:00
2015-07-30 17:53:17 +08:00
/* aead_decrypt shared descriptor */
2014-03-14 17:46:49 +02:00
desc = ctx - > sh_desc_dec ;
2016-11-22 15:44:09 +02:00
cnstr_shdsc_aead_decap ( desc , & ctx - > cdata , & ctx - > adata , ivsize ,
ctx - > authsize , alg - > caam . geniv , is_rfc3686 ,
2017-12-19 12:16:07 +02:00
nonce , ctx1_iv_off , false , ctrlpriv - > era ) ;
2017-02-10 14:07:22 +02:00
dma_sync_single_for_device ( jrdev , ctx - > sh_desc_dec_dma ,
2017-12-19 12:16:07 +02:00
desc_bytes ( desc ) , ctx - > dir ) ;
2011-07-15 11:21:42 +08:00
2015-07-30 17:53:17 +08:00
if ( ! alg - > caam . geniv )
goto skip_givenc ;
2011-07-15 11:21:42 +08:00
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64 - word Descriptor h / w Buffer
*/
2016-11-22 15:44:06 +02:00
if ( desc_inline_query ( DESC_AEAD_GIVENC_LEN +
( is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0 ) ,
AUTHENC_DESC_JOB_IO_LEN , data_len , & inl_mask ,
ARRAY_SIZE ( data_len ) ) < 0 )
return - EINVAL ;
if ( inl_mask & 1 )
2016-11-30 22:01:59 +01:00
ctx - > adata . key_virt = ctx - > key ;
2016-11-22 15:44:06 +02:00
else
2016-11-30 22:01:59 +01:00
ctx - > adata . key_dma = ctx - > key_dma ;
2016-11-22 15:44:06 +02:00
if ( inl_mask & 2 )
2016-11-30 22:01:59 +01:00
ctx - > cdata . key_virt = ctx - > key + ctx - > adata . keylen_pad ;
2016-11-22 15:44:06 +02:00
else
2016-11-30 22:01:59 +01:00
ctx - > cdata . key_dma = ctx - > key_dma + ctx - > adata . keylen_pad ;
2016-11-22 15:44:06 +02:00
ctx - > adata . key_inline = ! ! ( inl_mask & 1 ) ;
ctx - > cdata . key_inline = ! ! ( inl_mask & 2 ) ;
2011-07-15 11:21:42 +08:00
/* aead_givencrypt shared descriptor */
2016-08-04 20:02:46 +03:00
desc = ctx - > sh_desc_enc ;
2016-11-22 15:44:09 +02:00
cnstr_shdsc_aead_givencap ( desc , & ctx - > cdata , & ctx - > adata , ivsize ,
ctx - > authsize , is_rfc3686 , nonce ,
2017-12-19 12:16:07 +02:00
ctx1_iv_off , false , ctrlpriv - > era ) ;
2017-02-10 14:07:22 +02:00
dma_sync_single_for_device ( jrdev , ctx - > sh_desc_enc_dma ,
2017-12-19 12:16:07 +02:00
desc_bytes ( desc ) , ctx - > dir ) ;
2011-07-15 11:21:42 +08:00
2015-07-30 17:53:17 +08:00
skip_givenc :
2011-07-15 11:21:42 +08:00
return 0 ;
}
2011-07-15 11:21:41 +08:00
static int aead_setauthsize ( struct crypto_aead * authenc ,
2011-03-13 16:54:26 +08:00
unsigned int authsize )
{
struct caam_ctx * ctx = crypto_aead_ctx ( authenc ) ;
ctx - > authsize = authsize ;
2011-07-15 11:21:42 +08:00
aead_set_sh_desc ( authenc ) ;
2011-03-13 16:54:26 +08:00
return 0 ;
}
2014-10-23 16:11:23 +03:00
static int gcm_set_sh_desc ( struct crypto_aead * aead )
{
struct caam_ctx * ctx = crypto_aead_ctx ( aead ) ;
struct device * jrdev = ctx - > jrdev ;
2018-01-29 10:38:36 +02:00
unsigned int ivsize = crypto_aead_ivsize ( aead ) ;
2014-10-23 16:11:23 +03:00
u32 * desc ;
2016-11-22 15:44:06 +02:00
int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
ctx - > cdata . keylen ;
2014-10-23 16:11:23 +03:00
2016-11-22 15:44:04 +02:00
if ( ! ctx - > cdata . keylen | | ! ctx - > authsize )
2014-10-23 16:11:23 +03:00
return 0 ;
/*
* AES GCM encrypt shared descriptor
* Job Descriptor and Shared Descriptor
* must fit into the 64 - word Descriptor h / w Buffer
*/
2016-11-22 15:44:06 +02:00
if ( rem_bytes > = DESC_GCM_ENC_LEN ) {
2016-11-22 15:44:04 +02:00
ctx - > cdata . key_inline = true ;
2016-11-30 22:01:59 +01:00
ctx - > cdata . key_virt = ctx - > key ;
2016-11-22 15:44:04 +02:00
} else {
ctx - > cdata . key_inline = false ;
2016-11-30 22:01:59 +01:00
ctx - > cdata . key_dma = ctx - > key_dma ;
2016-11-22 15:44:04 +02:00
}
2014-10-23 16:11:23 +03:00
desc = ctx - > sh_desc_enc ;
2018-01-29 10:38:36 +02:00
cnstr_shdsc_gcm_encap ( desc , & ctx - > cdata , ivsize , ctx - > authsize , false ) ;
2017-02-10 14:07:22 +02:00
dma_sync_single_for_device ( jrdev , ctx - > sh_desc_enc_dma ,
2017-12-19 12:16:07 +02:00
desc_bytes ( desc ) , ctx - > dir ) ;
2014-10-23 16:11:23 +03:00
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64 - word Descriptor h / w Buffer
*/
2016-11-22 15:44:06 +02:00
if ( rem_bytes > = DESC_GCM_DEC_LEN ) {
2016-11-22 15:44:04 +02:00
ctx - > cdata . key_inline = true ;
2016-11-30 22:01:59 +01:00
ctx - > cdata . key_virt = ctx - > key ;
2016-11-22 15:44:04 +02:00
} else {
ctx - > cdata . key_inline = false ;
2016-11-30 22:01:59 +01:00
ctx - > cdata . key_dma = ctx - > key_dma ;
2016-11-22 15:44:04 +02:00
}
2014-10-23 16:11:23 +03:00
desc = ctx - > sh_desc_dec ;
2018-01-29 10:38:36 +02:00
cnstr_shdsc_gcm_decap ( desc , & ctx - > cdata , ivsize , ctx - > authsize , false ) ;
2017-02-10 14:07:22 +02:00
dma_sync_single_for_device ( jrdev , ctx - > sh_desc_dec_dma ,
2017-12-19 12:16:07 +02:00
desc_bytes ( desc ) , ctx - > dir ) ;
2014-10-23 16:11:23 +03:00
return 0 ;
}
static int gcm_setauthsize ( struct crypto_aead * authenc , unsigned int authsize )
{
struct caam_ctx * ctx = crypto_aead_ctx ( authenc ) ;
ctx - > authsize = authsize ;
gcm_set_sh_desc ( authenc ) ;
return 0 ;
}
2014-10-23 16:14:03 +03:00
static int rfc4106_set_sh_desc ( struct crypto_aead * aead )
{
struct caam_ctx * ctx = crypto_aead_ctx ( aead ) ;
struct device * jrdev = ctx - > jrdev ;
2018-01-29 10:38:36 +02:00
unsigned int ivsize = crypto_aead_ivsize ( aead ) ;
2014-10-23 16:14:03 +03:00
u32 * desc ;
2016-11-22 15:44:06 +02:00
int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
ctx - > cdata . keylen ;
2014-10-23 16:14:03 +03:00
2016-11-22 15:44:04 +02:00
if ( ! ctx - > cdata . keylen | | ! ctx - > authsize )
2014-10-23 16:14:03 +03:00
return 0 ;
/*
* RFC4106 encrypt shared descriptor
* Job Descriptor and Shared Descriptor
* must fit into the 64 - word Descriptor h / w Buffer
*/
2016-11-22 15:44:06 +02:00
if ( rem_bytes > = DESC_RFC4106_ENC_LEN ) {
2016-11-22 15:44:04 +02:00
ctx - > cdata . key_inline = true ;
2016-11-30 22:01:59 +01:00
ctx - > cdata . key_virt = ctx - > key ;
2016-11-22 15:44:04 +02:00
} else {
ctx - > cdata . key_inline = false ;
2016-11-30 22:01:59 +01:00
ctx - > cdata . key_dma = ctx - > key_dma ;
2016-11-22 15:44:04 +02:00
}
2014-10-23 16:14:03 +03:00
desc = ctx - > sh_desc_enc ;
2018-01-29 10:38:36 +02:00
cnstr_shdsc_rfc4106_encap ( desc , & ctx - > cdata , ivsize , ctx - > authsize ,
false ) ;
2017-02-10 14:07:22 +02:00
dma_sync_single_for_device ( jrdev , ctx - > sh_desc_enc_dma ,
2017-12-19 12:16:07 +02:00
desc_bytes ( desc ) , ctx - > dir ) ;
2014-10-23 16:14:03 +03:00
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64 - word Descriptor h / w Buffer
*/
2016-11-22 15:44:06 +02:00
if ( rem_bytes > = DESC_RFC4106_DEC_LEN ) {
2016-11-22 15:44:04 +02:00
ctx - > cdata . key_inline = true ;
2016-11-30 22:01:59 +01:00
ctx - > cdata . key_virt = ctx - > key ;
2016-11-22 15:44:04 +02:00
} else {
ctx - > cdata . key_inline = false ;
2016-11-30 22:01:59 +01:00
ctx - > cdata . key_dma = ctx - > key_dma ;
2016-11-22 15:44:04 +02:00
}
2014-10-23 16:14:03 +03:00
desc = ctx - > sh_desc_dec ;
2018-01-29 10:38:36 +02:00
cnstr_shdsc_rfc4106_decap ( desc , & ctx - > cdata , ivsize , ctx - > authsize ,
false ) ;
2017-02-10 14:07:22 +02:00
dma_sync_single_for_device ( jrdev , ctx - > sh_desc_dec_dma ,
2017-12-19 12:16:07 +02:00
desc_bytes ( desc ) , ctx - > dir ) ;
2014-10-23 16:14:03 +03:00
return 0 ;
}
static int rfc4106_setauthsize ( struct crypto_aead * authenc ,
unsigned int authsize )
{
struct caam_ctx * ctx = crypto_aead_ctx ( authenc ) ;
ctx - > authsize = authsize ;
rfc4106_set_sh_desc ( authenc ) ;
return 0 ;
}
2014-10-30 18:55:07 +02:00
static int rfc4543_set_sh_desc ( struct crypto_aead * aead )
{
struct caam_ctx * ctx = crypto_aead_ctx ( aead ) ;
struct device * jrdev = ctx - > jrdev ;
2018-01-29 10:38:36 +02:00
unsigned int ivsize = crypto_aead_ivsize ( aead ) ;
2014-10-30 18:55:07 +02:00
u32 * desc ;
2016-11-22 15:44:06 +02:00
int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
ctx - > cdata . keylen ;
2014-10-30 18:55:07 +02:00
2016-11-22 15:44:04 +02:00
if ( ! ctx - > cdata . keylen | | ! ctx - > authsize )
2014-10-30 18:55:07 +02:00
return 0 ;
/*
* RFC4543 encrypt shared descriptor
* Job Descriptor and Shared Descriptor
* must fit into the 64 - word Descriptor h / w Buffer
*/
2016-11-22 15:44:06 +02:00
if ( rem_bytes > = DESC_RFC4543_ENC_LEN ) {
2016-11-22 15:44:04 +02:00
ctx - > cdata . key_inline = true ;
2016-11-30 22:01:59 +01:00
ctx - > cdata . key_virt = ctx - > key ;
2016-11-22 15:44:04 +02:00
} else {
ctx - > cdata . key_inline = false ;
2016-11-30 22:01:59 +01:00
ctx - > cdata . key_dma = ctx - > key_dma ;
2016-11-22 15:44:04 +02:00
}
2014-10-30 18:55:07 +02:00
desc = ctx - > sh_desc_enc ;
2018-01-29 10:38:36 +02:00
cnstr_shdsc_rfc4543_encap ( desc , & ctx - > cdata , ivsize , ctx - > authsize ,
false ) ;
2017-02-10 14:07:22 +02:00
dma_sync_single_for_device ( jrdev , ctx - > sh_desc_enc_dma ,
2017-12-19 12:16:07 +02:00
desc_bytes ( desc ) , ctx - > dir ) ;
2014-10-30 18:55:07 +02:00
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64 - word Descriptor h / w Buffer
*/
2016-11-22 15:44:06 +02:00
if ( rem_bytes > = DESC_RFC4543_DEC_LEN ) {
2016-11-22 15:44:04 +02:00
ctx - > cdata . key_inline = true ;
2016-11-30 22:01:59 +01:00
ctx - > cdata . key_virt = ctx - > key ;
2016-11-22 15:44:04 +02:00
} else {
ctx - > cdata . key_inline = false ;
2016-11-30 22:01:59 +01:00
ctx - > cdata . key_dma = ctx - > key_dma ;
2016-11-22 15:44:04 +02:00
}
2014-10-30 18:55:07 +02:00
desc = ctx - > sh_desc_dec ;
2018-01-29 10:38:36 +02:00
cnstr_shdsc_rfc4543_decap ( desc , & ctx - > cdata , ivsize , ctx - > authsize ,
false ) ;
2017-02-10 14:07:22 +02:00
dma_sync_single_for_device ( jrdev , ctx - > sh_desc_dec_dma ,
2017-12-19 12:16:07 +02:00
desc_bytes ( desc ) , ctx - > dir ) ;
2014-10-30 18:55:07 +02:00
2015-06-16 13:54:23 +08:00
return 0 ;
}
2014-10-30 18:55:07 +02:00
2015-06-16 13:54:23 +08:00
static int rfc4543_setauthsize ( struct crypto_aead * authenc ,
unsigned int authsize )
{
struct caam_ctx * ctx = crypto_aead_ctx ( authenc ) ;
2014-10-30 18:55:07 +02:00
2015-06-16 13:54:23 +08:00
ctx - > authsize = authsize ;
rfc4543_set_sh_desc ( authenc ) ;
2014-10-30 18:55:07 +02:00
2015-06-16 13:54:23 +08:00
return 0 ;
}
2014-10-30 18:55:07 +02:00
2011-07-15 11:21:41 +08:00
static int aead_setkey ( struct crypto_aead * aead ,
2011-03-13 16:54:26 +08:00
const u8 * key , unsigned int keylen )
{
struct caam_ctx * ctx = crypto_aead_ctx ( aead ) ;
struct device * jrdev = ctx - > jrdev ;
2017-12-19 12:16:07 +02:00
struct caam_drv_private * ctrlpriv = dev_get_drvdata ( jrdev - > parent ) ;
2013-12-19 17:27:35 +02:00
struct crypto_authenc_keys keys ;
2011-03-13 16:54:26 +08:00
int ret = 0 ;
2013-12-19 17:27:35 +02:00
if ( crypto_authenc_extractkeys ( & keys , key , keylen ) ! = 0 )
2011-03-13 16:54:26 +08:00
goto badkey ;
# ifdef DEBUG
printk ( KERN_ERR " keylen %d enckeylen %d authkeylen %d \n " ,
2013-12-19 17:27:35 +02:00
keys . authkeylen + keys . enckeylen , keys . enckeylen ,
keys . authkeylen ) ;
2013-08-14 18:56:45 +03:00
print_hex_dump ( KERN_ERR , " key in @ " __stringify ( __LINE__ ) " : " ,
2011-03-13 16:54:26 +08:00
DUMP_PREFIX_ADDRESS , 16 , 4 , key , keylen , 1 ) ;
# endif
2017-12-19 12:16:07 +02:00
/*
* If DKP is supported , use it in the shared descriptor to generate
* the split key .
*/
if ( ctrlpriv - > era > = 6 ) {
ctx - > adata . keylen = keys . authkeylen ;
ctx - > adata . keylen_pad = split_key_len ( ctx - > adata . algtype &
OP_ALG_ALGSEL_MASK ) ;
if ( ctx - > adata . keylen_pad + keys . enckeylen > CAAM_MAX_KEY_SIZE )
goto badkey ;
memcpy ( ctx - > key , keys . authkey , keys . authkeylen ) ;
memcpy ( ctx - > key + ctx - > adata . keylen_pad , keys . enckey ,
keys . enckeylen ) ;
dma_sync_single_for_device ( jrdev , ctx - > key_dma ,
ctx - > adata . keylen_pad +
keys . enckeylen , ctx - > dir ) ;
goto skip_split_key ;
}
2016-11-22 15:44:10 +02:00
ret = gen_split_key ( ctx - > jrdev , ctx - > key , & ctx - > adata , keys . authkey ,
keys . authkeylen , CAAM_MAX_KEY_SIZE -
keys . enckeylen ) ;
2011-03-13 16:54:26 +08:00
if ( ret ) {
goto badkey ;
}
/* postpend encryption key to auth split key */
2016-11-22 15:44:04 +02:00
memcpy ( ctx - > key + ctx - > adata . keylen_pad , keys . enckey , keys . enckeylen ) ;
2017-02-10 14:07:22 +02:00
dma_sync_single_for_device ( jrdev , ctx - > key_dma , ctx - > adata . keylen_pad +
2017-12-19 12:16:07 +02:00
keys . enckeylen , ctx - > dir ) ;
2011-03-13 16:54:26 +08:00
# ifdef DEBUG
2013-08-14 18:56:45 +03:00
print_hex_dump ( KERN_ERR , " ctx.key@ " __stringify ( __LINE__ ) " : " ,
2011-03-13 16:54:26 +08:00
DUMP_PREFIX_ADDRESS , 16 , 4 , ctx - > key ,
2016-11-22 15:44:04 +02:00
ctx - > adata . keylen_pad + keys . enckeylen , 1 ) ;
2011-03-13 16:54:26 +08:00
# endif
2017-12-19 12:16:07 +02:00
skip_split_key :
2016-11-22 15:44:04 +02:00
ctx - > cdata . keylen = keys . enckeylen ;
2018-03-23 12:42:18 +02:00
memzero_explicit ( & keys , sizeof ( keys ) ) ;
2017-02-10 14:07:22 +02:00
return aead_set_sh_desc ( aead ) ;
2011-03-13 16:54:26 +08:00
badkey :
crypto_aead_set_flags ( aead , CRYPTO_TFM_RES_BAD_KEY_LEN ) ;
2018-03-23 12:42:18 +02:00
memzero_explicit ( & keys , sizeof ( keys ) ) ;
2011-03-13 16:54:26 +08:00
return - EINVAL ;
}
2014-10-23 16:11:23 +03:00
static int gcm_setkey ( struct crypto_aead * aead ,
const u8 * key , unsigned int keylen )
{
struct caam_ctx * ctx = crypto_aead_ctx ( aead ) ;
struct device * jrdev = ctx - > jrdev ;
# ifdef DEBUG
print_hex_dump ( KERN_ERR , " key in @ " __stringify ( __LINE__ ) " : " ,
DUMP_PREFIX_ADDRESS , 16 , 4 , key , keylen , 1 ) ;
# endif
memcpy ( ctx - > key , key , keylen ) ;
2017-12-19 12:16:07 +02:00
dma_sync_single_for_device ( jrdev , ctx - > key_dma , keylen , ctx - > dir ) ;
2016-11-22 15:44:04 +02:00
ctx - > cdata . keylen = keylen ;
2014-10-23 16:11:23 +03:00
2017-02-10 14:07:22 +02:00
return gcm_set_sh_desc ( aead ) ;
2014-10-23 16:11:23 +03:00
}
2014-10-23 16:14:03 +03:00
static int rfc4106_setkey ( struct crypto_aead * aead ,
const u8 * key , unsigned int keylen )
{
struct caam_ctx * ctx = crypto_aead_ctx ( aead ) ;
struct device * jrdev = ctx - > jrdev ;
if ( keylen < 4 )
return - EINVAL ;
# ifdef DEBUG
print_hex_dump ( KERN_ERR , " key in @ " __stringify ( __LINE__ ) " : " ,
DUMP_PREFIX_ADDRESS , 16 , 4 , key , keylen , 1 ) ;
# endif
memcpy ( ctx - > key , key , keylen ) ;
/*
* The last four bytes of the key material are used as the salt value
* in the nonce . Update the AES key length .
*/
2016-11-22 15:44:04 +02:00
ctx - > cdata . keylen = keylen - 4 ;
2017-02-10 14:07:22 +02:00
dma_sync_single_for_device ( jrdev , ctx - > key_dma , ctx - > cdata . keylen ,
2017-12-19 12:16:07 +02:00
ctx - > dir ) ;
2017-02-10 14:07:22 +02:00
return rfc4106_set_sh_desc ( aead ) ;
2014-10-23 16:14:03 +03:00
}
2014-10-30 18:55:07 +02:00
static int rfc4543_setkey ( struct crypto_aead * aead ,
const u8 * key , unsigned int keylen )
{
struct caam_ctx * ctx = crypto_aead_ctx ( aead ) ;
struct device * jrdev = ctx - > jrdev ;
if ( keylen < 4 )
return - EINVAL ;
# ifdef DEBUG
print_hex_dump ( KERN_ERR , " key in @ " __stringify ( __LINE__ ) " : " ,
DUMP_PREFIX_ADDRESS , 16 , 4 , key , keylen , 1 ) ;
# endif
memcpy ( ctx - > key , key , keylen ) ;
/*
* The last four bytes of the key material are used as the salt value
* in the nonce . Update the AES key length .
*/
2016-11-22 15:44:04 +02:00
ctx - > cdata . keylen = keylen - 4 ;
2017-02-10 14:07:22 +02:00
dma_sync_single_for_device ( jrdev , ctx - > key_dma , ctx - > cdata . keylen ,
2017-12-19 12:16:07 +02:00
ctx - > dir ) ;
2017-02-10 14:07:22 +02:00
return rfc4543_set_sh_desc ( aead ) ;
2014-10-30 18:55:07 +02:00
}
2018-08-06 15:43:59 +03:00
static int skcipher_setkey ( struct crypto_skcipher * skcipher , const u8 * key ,
unsigned int keylen )
2011-07-15 11:21:42 +08:00
{
2018-08-06 15:43:59 +03:00
struct caam_ctx * ctx = crypto_skcipher_ctx ( skcipher ) ;
struct caam_skcipher_alg * alg =
container_of ( crypto_skcipher_alg ( skcipher ) , typeof ( * alg ) ,
skcipher ) ;
2011-07-15 11:21:42 +08:00
struct device * jrdev = ctx - > jrdev ;
2018-08-06 15:43:59 +03:00
unsigned int ivsize = crypto_skcipher_ivsize ( skcipher ) ;
2011-07-15 11:21:42 +08:00
u32 * desc ;
2014-10-31 12:45:35 +02:00
u32 ctx1_iv_off = 0 ;
2016-11-22 15:44:04 +02:00
const bool ctr_mode = ( ( ctx - > cdata . algtype & OP_ALG_AAI_MASK ) = =
2014-10-31 12:45:35 +02:00
OP_ALG_AAI_CTR_MOD128 ) ;
2018-08-06 15:43:59 +03:00
const bool is_rfc3686 = alg - > caam . rfc3686 ;
2011-07-15 11:21:42 +08:00
# ifdef DEBUG
2013-08-14 18:56:45 +03:00
print_hex_dump ( KERN_ERR , " key in @ " __stringify ( __LINE__ ) " : " ,
2011-07-15 11:21:42 +08:00
DUMP_PREFIX_ADDRESS , 16 , 4 , key , keylen , 1 ) ;
# endif
2014-10-31 12:45:35 +02:00
/*
* AES - CTR needs to load IV in CONTEXT1 reg
* at an offset of 128 bits ( 16 bytes )
* CONTEXT1 [ 255 : 128 ] = IV
*/
if ( ctr_mode )
ctx1_iv_off = 16 ;
2011-07-15 11:21:42 +08:00
2014-10-31 12:45:36 +02:00
/*
* RFC3686 specific :
* | CONTEXT1 [ 255 : 128 ] = { NONCE , IV , COUNTER }
* | * key = { KEY , NONCE }
*/
if ( is_rfc3686 ) {
ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE ;
keylen - = CTR_RFC3686_NONCE_SIZE ;
}
2016-11-22 15:44:04 +02:00
ctx - > cdata . keylen = keylen ;
2017-12-19 12:16:05 +02:00
ctx - > cdata . key_virt = key ;
2016-11-22 15:44:04 +02:00
ctx - > cdata . key_inline = true ;
2011-07-15 11:21:42 +08:00
2018-08-06 15:43:59 +03:00
/* skcipher_encrypt shared descriptor */
2011-07-15 11:21:42 +08:00
desc = ctx - > sh_desc_enc ;
2016-11-22 15:44:09 +02:00
cnstr_shdsc_ablkcipher_encap ( desc , & ctx - > cdata , ivsize , is_rfc3686 ,
ctx1_iv_off ) ;
2017-02-10 14:07:22 +02:00
dma_sync_single_for_device ( jrdev , ctx - > sh_desc_enc_dma ,
2017-12-19 12:16:07 +02:00
desc_bytes ( desc ) , ctx - > dir ) ;
2016-11-22 15:44:09 +02:00
2018-08-06 15:43:59 +03:00
/* skcipher_decrypt shared descriptor */
2011-07-15 11:21:42 +08:00
desc = ctx - > sh_desc_dec ;
2016-11-22 15:44:09 +02:00
cnstr_shdsc_ablkcipher_decap ( desc , & ctx - > cdata , ivsize , is_rfc3686 ,
ctx1_iv_off ) ;
2017-02-10 14:07:22 +02:00
dma_sync_single_for_device ( jrdev , ctx - > sh_desc_dec_dma ,
2017-12-19 12:16:07 +02:00
desc_bytes ( desc ) , ctx - > dir ) ;
2011-07-15 11:21:42 +08:00
2016-11-22 15:44:09 +02:00
return 0 ;
2011-07-15 11:21:42 +08:00
}
2018-08-06 15:43:59 +03:00
static int xts_skcipher_setkey ( struct crypto_skcipher * skcipher , const u8 * key ,
unsigned int keylen )
2015-10-02 13:13:18 +03:00
{
2018-08-06 15:43:59 +03:00
struct caam_ctx * ctx = crypto_skcipher_ctx ( skcipher ) ;
2015-10-02 13:13:18 +03:00
struct device * jrdev = ctx - > jrdev ;
2016-11-22 15:44:09 +02:00
u32 * desc ;
2015-10-02 13:13:18 +03:00
if ( keylen ! = 2 * AES_MIN_KEY_SIZE & & keylen ! = 2 * AES_MAX_KEY_SIZE ) {
2018-08-06 15:43:59 +03:00
crypto_skcipher_set_flags ( skcipher , CRYPTO_TFM_RES_BAD_KEY_LEN ) ;
2015-10-02 13:13:18 +03:00
dev_err ( jrdev , " key size mismatch \n " ) ;
return - EINVAL ;
}
2016-11-22 15:44:04 +02:00
ctx - > cdata . keylen = keylen ;
2017-12-19 12:16:05 +02:00
ctx - > cdata . key_virt = key ;
2016-11-22 15:44:04 +02:00
ctx - > cdata . key_inline = true ;
2015-10-02 13:13:18 +03:00
2018-08-06 15:43:59 +03:00
/* xts_skcipher_encrypt shared descriptor */
2015-10-02 13:13:18 +03:00
desc = ctx - > sh_desc_enc ;
2016-11-22 15:44:09 +02:00
cnstr_shdsc_xts_ablkcipher_encap ( desc , & ctx - > cdata ) ;
2017-02-10 14:07:22 +02:00
dma_sync_single_for_device ( jrdev , ctx - > sh_desc_enc_dma ,
2017-12-19 12:16:07 +02:00
desc_bytes ( desc ) , ctx - > dir ) ;
2015-10-02 13:13:18 +03:00
2018-08-06 15:43:59 +03:00
/* xts_skcipher_decrypt shared descriptor */
2015-10-02 13:13:18 +03:00
desc = ctx - > sh_desc_dec ;
2016-11-22 15:44:09 +02:00
cnstr_shdsc_xts_ablkcipher_decap ( desc , & ctx - > cdata ) ;
2017-02-10 14:07:22 +02:00
dma_sync_single_for_device ( jrdev , ctx - > sh_desc_dec_dma ,
2017-12-19 12:16:07 +02:00
desc_bytes ( desc ) , ctx - > dir ) ;
2015-10-02 13:13:18 +03:00
return 0 ;
}
2011-03-13 16:54:26 +08:00
/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input s/w scatterlist
 * @dst_nents: number of segments in output s/w scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *
 * The structure is allocated in one kzalloc() together with the job
 * descriptor and the S/G link table, which trail it in memory (hence the
 * flexible array member at the end).
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};
2011-07-15 11:21:42 +08:00
/*
2018-08-06 15:43:59 +03:00
* skcipher_edesc - s / w - extended skcipher descriptor
2017-02-10 14:07:19 +02:00
* @ src_nents : number of segments in input s / w scatterlist
* @ dst_nents : number of segments in output s / w scatterlist
2011-07-15 11:21:42 +08:00
* @ iv_dma : dma address of iv for checking continuity and link table
2012-06-22 19:48:46 -05:00
* @ sec4_sg_bytes : length of dma mapped sec4_sg space
* @ sec4_sg_dma : bus physical mapped address of h / w link table
2016-11-09 10:46:18 +02:00
* @ sec4_sg : pointer to h / w link table
2011-07-15 11:21:42 +08:00
* @ hw_desc : the h / w job descriptor followed by any referenced link tables
2018-03-28 15:39:18 +03:00
* and IV
2011-07-15 11:21:42 +08:00
*/
2018-08-06 15:43:59 +03:00
struct skcipher_edesc {
2011-07-15 11:21:42 +08:00
int src_nents ;
int dst_nents ;
dma_addr_t iv_dma ;
2012-06-22 19:48:46 -05:00
int sec4_sg_bytes ;
dma_addr_t sec4_sg_dma ;
struct sec4_sg_entry * sec4_sg ;
2011-07-15 11:21:42 +08:00
u32 hw_desc [ 0 ] ;
} ;
2011-07-15 11:21:42 +08:00
/*
 * Undo every DMA mapping made for a request: the source/destination
 * scatterlists, the optional IV bounce buffer and the optional h/w
 * S/G (link) table.  Arguments that were never mapped are passed as 0.
 */
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst == src) {
		/* in-place operation: a single bidirectional mapping */
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	} else {
		/* src may be absent for zero-length input data */
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);

	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}
2011-07-15 11:21:42 +08:00
static void aead_unmap ( struct device * dev ,
struct aead_edesc * edesc ,
struct aead_request * req )
2015-06-16 13:54:23 +08:00
{
caam_unmap ( dev , req - > src , req - > dst ,
2018-08-06 15:43:57 +03:00
edesc - > src_nents , edesc - > dst_nents , 0 , 0 ,
2015-06-16 13:54:23 +08:00
edesc - > sec4_sg_dma , edesc - > sec4_sg_bytes ) ;
}
2018-08-06 15:43:59 +03:00
static void skcipher_unmap ( struct device * dev , struct skcipher_edesc * edesc ,
struct skcipher_request * req )
2011-07-15 11:21:42 +08:00
{
2018-08-06 15:43:59 +03:00
struct crypto_skcipher * skcipher = crypto_skcipher_reqtfm ( req ) ;
int ivsize = crypto_skcipher_ivsize ( skcipher ) ;
2011-07-15 11:21:42 +08:00
caam_unmap ( dev , req - > src , req - > dst ,
2015-09-23 13:55:27 +02:00
edesc - > src_nents , edesc - > dst_nents ,
2018-08-06 15:43:57 +03:00
edesc - > iv_dma , ivsize ,
2012-06-22 19:48:49 -05:00
edesc - > sec4_sg_dma , edesc - > sec4_sg_bytes ) ;
2011-07-15 11:21:42 +08:00
}
2011-07-15 11:21:41 +08:00
/*
 * Job ring completion callback for AEAD encryption: report any h/w
 * status, tear down DMA mappings, free the extended descriptor and
 * complete the crypto request.
 */
static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc = container_of(desc, struct aead_edesc,
						hw_desc[0]);

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);
	kfree(edesc);

	aead_request_complete(req, err);
}
2011-07-15 11:21:41 +08:00
/*
 * Job ring completion callback for AEAD decryption.  Same cleanup path
 * as encryption, but a CCB ICV-check failure is translated into the
 * -EBADMSG the crypto API expects for authentication failures.
 */
static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc = container_of(desc, struct aead_edesc,
						hw_desc[0]);

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}
2018-08-06 15:43:59 +03:00
/*
 * Job ring completion callback for skcipher encryption.
 *
 * Unmaps all DMA resources, copies the last ciphertext block back into
 * req->iv (required by the crypto API, e.g. for CTS chaining), frees the
 * extended descriptor and completes the request with the h/w status.
 */
static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				  void *context)
{
	struct skcipher_request *req = context;
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	/* the job descriptor is embedded in the extended descriptor */
	edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
#endif
	caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	/* unmap before the CPU reads the destination scatterlist below */
	skcipher_unmap(jrdev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block. This is used e.g. by the CTS mode.
	 */
	scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
				 ivsize, 0);

	kfree(edesc);

	skcipher_request_complete(req, err);
}
2018-08-06 15:43:59 +03:00
/*
 * Job ring completion callback for skcipher decryption: report h/w
 * status, unmap DMA resources, free the extended descriptor and
 * complete the request.  (Unlike the encrypt path, no IV copy-back is
 * performed here.)
 */
static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				  void *context)
{
	struct skcipher_request *req = context;
	struct skcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	/* the job descriptor is embedded in the extended descriptor */
	edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
#endif
	caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	skcipher_request_complete(req, err);
}
2015-06-16 13:54:23 +08:00
/*
 * Fill in aead job descriptor
 *
 * Builds the job descriptor header pointing at the proper shared
 * descriptor (encrypt or decrypt), then appends SEQ IN/OUT pointers for
 * the input (assoc + payload) and output.  Output length includes the
 * ICV on encrypt and excludes it on decrypt.
 */
static void init_aead_job(struct aead_request *req,
			  struct aead_edesc *edesc,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	dma_addr_t ptr;
	u32 *sh_desc;

	/* pick the shared descriptor matching the requested direction */
	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		/* single mapped segment (or no input at all) */
		src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0;
		in_options = 0;
	} else {
		/* input goes through the h/w S/G table */
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
			  in_options);

	/* in-place by default; overridden below for distinct dst */
	dst_dma = src_dma;
	out_options = in_options;

	if (unlikely(req->src != req->dst)) {
		if (edesc->dst_nents == 1) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			/* dst S/G entries follow the src entries */
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	/* encrypt appends the ICV to the output, decrypt consumes it */
	if (encrypt)
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen - authsize,
				   out_options);
}
/*
 * Fill in the GCM-specific part of the job descriptor: assoclen in REG3,
 * the IV FIFO LOAD (with salt prepended for RFC4106-style 12-byte IVs),
 * on top of the common aead job built by init_aead_job().
 */
static void init_gcm_job(struct aead_request *req,
			 struct aead_edesc *edesc,
			 bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc = edesc->hw_desc;
	/* generic GCM uses a bare 12-byte IV; otherwise salt is appended */
	bool generic_gcm = (ivsize == GCM_AES_IV_SIZE);
	unsigned int last;

	init_aead_job(req, edesc, all_contig, encrypt);
	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);

	/* BUG This should not be specific to generic GCM. */
	last = 0;
	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
		last = FIFOLD_TYPE_LAST1;

	/* Read GCM IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | GCM_AES_IV_SIZE | last);
	/* Append Salt */
	if (!generic_gcm)
		append_data(desc, ctx->key + ctx->cdata.keylen, 4);
	/* Append IV */
	append_data(desc, req->iv, ivsize);
	/* End of blank commands */
}
2015-07-30 17:53:17 +08:00
/*
 * Fill in the authenc (cipher + HMAC) specific part of the job
 * descriptor: assoclen in REG3/DPOVRD (era-dependent) and the IV loaded
 * into CONTEXT1 at the offset required by the cipher mode.
 */
static void init_authenc_job(struct aead_request *req,
			     struct aead_edesc *edesc,
			     bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	u32 *desc = edesc->hw_desc;
	u32 ivoffset = 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ivoffset = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;

	init_aead_job(req, edesc, all_contig, encrypt);

	/*
	 * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
	 * having DPOVRD as destination.
	 */
	if (ctrlpriv->era < 3)
		append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
	else
		append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);

	/*
	 * Load the IV as immediate data unless the shared descriptor
	 * generates it (geniv), except for RFC3686 encryption which always
	 * supplies it here.
	 */
	if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
		append_load_as_imm(desc, req->iv, ivsize,
				   LDST_CLASS_1_CCB |
				   LDST_SRCDST_BYTE_CONTEXT |
				   (ivoffset << LDST_OFFSET_SHIFT));
}
2011-07-15 11:21:42 +08:00
/*
 * Fill in skcipher job descriptor
 *
 * The input always goes through the h/w S/G table, whose first entry is
 * the IV bounce buffer followed by the source segments; hence the input
 * length is cryptlen + ivsize.  The output is the plain destination when
 * it is a single segment, otherwise its own region of the S/G table.
 */
static void init_skcipher_job(struct skcipher_request *req,
			      struct skcipher_edesc *edesc,
			      const bool encrypt)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 *desc = edesc->hw_desc;
	u32 *sh_desc;
	u32 out_options = 0;
	dma_addr_t dst_dma, ptr;
	int len;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	pr_err("asked=%d, cryptlen%d\n",
	       (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen);
#endif
	caam_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		     edesc->src_nents > 1 ? 100 : req->cryptlen, 1);

	/* pick the shared descriptor matching the requested direction */
	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	/* input = S/G table: [IV entry][src entries...] */
	append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->cryptlen + ivsize,
			  LDST_SGF);

	if (likely(req->src == req->dst)) {
		/* in-place: reuse the src entries, skipping the IV entry */
		dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry);
		out_options = LDST_SGF;
	} else {
		if (edesc->dst_nents == 1) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			/* dst entries follow the IV + src entries */
			dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
}
2011-03-13 16:54:26 +08:00
/*
 * allocate and map the aead extended descriptor
 *
 * Counts and DMA-maps the source/destination scatterlists, allocates the
 * extended descriptor (with the job descriptor and h/w S/G table
 * trailing it in one allocation), builds the S/G table for multi-segment
 * src/dst and maps it.  On success returns the edesc and sets
 * *all_contig_ptr to true when the mapped input is a single segment; on
 * failure everything mapped so far is unmapped and an ERR_PTR is
 * returned.
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	/* may sleep only if the caller allows it */
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct aead_edesc *edesc;
	int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	if (unlikely(req->dst != req->src)) {
		/* out-of-place: count src and dst segments separately */
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen);
		if (unlikely(src_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen);
			return ERR_PTR(src_nents);
		}

		/* dst gains the ICV on encrypt and loses it on decrypt */
		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
					     req->cryptlen +
						(encrypt ? authsize :
							   (-authsize)));
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : (-authsize)));
			return ERR_PTR(dst_nents);
		}
	} else {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize : 0));
		if (unlikely(src_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : 0));
			return ERR_PTR(src_nents);
		}
	}

	if (likely(req->src == req->dst)) {
		/* in-place: one bidirectional mapping covers both */
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* Cover also the case of null (zero length) input data */
		if (src_nents) {
			mapped_src_nents = dma_map_sg(jrdev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(jrdev, "unable to map source\n");
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	}

	/* S/G table entries are only needed for multi-segment src/dst */
	sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
	sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	/* the S/G table lives right after the job descriptor */
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*all_contig_ptr = !(mapped_src_nents > 1);

	sec4_sg_index = 0;
	if (mapped_src_nents > 1) {
		sg_to_sec4_sg_last(req->src, mapped_src_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
		sec4_sg_index += mapped_src_nents;
	}
	if (mapped_dst_nents > 1) {
		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	/* nothing to map when both src and dst are single segments */
	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;
}
2015-06-16 13:54:23 +08:00
static int gcm_encrypt ( struct aead_request * req )
2011-03-13 16:54:26 +08:00
{
2011-07-15 11:21:41 +08:00
struct aead_edesc * edesc ;
struct crypto_aead * aead = crypto_aead_reqtfm ( req ) ;
2011-03-13 16:54:26 +08:00
struct caam_ctx * ctx = crypto_aead_ctx ( aead ) ;
struct device * jrdev = ctx - > jrdev ;
2011-07-15 11:21:42 +08:00
bool all_contig ;
2011-03-13 16:54:26 +08:00
u32 * desc ;
2011-07-15 11:21:42 +08:00
int ret = 0 ;
2011-03-13 16:54:26 +08:00
/* allocate extended descriptor */
2015-06-16 13:54:23 +08:00
edesc = aead_edesc_alloc ( req , GCM_DESC_JOB_IO_LEN , & all_contig , true ) ;
2011-03-13 16:54:26 +08:00
if ( IS_ERR ( edesc ) )
return PTR_ERR ( edesc ) ;
2011-07-15 11:21:42 +08:00
/* Create and submit job descriptor */
2015-06-16 13:54:23 +08:00
init_gcm_job ( req , edesc , all_contig , true ) ;
2011-07-15 11:21:42 +08:00
# ifdef DEBUG
2013-08-14 18:56:45 +03:00
print_hex_dump ( KERN_ERR , " aead jobdesc@ " __stringify ( __LINE__ ) " : " ,
2011-07-15 11:21:42 +08:00
DUMP_PREFIX_ADDRESS , 16 , 4 , edesc - > hw_desc ,
desc_bytes ( edesc - > hw_desc ) , 1 ) ;
# endif
2011-03-13 16:54:26 +08:00
2011-07-15 11:21:42 +08:00
desc = edesc - > hw_desc ;
ret = caam_jr_enqueue ( jrdev , desc , aead_encrypt_done , req ) ;
if ( ! ret ) {
ret = - EINPROGRESS ;
} else {
aead_unmap ( jrdev , edesc , req ) ;
kfree ( edesc ) ;
}
2011-03-13 16:54:26 +08:00
2011-07-15 11:21:42 +08:00
return ret ;
2011-03-13 16:54:26 +08:00
}
2015-07-09 07:17:33 +08:00
static int ipsec_gcm_encrypt ( struct aead_request * req )
{
if ( req - > assoclen < 8 )
return - EINVAL ;
return gcm_encrypt ( req ) ;
}
2015-07-30 17:53:17 +08:00
static int aead_encrypt ( struct aead_request * req )
2015-06-16 13:54:23 +08:00
{
struct aead_edesc * edesc ;
struct crypto_aead * aead = crypto_aead_reqtfm ( req ) ;
struct caam_ctx * ctx = crypto_aead_ctx ( aead ) ;
struct device * jrdev = ctx - > jrdev ;
bool all_contig ;
u32 * desc ;
int ret = 0 ;
/* allocate extended descriptor */
2015-07-30 17:53:17 +08:00
edesc = aead_edesc_alloc ( req , AUTHENC_DESC_JOB_IO_LEN ,
& all_contig , true ) ;
2015-06-16 13:54:23 +08:00
if ( IS_ERR ( edesc ) )
return PTR_ERR ( edesc ) ;
/* Create and submit job descriptor */
2015-07-30 17:53:17 +08:00
init_authenc_job ( req , edesc , all_contig , true ) ;
2015-06-16 13:54:23 +08:00
# ifdef DEBUG
print_hex_dump ( KERN_ERR , " aead jobdesc@ " __stringify ( __LINE__ ) " : " ,
DUMP_PREFIX_ADDRESS , 16 , 4 , edesc - > hw_desc ,
desc_bytes ( edesc - > hw_desc ) , 1 ) ;
# endif
desc = edesc - > hw_desc ;
2015-07-30 17:53:17 +08:00
ret = caam_jr_enqueue ( jrdev , desc , aead_encrypt_done , req ) ;
2015-06-16 13:54:23 +08:00
if ( ! ret ) {
ret = - EINPROGRESS ;
} else {
2015-07-30 17:53:17 +08:00
aead_unmap ( jrdev , edesc , req ) ;
2015-06-16 13:54:23 +08:00
kfree ( edesc ) ;
}
return ret ;
}
static int gcm_decrypt ( struct aead_request * req )
{
struct aead_edesc * edesc ;
struct crypto_aead * aead = crypto_aead_reqtfm ( req ) ;
struct caam_ctx * ctx = crypto_aead_ctx ( aead ) ;
struct device * jrdev = ctx - > jrdev ;
bool all_contig ;
u32 * desc ;
int ret = 0 ;
/* allocate extended descriptor */
edesc = aead_edesc_alloc ( req , GCM_DESC_JOB_IO_LEN , & all_contig , false ) ;
if ( IS_ERR ( edesc ) )
return PTR_ERR ( edesc ) ;
/* Create and submit job descriptor*/
init_gcm_job ( req , edesc , all_contig , false ) ;
# ifdef DEBUG
print_hex_dump ( KERN_ERR , " aead jobdesc@ " __stringify ( __LINE__ ) " : " ,
DUMP_PREFIX_ADDRESS , 16 , 4 , edesc - > hw_desc ,
desc_bytes ( edesc - > hw_desc ) , 1 ) ;
# endif
desc = edesc - > hw_desc ;
ret = caam_jr_enqueue ( jrdev , desc , aead_decrypt_done , req ) ;
if ( ! ret ) {
ret = - EINPROGRESS ;
} else {
aead_unmap ( jrdev , edesc , req ) ;
kfree ( edesc ) ;
}
return ret ;
}
2015-07-09 07:17:33 +08:00
static int ipsec_gcm_decrypt ( struct aead_request * req )
{
if ( req - > assoclen < 8 )
return - EINVAL ;
return gcm_decrypt ( req ) ;
}
2015-07-30 17:53:17 +08:00
static int aead_decrypt ( struct aead_request * req )
2011-03-13 16:54:26 +08:00
{
2011-07-15 11:21:42 +08:00
struct aead_edesc * edesc ;
2011-03-13 16:54:26 +08:00
struct crypto_aead * aead = crypto_aead_reqtfm ( req ) ;
struct caam_ctx * ctx = crypto_aead_ctx ( aead ) ;
struct device * jrdev = ctx - > jrdev ;
2011-07-15 11:21:42 +08:00
bool all_contig ;
2011-03-13 16:54:26 +08:00
u32 * desc ;
2011-07-15 11:21:42 +08:00
int ret = 0 ;
2011-03-13 16:54:26 +08:00
2017-07-10 08:40:28 +03:00
caam_dump_sg ( KERN_ERR , " dec src@ " __stringify ( __LINE__ ) " : " ,
DUMP_PREFIX_ADDRESS , 16 , 4 , req - > src ,
req - > assoclen + req - > cryptlen , 1 ) ;
2016-09-22 11:57:58 +03:00
2011-03-13 16:54:26 +08:00
/* allocate extended descriptor */
2015-07-30 17:53:17 +08:00
edesc = aead_edesc_alloc ( req , AUTHENC_DESC_JOB_IO_LEN ,
& all_contig , false ) ;
2011-03-13 16:54:26 +08:00
if ( IS_ERR ( edesc ) )
return PTR_ERR ( edesc ) ;
2011-07-15 11:21:42 +08:00
/* Create and submit job descriptor*/
2015-07-30 17:53:17 +08:00
init_authenc_job ( req , edesc , all_contig , false ) ;
2011-07-15 11:21:42 +08:00
# ifdef DEBUG
2013-08-14 18:56:45 +03:00
print_hex_dump ( KERN_ERR , " aead jobdesc@ " __stringify ( __LINE__ ) " : " ,
2011-07-15 11:21:42 +08:00
DUMP_PREFIX_ADDRESS , 16 , 4 , edesc - > hw_desc ,
desc_bytes ( edesc - > hw_desc ) , 1 ) ;
# endif
2011-03-13 16:54:26 +08:00
desc = edesc - > hw_desc ;
2015-07-30 17:53:17 +08:00
ret = caam_jr_enqueue ( jrdev , desc , aead_decrypt_done , req ) ;
2011-07-15 11:21:42 +08:00
if ( ! ret ) {
ret = - EINPROGRESS ;
} else {
2015-07-30 17:53:17 +08:00
aead_unmap ( jrdev , edesc , req ) ;
2011-07-15 11:21:42 +08:00
kfree ( edesc ) ;
}
2011-03-13 16:54:26 +08:00
2011-07-15 11:21:42 +08:00
return ret ;
}
2011-03-13 16:54:26 +08:00
2011-07-15 11:21:42 +08:00
/*
 * allocate and map the skcipher extended descriptor for skcipher
 *
 * DMA-maps the source/destination scatterlists, allocates the extended
 * descriptor (job descriptor, h/w S/G table and a DMA-able IV copy all
 * trail it in one allocation), copies and maps the IV, and builds the
 * S/G table with the IV as its first entry followed by the source
 * segments (and destination segments when multi-segment).  On failure
 * all mappings made so far are undone and an ERR_PTR is returned.
 */
static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
						   int desc_bytes)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *jrdev = ctx->jrdev;
	/* may sleep only if the caller allows it */
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (req->dst != req->src) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}
	}

	if (likely(req->src == req->dst)) {
		/* in-place: one bidirectional mapping covers both */
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	}

	/* first S/G entry is reserved for the IV */
	sec4_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = sec4_sg_ents;
	sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);

	/*
	 * allocate space for base edesc and hw desc commands, link tables, IV
	 */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	/* the S/G table lives right after the job descriptor */
	edesc->sec4_sg = (void *)edesc + sizeof(struct skcipher_edesc) +
			 desc_bytes;

	/* Make sure IV is located in a DMAable area */
	iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
	memcpy(iv, req->iv, ivsize);

	iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	/* S/G table layout: [IV][src entries...][dst entries if needed] */
	dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
	sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0);

	if (mapped_dst_nents > 1) {
		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
				   edesc->sec4_sg + dst_sg_idx, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "skcipher sec4_sg@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	return edesc;
}
2018-08-06 15:43:59 +03:00
static int skcipher_encrypt ( struct skcipher_request * req )
2011-07-15 11:21:42 +08:00
{
2018-08-06 15:43:59 +03:00
struct skcipher_edesc * edesc ;
struct crypto_skcipher * skcipher = crypto_skcipher_reqtfm ( req ) ;
struct caam_ctx * ctx = crypto_skcipher_ctx ( skcipher ) ;
2011-07-15 11:21:42 +08:00
struct device * jrdev = ctx - > jrdev ;
u32 * desc ;
int ret = 0 ;
/* allocate extended descriptor */
2018-08-06 15:43:59 +03:00
edesc = skcipher_edesc_alloc ( req , DESC_JOB_IO_LEN * CAAM_CMD_SZ ) ;
2011-07-15 11:21:42 +08:00
if ( IS_ERR ( edesc ) )
return PTR_ERR ( edesc ) ;
/* Create and submit job descriptor*/
2018-08-06 15:43:59 +03:00
init_skcipher_job ( req , edesc , true ) ;
2011-07-15 11:21:42 +08:00
# ifdef DEBUG
2018-08-06 15:43:59 +03:00
print_hex_dump ( KERN_ERR , " skcipher jobdesc@ " __stringify ( __LINE__ ) " : " ,
2011-07-15 11:21:42 +08:00
DUMP_PREFIX_ADDRESS , 16 , 4 , edesc - > hw_desc ,
desc_bytes ( edesc - > hw_desc ) , 1 ) ;
# endif
desc = edesc - > hw_desc ;
2018-08-06 15:43:59 +03:00
ret = caam_jr_enqueue ( jrdev , desc , skcipher_encrypt_done , req ) ;
2011-07-15 11:21:42 +08:00
if ( ! ret ) {
ret = - EINPROGRESS ;
} else {
2018-08-06 15:43:59 +03:00
skcipher_unmap ( jrdev , edesc , req ) ;
2011-07-15 11:21:42 +08:00
kfree ( edesc ) ;
}
return ret ;
}
2018-08-06 15:43:59 +03:00
static int skcipher_decrypt ( struct skcipher_request * req )
2011-07-15 11:21:42 +08:00
{
2018-08-06 15:43:59 +03:00
struct skcipher_edesc * edesc ;
struct crypto_skcipher * skcipher = crypto_skcipher_reqtfm ( req ) ;
struct caam_ctx * ctx = crypto_skcipher_ctx ( skcipher ) ;
int ivsize = crypto_skcipher_ivsize ( skcipher ) ;
2011-07-15 11:21:42 +08:00
struct device * jrdev = ctx - > jrdev ;
u32 * desc ;
int ret = 0 ;
/* allocate extended descriptor */
2018-08-06 15:43:59 +03:00
edesc = skcipher_edesc_alloc ( req , DESC_JOB_IO_LEN * CAAM_CMD_SZ ) ;
2011-07-15 11:21:42 +08:00
if ( IS_ERR ( edesc ) )
return PTR_ERR ( edesc ) ;
2018-03-28 15:39:18 +03:00
/*
2018-08-06 15:43:59 +03:00
* The crypto API expects us to set the IV ( req - > iv ) to the last
2018-03-28 15:39:18 +03:00
* ciphertext block .
*/
2018-08-06 15:43:59 +03:00
scatterwalk_map_and_copy ( req - > iv , req - > src , req - > cryptlen - ivsize ,
2018-03-28 15:39:18 +03:00
ivsize , 0 ) ;
2011-07-15 11:21:42 +08:00
/* Create and submit job descriptor*/
2018-08-06 15:43:59 +03:00
init_skcipher_job ( req , edesc , false ) ;
2011-07-15 11:21:42 +08:00
desc = edesc - > hw_desc ;
# ifdef DEBUG
2018-08-06 15:43:59 +03:00
print_hex_dump ( KERN_ERR , " skcipher jobdesc@ " __stringify ( __LINE__ ) " : " ,
2011-07-15 11:21:42 +08:00
DUMP_PREFIX_ADDRESS , 16 , 4 , edesc - > hw_desc ,
desc_bytes ( edesc - > hw_desc ) , 1 ) ;
# endif
2018-08-06 15:43:59 +03:00
ret = caam_jr_enqueue ( jrdev , desc , skcipher_decrypt_done , req ) ;
2011-07-15 11:21:42 +08:00
if ( ! ret ) {
ret = - EINPROGRESS ;
} else {
2018-08-06 15:43:59 +03:00
skcipher_unmap ( jrdev , edesc , req ) ;
2011-07-15 11:21:42 +08:00
kfree ( edesc ) ;
}
return ret ;
}
2018-08-06 15:43:59 +03:00
/*
 * Registration table of symmetric-cipher (skcipher) algorithms backed
 * by the CAAM engine. Each entry pairs the generic crypto API template
 * (.skcipher) with the CAAM-specific CLASS1 operation encoding (.caam);
 * all entries share the same setkey/encrypt/decrypt entry points except
 * xts(aes), which needs its own setkey for the two half-keys.
 */
static struct caam_skcipher_alg driver_algs[] = {
	{
		/* AES in CBC mode */
		.skcipher = {
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		/* Triple-DES (EDE) in CBC mode */
		.skcipher = {
			.base = {
				.cra_name = "cbc(des3_ede)",
				.cra_driver_name = "cbc-3des-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		/* Single DES in CBC mode */
		.skcipher = {
			.base = {
				.cra_name = "cbc(des)",
				.cra_driver_name = "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		/*
		 * AES in CTR mode: stream cipher, so cra_blocksize is 1
		 * and chunksize carries the underlying block size.
		 */
		.skcipher = {
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
					OP_ALG_AAI_CTR_MOD128,
	},
	{
		/*
		 * RFC3686 CTR(AES): the key carries an extra trailing
		 * nonce, hence the enlarged key sizes and the rfc3686
		 * flag consumed by the shared-descriptor builder.
		 */
		.skcipher = {
			.base = {
				.cra_name = "rfc3686(ctr(aes))",
				.cra_driver_name = "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.rfc3686 = true,
		},
	},
	{
		/*
		 * XTS(AES): key is a concatenation of two AES keys, so
		 * key sizes are doubled and a dedicated setkey is used.
		 */
		.skcipher = {
			.base = {
				.cra_name = "xts(aes)",
				.cra_driver_name = "xts-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = xts_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};
static struct caam_aead_alg driver_aeads [ ] = {
{
. aead = {
. base = {
. cra_name = " rfc4106(gcm(aes)) " ,
. cra_driver_name = " rfc4106-gcm-aes-caam " ,
. cra_blocksize = 1 ,
} ,
. setkey = rfc4106_setkey ,
. setauthsize = rfc4106_setauthsize ,
. encrypt = ipsec_gcm_encrypt ,
. decrypt = ipsec_gcm_decrypt ,
2017-08-22 10:08:09 +02:00
. ivsize = GCM_RFC4106_IV_SIZE ,
2015-07-30 17:53:17 +08:00
. maxauthsize = AES_BLOCK_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM ,
} ,
} ,
{
. aead = {
. base = {
. cra_name = " rfc4543(gcm(aes)) " ,
. cra_driver_name = " rfc4543-gcm-aes-caam " ,
. cra_blocksize = 1 ,
} ,
. setkey = rfc4543_setkey ,
. setauthsize = rfc4543_setauthsize ,
. encrypt = ipsec_gcm_encrypt ,
. decrypt = ipsec_gcm_decrypt ,
2017-08-22 10:08:09 +02:00
. ivsize = GCM_RFC4543_IV_SIZE ,
2015-07-30 17:53:17 +08:00
. maxauthsize = AES_BLOCK_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM ,
} ,
} ,
/* Galois Counter Mode */
{
. aead = {
. base = {
. cra_name = " gcm(aes) " ,
. cra_driver_name = " gcm-aes-caam " ,
. cra_blocksize = 1 ,
} ,
. setkey = gcm_setkey ,
. setauthsize = gcm_setauthsize ,
. encrypt = gcm_encrypt ,
. decrypt = gcm_decrypt ,
2017-08-22 10:08:09 +02:00
. ivsize = GCM_AES_IV_SIZE ,
2015-07-30 17:53:17 +08:00
. maxauthsize = AES_BLOCK_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM ,
} ,
} ,
/* single-pass ipsec_esp descriptor */
{
. aead = {
. base = {
. cra_name = " authenc(hmac(md5), "
" ecb(cipher_null)) " ,
. cra_driver_name = " authenc-hmac-md5- "
" ecb-cipher_null-caam " ,
. cra_blocksize = NULL_BLOCK_SIZE ,
} ,
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
2014-03-14 17:46:52 +02:00
. ivsize = NULL_IV_SIZE ,
2015-07-30 17:53:17 +08:00
. maxauthsize = MD5_DIGEST_SIZE ,
} ,
. caam = {
. class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP ,
} ,
} ,
{
. aead = {
. base = {
. cra_name = " authenc(hmac(sha1), "
" ecb(cipher_null)) " ,
. cra_driver_name = " authenc-hmac-sha1- "
" ecb-cipher_null-caam " ,
. cra_blocksize = NULL_BLOCK_SIZE ,
2014-03-14 17:46:52 +02:00
} ,
2015-07-30 17:53:17 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
. ivsize = NULL_IV_SIZE ,
. maxauthsize = SHA1_DIGEST_SIZE ,
} ,
. caam = {
. class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP ,
} ,
2014-03-14 17:46:52 +02:00
} ,
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " authenc(hmac(sha224), "
" ecb(cipher_null)) " ,
. cra_driver_name = " authenc-hmac-sha224- "
" ecb-cipher_null-caam " ,
. cra_blocksize = NULL_BLOCK_SIZE ,
} ,
2014-03-14 17:46:52 +02:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
2014-03-14 17:46:52 +02:00
. ivsize = NULL_IV_SIZE ,
. maxauthsize = SHA224_DIGEST_SIZE ,
2015-07-30 17:53:17 +08:00
} ,
. caam = {
. class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP ,
} ,
2014-03-14 17:46:52 +02:00
} ,
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " authenc(hmac(sha256), "
" ecb(cipher_null)) " ,
. cra_driver_name = " authenc-hmac-sha256- "
" ecb-cipher_null-caam " ,
. cra_blocksize = NULL_BLOCK_SIZE ,
} ,
2014-03-14 17:46:52 +02:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
2014-03-14 17:46:52 +02:00
. ivsize = NULL_IV_SIZE ,
. maxauthsize = SHA256_DIGEST_SIZE ,
2015-07-30 17:53:17 +08:00
} ,
. caam = {
. class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP ,
} ,
2014-03-14 17:46:52 +02:00
} ,
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " authenc(hmac(sha384), "
" ecb(cipher_null)) " ,
. cra_driver_name = " authenc-hmac-sha384- "
" ecb-cipher_null-caam " ,
. cra_blocksize = NULL_BLOCK_SIZE ,
} ,
2014-03-14 17:46:52 +02:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
2014-03-14 17:46:52 +02:00
. ivsize = NULL_IV_SIZE ,
. maxauthsize = SHA384_DIGEST_SIZE ,
2015-07-30 17:53:17 +08:00
} ,
. caam = {
. class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP ,
} ,
2014-03-14 17:46:52 +02:00
} ,
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " authenc(hmac(sha512), "
" ecb(cipher_null)) " ,
. cra_driver_name = " authenc-hmac-sha512- "
" ecb-cipher_null-caam " ,
. cra_blocksize = NULL_BLOCK_SIZE ,
} ,
2014-03-14 17:46:52 +02:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
2014-03-14 17:46:52 +02:00
. ivsize = NULL_IV_SIZE ,
. maxauthsize = SHA512_DIGEST_SIZE ,
2015-07-30 17:53:17 +08:00
} ,
. caam = {
. class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP ,
} ,
} ,
{
. aead = {
. base = {
. cra_name = " authenc(hmac(md5),cbc(aes)) " ,
. cra_driver_name = " authenc-hmac-md5- "
" cbc-aes-caam " ,
. cra_blocksize = AES_BLOCK_SIZE ,
2014-03-14 17:46:52 +02:00
} ,
2015-07-30 17:53:17 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
. ivsize = AES_BLOCK_SIZE ,
. maxauthsize = MD5_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP ,
} ,
2014-03-14 17:46:52 +02:00
} ,
2011-11-21 16:13:27 +08:00
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " echainiv(authenc(hmac(md5), "
" cbc(aes))) " ,
. cra_driver_name = " echainiv-authenc-hmac-md5- "
" cbc-aes-caam " ,
. cra_blocksize = AES_BLOCK_SIZE ,
} ,
2011-11-21 16:13:27 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
2016-08-29 14:52:14 +03:00
. decrypt = aead_decrypt ,
2011-11-21 16:13:27 +08:00
. ivsize = AES_BLOCK_SIZE ,
. maxauthsize = MD5_DIGEST_SIZE ,
2015-07-30 17:53:17 +08:00
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP ,
. geniv = true ,
} ,
} ,
{
. aead = {
. base = {
. cra_name = " authenc(hmac(sha1),cbc(aes)) " ,
. cra_driver_name = " authenc-hmac-sha1- "
" cbc-aes-caam " ,
. cra_blocksize = AES_BLOCK_SIZE ,
2011-11-21 16:13:27 +08:00
} ,
2015-07-30 17:53:17 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
. ivsize = AES_BLOCK_SIZE ,
. maxauthsize = SHA1_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP ,
} ,
2011-11-21 16:13:27 +08:00
} ,
2011-03-13 16:54:26 +08:00
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " echainiv(authenc(hmac(sha1), "
" cbc(aes))) " ,
. cra_driver_name = " echainiv-authenc- "
" hmac-sha1-cbc-aes-caam " ,
. cra_blocksize = AES_BLOCK_SIZE ,
} ,
2011-07-15 11:21:41 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
2016-08-29 14:52:14 +03:00
. decrypt = aead_decrypt ,
2011-03-13 16:54:26 +08:00
. ivsize = AES_BLOCK_SIZE ,
. maxauthsize = SHA1_DIGEST_SIZE ,
2015-07-30 17:53:17 +08:00
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP ,
. geniv = true ,
} ,
} ,
{
. aead = {
. base = {
. cra_name = " authenc(hmac(sha224),cbc(aes)) " ,
. cra_driver_name = " authenc-hmac-sha224- "
" cbc-aes-caam " ,
. cra_blocksize = AES_BLOCK_SIZE ,
2011-03-13 16:54:26 +08:00
} ,
2015-07-30 17:53:17 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
. ivsize = AES_BLOCK_SIZE ,
. maxauthsize = SHA224_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP ,
} ,
2011-03-13 16:54:26 +08:00
} ,
2012-01-09 18:26:44 -06:00
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " echainiv(authenc(hmac(sha224), "
" cbc(aes))) " ,
. cra_driver_name = " echainiv-authenc- "
" hmac-sha224-cbc-aes-caam " ,
. cra_blocksize = AES_BLOCK_SIZE ,
} ,
2012-01-09 18:26:44 -06:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
2016-08-29 14:52:14 +03:00
. decrypt = aead_decrypt ,
2012-01-09 18:26:44 -06:00
. ivsize = AES_BLOCK_SIZE ,
. maxauthsize = SHA224_DIGEST_SIZE ,
2015-07-30 17:53:17 +08:00
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP ,
. geniv = true ,
} ,
} ,
{
. aead = {
. base = {
. cra_name = " authenc(hmac(sha256),cbc(aes)) " ,
. cra_driver_name = " authenc-hmac-sha256- "
" cbc-aes-caam " ,
. cra_blocksize = AES_BLOCK_SIZE ,
2012-01-09 18:26:44 -06:00
} ,
2015-07-30 17:53:17 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
. ivsize = AES_BLOCK_SIZE ,
. maxauthsize = SHA256_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP ,
} ,
2012-01-09 18:26:44 -06:00
} ,
2011-03-13 16:54:26 +08:00
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " echainiv(authenc(hmac(sha256), "
" cbc(aes))) " ,
. cra_driver_name = " echainiv-authenc- "
" hmac-sha256-cbc-aes-caam " ,
. cra_blocksize = AES_BLOCK_SIZE ,
} ,
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
2016-08-29 14:52:14 +03:00
. decrypt = aead_decrypt ,
2015-07-30 17:53:17 +08:00
. ivsize = AES_BLOCK_SIZE ,
. maxauthsize = SHA256_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP ,
. geniv = true ,
} ,
} ,
{
. aead = {
. base = {
. cra_name = " authenc(hmac(sha384),cbc(aes)) " ,
. cra_driver_name = " authenc-hmac-sha384- "
" cbc-aes-caam " ,
. cra_blocksize = AES_BLOCK_SIZE ,
} ,
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
. ivsize = AES_BLOCK_SIZE ,
. maxauthsize = SHA384_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP ,
} ,
} ,
{
. aead = {
. base = {
. cra_name = " echainiv(authenc(hmac(sha384), "
" cbc(aes))) " ,
. cra_driver_name = " echainiv-authenc- "
" hmac-sha384-cbc-aes-caam " ,
. cra_blocksize = AES_BLOCK_SIZE ,
} ,
2011-07-15 11:21:41 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
2016-08-29 14:52:14 +03:00
. decrypt = aead_decrypt ,
2011-03-13 16:54:26 +08:00
. ivsize = AES_BLOCK_SIZE ,
2015-07-30 17:53:17 +08:00
. maxauthsize = SHA384_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP ,
. geniv = true ,
} ,
2011-03-13 16:54:26 +08:00
} ,
2012-01-09 18:26:44 -06:00
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " authenc(hmac(sha512),cbc(aes)) " ,
. cra_driver_name = " authenc-hmac-sha512- "
" cbc-aes-caam " ,
. cra_blocksize = AES_BLOCK_SIZE ,
} ,
2012-01-09 18:26:44 -06:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
2012-01-09 18:26:44 -06:00
. ivsize = AES_BLOCK_SIZE ,
2015-07-30 17:53:17 +08:00
. maxauthsize = SHA512_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP ,
} ,
2012-01-09 18:26:44 -06:00
} ,
2011-05-14 22:08:17 -05:00
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " echainiv(authenc(hmac(sha512), "
" cbc(aes))) " ,
. cra_driver_name = " echainiv-authenc- "
" hmac-sha512-cbc-aes-caam " ,
. cra_blocksize = AES_BLOCK_SIZE ,
} ,
2011-07-15 11:21:41 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
2016-08-29 14:52:14 +03:00
. decrypt = aead_decrypt ,
2011-05-14 22:08:17 -05:00
. ivsize = AES_BLOCK_SIZE ,
. maxauthsize = SHA512_DIGEST_SIZE ,
2015-07-30 17:53:17 +08:00
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP ,
. geniv = true ,
} ,
} ,
{
. aead = {
. base = {
. cra_name = " authenc(hmac(md5),cbc(des3_ede)) " ,
. cra_driver_name = " authenc-hmac-md5- "
" cbc-des3_ede-caam " ,
. cra_blocksize = DES3_EDE_BLOCK_SIZE ,
2011-05-14 22:08:17 -05:00
} ,
2015-07-30 17:53:17 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
. ivsize = DES3_EDE_BLOCK_SIZE ,
. maxauthsize = MD5_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP ,
}
2011-05-14 22:08:17 -05:00
} ,
2011-11-21 16:13:27 +08:00
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " echainiv(authenc(hmac(md5), "
" cbc(des3_ede))) " ,
. cra_driver_name = " echainiv-authenc-hmac-md5- "
" cbc-des3_ede-caam " ,
. cra_blocksize = DES3_EDE_BLOCK_SIZE ,
} ,
2011-11-21 16:13:27 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
2016-08-29 14:52:14 +03:00
. decrypt = aead_decrypt ,
2011-11-21 16:13:27 +08:00
. ivsize = DES3_EDE_BLOCK_SIZE ,
. maxauthsize = MD5_DIGEST_SIZE ,
2015-07-30 17:53:17 +08:00
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP ,
. geniv = true ,
}
} ,
{
. aead = {
. base = {
. cra_name = " authenc(hmac(sha1), "
" cbc(des3_ede)) " ,
. cra_driver_name = " authenc-hmac-sha1- "
" cbc-des3_ede-caam " ,
. cra_blocksize = DES3_EDE_BLOCK_SIZE ,
2011-11-21 16:13:27 +08:00
} ,
2015-07-30 17:53:17 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
. ivsize = DES3_EDE_BLOCK_SIZE ,
. maxauthsize = SHA1_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP ,
} ,
2011-11-21 16:13:27 +08:00
} ,
2011-03-13 16:54:26 +08:00
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " echainiv(authenc(hmac(sha1), "
" cbc(des3_ede))) " ,
. cra_driver_name = " echainiv-authenc- "
" hmac-sha1- "
" cbc-des3_ede-caam " ,
. cra_blocksize = DES3_EDE_BLOCK_SIZE ,
} ,
2011-07-15 11:21:41 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
2016-08-29 14:52:14 +03:00
. decrypt = aead_decrypt ,
2011-03-13 16:54:26 +08:00
. ivsize = DES3_EDE_BLOCK_SIZE ,
. maxauthsize = SHA1_DIGEST_SIZE ,
2015-07-30 17:53:17 +08:00
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP ,
. geniv = true ,
} ,
} ,
{
. aead = {
. base = {
. cra_name = " authenc(hmac(sha224), "
" cbc(des3_ede)) " ,
. cra_driver_name = " authenc-hmac-sha224- "
" cbc-des3_ede-caam " ,
. cra_blocksize = DES3_EDE_BLOCK_SIZE ,
2011-03-13 16:54:26 +08:00
} ,
2015-07-30 17:53:17 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
. ivsize = DES3_EDE_BLOCK_SIZE ,
. maxauthsize = SHA224_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP ,
} ,
2011-03-13 16:54:26 +08:00
} ,
2012-01-09 18:26:44 -06:00
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " echainiv(authenc(hmac(sha224), "
" cbc(des3_ede))) " ,
. cra_driver_name = " echainiv-authenc- "
" hmac-sha224- "
" cbc-des3_ede-caam " ,
. cra_blocksize = DES3_EDE_BLOCK_SIZE ,
} ,
2012-01-09 18:26:44 -06:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
2016-08-29 14:52:14 +03:00
. decrypt = aead_decrypt ,
2012-01-09 18:26:44 -06:00
. ivsize = DES3_EDE_BLOCK_SIZE ,
. maxauthsize = SHA224_DIGEST_SIZE ,
2015-07-30 17:53:17 +08:00
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP ,
. geniv = true ,
} ,
} ,
{
. aead = {
. base = {
. cra_name = " authenc(hmac(sha256), "
" cbc(des3_ede)) " ,
. cra_driver_name = " authenc-hmac-sha256- "
" cbc-des3_ede-caam " ,
. cra_blocksize = DES3_EDE_BLOCK_SIZE ,
2012-01-09 18:26:44 -06:00
} ,
2015-07-30 17:53:17 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
. ivsize = DES3_EDE_BLOCK_SIZE ,
. maxauthsize = SHA256_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP ,
} ,
2012-01-09 18:26:44 -06:00
} ,
2011-03-13 16:54:26 +08:00
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " echainiv(authenc(hmac(sha256), "
" cbc(des3_ede))) " ,
. cra_driver_name = " echainiv-authenc- "
" hmac-sha256- "
" cbc-des3_ede-caam " ,
. cra_blocksize = DES3_EDE_BLOCK_SIZE ,
} ,
2011-07-15 11:21:41 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
2016-08-29 14:52:14 +03:00
. decrypt = aead_decrypt ,
2011-03-13 16:54:26 +08:00
. ivsize = DES3_EDE_BLOCK_SIZE ,
. maxauthsize = SHA256_DIGEST_SIZE ,
2015-07-30 17:53:17 +08:00
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP ,
. geniv = true ,
} ,
} ,
{
. aead = {
. base = {
. cra_name = " authenc(hmac(sha384), "
" cbc(des3_ede)) " ,
. cra_driver_name = " authenc-hmac-sha384- "
" cbc-des3_ede-caam " ,
. cra_blocksize = DES3_EDE_BLOCK_SIZE ,
2011-03-13 16:54:26 +08:00
} ,
2015-07-30 17:53:17 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
. ivsize = DES3_EDE_BLOCK_SIZE ,
. maxauthsize = SHA384_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP ,
} ,
2011-03-13 16:54:26 +08:00
} ,
2012-01-09 18:26:44 -06:00
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " echainiv(authenc(hmac(sha384), "
" cbc(des3_ede))) " ,
. cra_driver_name = " echainiv-authenc- "
" hmac-sha384- "
" cbc-des3_ede-caam " ,
. cra_blocksize = DES3_EDE_BLOCK_SIZE ,
} ,
2012-01-09 18:26:44 -06:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
2016-08-29 14:52:14 +03:00
. decrypt = aead_decrypt ,
2012-01-09 18:26:44 -06:00
. ivsize = DES3_EDE_BLOCK_SIZE ,
. maxauthsize = SHA384_DIGEST_SIZE ,
2015-07-30 17:53:17 +08:00
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP ,
. geniv = true ,
} ,
} ,
{
. aead = {
. base = {
. cra_name = " authenc(hmac(sha512), "
" cbc(des3_ede)) " ,
. cra_driver_name = " authenc-hmac-sha512- "
" cbc-des3_ede-caam " ,
. cra_blocksize = DES3_EDE_BLOCK_SIZE ,
2012-01-09 18:26:44 -06:00
} ,
2015-07-30 17:53:17 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
. ivsize = DES3_EDE_BLOCK_SIZE ,
. maxauthsize = SHA512_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP ,
} ,
2012-01-09 18:26:44 -06:00
} ,
2011-05-14 22:08:17 -05:00
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " echainiv(authenc(hmac(sha512), "
" cbc(des3_ede))) " ,
. cra_driver_name = " echainiv-authenc- "
" hmac-sha512- "
" cbc-des3_ede-caam " ,
. cra_blocksize = DES3_EDE_BLOCK_SIZE ,
} ,
2011-07-15 11:21:41 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
2016-08-29 14:52:14 +03:00
. decrypt = aead_decrypt ,
2011-05-14 22:08:17 -05:00
. ivsize = DES3_EDE_BLOCK_SIZE ,
. maxauthsize = SHA512_DIGEST_SIZE ,
2015-07-30 17:53:17 +08:00
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP ,
. geniv = true ,
} ,
} ,
{
. aead = {
. base = {
. cra_name = " authenc(hmac(md5),cbc(des)) " ,
. cra_driver_name = " authenc-hmac-md5- "
" cbc-des-caam " ,
. cra_blocksize = DES_BLOCK_SIZE ,
2011-05-14 22:08:17 -05:00
} ,
2015-07-30 17:53:17 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
. ivsize = DES_BLOCK_SIZE ,
. maxauthsize = MD5_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP ,
} ,
2011-05-14 22:08:17 -05:00
} ,
2011-11-21 16:13:27 +08:00
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " echainiv(authenc(hmac(md5), "
" cbc(des))) " ,
. cra_driver_name = " echainiv-authenc-hmac-md5- "
" cbc-des-caam " ,
. cra_blocksize = DES_BLOCK_SIZE ,
} ,
2011-11-21 16:13:27 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
2016-08-29 14:52:14 +03:00
. decrypt = aead_decrypt ,
2011-11-21 16:13:27 +08:00
. ivsize = DES_BLOCK_SIZE ,
. maxauthsize = MD5_DIGEST_SIZE ,
2015-07-30 17:53:17 +08:00
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP ,
. geniv = true ,
} ,
} ,
{
. aead = {
. base = {
. cra_name = " authenc(hmac(sha1),cbc(des)) " ,
. cra_driver_name = " authenc-hmac-sha1- "
" cbc-des-caam " ,
. cra_blocksize = DES_BLOCK_SIZE ,
2011-11-21 16:13:27 +08:00
} ,
2015-07-30 17:53:17 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
. ivsize = DES_BLOCK_SIZE ,
. maxauthsize = SHA1_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP ,
} ,
2011-11-21 16:13:27 +08:00
} ,
2011-03-13 16:54:26 +08:00
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " echainiv(authenc(hmac(sha1), "
" cbc(des))) " ,
. cra_driver_name = " echainiv-authenc- "
" hmac-sha1-cbc-des-caam " ,
. cra_blocksize = DES_BLOCK_SIZE ,
} ,
2011-07-15 11:21:41 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
2016-08-29 14:52:14 +03:00
. decrypt = aead_decrypt ,
2011-03-13 16:54:26 +08:00
. ivsize = DES_BLOCK_SIZE ,
. maxauthsize = SHA1_DIGEST_SIZE ,
2015-07-30 17:53:17 +08:00
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP ,
. geniv = true ,
} ,
} ,
{
. aead = {
. base = {
. cra_name = " authenc(hmac(sha224),cbc(des)) " ,
. cra_driver_name = " authenc-hmac-sha224- "
" cbc-des-caam " ,
. cra_blocksize = DES_BLOCK_SIZE ,
2011-03-13 16:54:26 +08:00
} ,
2015-07-30 17:53:17 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
. ivsize = DES_BLOCK_SIZE ,
. maxauthsize = SHA224_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP ,
} ,
2011-03-13 16:54:26 +08:00
} ,
2012-01-09 18:26:44 -06:00
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " echainiv(authenc(hmac(sha224), "
" cbc(des))) " ,
. cra_driver_name = " echainiv-authenc- "
" hmac-sha224-cbc-des-caam " ,
. cra_blocksize = DES_BLOCK_SIZE ,
} ,
2012-01-09 18:26:44 -06:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
2016-08-29 14:52:14 +03:00
. decrypt = aead_decrypt ,
2012-01-09 18:26:44 -06:00
. ivsize = DES_BLOCK_SIZE ,
. maxauthsize = SHA224_DIGEST_SIZE ,
2015-07-30 17:53:17 +08:00
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP ,
. geniv = true ,
} ,
} ,
{
. aead = {
. base = {
. cra_name = " authenc(hmac(sha256),cbc(des)) " ,
. cra_driver_name = " authenc-hmac-sha256- "
" cbc-des-caam " ,
. cra_blocksize = DES_BLOCK_SIZE ,
2012-01-09 18:26:44 -06:00
} ,
2015-07-30 17:53:17 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
. ivsize = DES_BLOCK_SIZE ,
. maxauthsize = SHA256_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP ,
} ,
2012-01-09 18:26:44 -06:00
} ,
2011-03-13 16:54:26 +08:00
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " echainiv(authenc(hmac(sha256), "
" cbc(des))) " ,
. cra_driver_name = " echainiv-authenc- "
" hmac-sha256-cbc-des-caam " ,
. cra_blocksize = DES_BLOCK_SIZE ,
} ,
2011-07-15 11:21:41 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
2016-08-29 14:52:14 +03:00
. decrypt = aead_decrypt ,
2011-03-13 16:54:26 +08:00
. ivsize = DES_BLOCK_SIZE ,
. maxauthsize = SHA256_DIGEST_SIZE ,
2015-07-30 17:53:17 +08:00
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP ,
. geniv = true ,
} ,
} ,
{
. aead = {
. base = {
. cra_name = " authenc(hmac(sha384),cbc(des)) " ,
. cra_driver_name = " authenc-hmac-sha384- "
" cbc-des-caam " ,
. cra_blocksize = DES_BLOCK_SIZE ,
2011-03-13 16:54:26 +08:00
} ,
2015-07-30 17:53:17 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
. ivsize = DES_BLOCK_SIZE ,
. maxauthsize = SHA384_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP ,
} ,
2011-03-13 16:54:26 +08:00
} ,
2012-01-09 18:26:44 -06:00
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " echainiv(authenc(hmac(sha384), "
" cbc(des))) " ,
. cra_driver_name = " echainiv-authenc- "
" hmac-sha384-cbc-des-caam " ,
. cra_blocksize = DES_BLOCK_SIZE ,
} ,
2012-01-09 18:26:44 -06:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
2016-08-29 14:52:14 +03:00
. decrypt = aead_decrypt ,
2012-01-09 18:26:44 -06:00
. ivsize = DES_BLOCK_SIZE ,
. maxauthsize = SHA384_DIGEST_SIZE ,
2015-07-30 17:53:17 +08:00
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP ,
. geniv = true ,
} ,
} ,
{
. aead = {
. base = {
. cra_name = " authenc(hmac(sha512),cbc(des)) " ,
. cra_driver_name = " authenc-hmac-sha512- "
" cbc-des-caam " ,
. cra_blocksize = DES_BLOCK_SIZE ,
2012-01-09 18:26:44 -06:00
} ,
2015-07-30 17:53:17 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
. ivsize = DES_BLOCK_SIZE ,
. maxauthsize = SHA512_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP ,
} ,
2012-01-09 18:26:44 -06:00
} ,
2011-05-14 22:08:17 -05:00
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " echainiv(authenc(hmac(sha512), "
" cbc(des))) " ,
. cra_driver_name = " echainiv-authenc- "
" hmac-sha512-cbc-des-caam " ,
. cra_blocksize = DES_BLOCK_SIZE ,
} ,
2011-07-15 11:21:41 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
2016-08-29 14:52:14 +03:00
. decrypt = aead_decrypt ,
2011-05-14 22:08:17 -05:00
. ivsize = DES_BLOCK_SIZE ,
. maxauthsize = SHA512_DIGEST_SIZE ,
2015-07-30 17:53:17 +08:00
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC ,
. class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP ,
. geniv = true ,
} ,
2011-05-14 22:08:17 -05:00
} ,
2014-10-31 12:45:37 +02:00
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " authenc(hmac(md5), "
" rfc3686(ctr(aes))) " ,
. cra_driver_name = " authenc-hmac-md5- "
" rfc3686-ctr-aes-caam " ,
. cra_blocksize = 1 ,
} ,
2014-10-31 12:45:37 +02:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
2014-10-31 12:45:37 +02:00
. ivsize = CTR_RFC3686_IV_SIZE ,
. maxauthsize = MD5_DIGEST_SIZE ,
2015-07-30 17:53:17 +08:00
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128 ,
. class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP ,
. rfc3686 = true ,
} ,
2014-10-31 12:45:37 +02:00
} ,
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " seqiv(authenc( "
" hmac(md5),rfc3686(ctr(aes)))) " ,
. cra_driver_name = " seqiv-authenc-hmac-md5- "
" rfc3686-ctr-aes-caam " ,
. cra_blocksize = 1 ,
} ,
2014-10-31 12:45:37 +02:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
2016-08-29 14:52:14 +03:00
. decrypt = aead_decrypt ,
2014-10-31 12:45:37 +02:00
. ivsize = CTR_RFC3686_IV_SIZE ,
2015-07-30 17:53:17 +08:00
. maxauthsize = MD5_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128 ,
. class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP ,
. rfc3686 = true ,
. geniv = true ,
} ,
2014-10-31 12:45:37 +02:00
} ,
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " authenc(hmac(sha1), "
" rfc3686(ctr(aes))) " ,
. cra_driver_name = " authenc-hmac-sha1- "
" rfc3686-ctr-aes-caam " ,
. cra_blocksize = 1 ,
} ,
2014-10-31 12:45:37 +02:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
2014-10-31 12:45:37 +02:00
. ivsize = CTR_RFC3686_IV_SIZE ,
2015-07-30 17:53:17 +08:00
. maxauthsize = SHA1_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128 ,
. class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP ,
. rfc3686 = true ,
} ,
2014-10-31 12:45:37 +02:00
} ,
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " seqiv(authenc( "
" hmac(sha1),rfc3686(ctr(aes)))) " ,
. cra_driver_name = " seqiv-authenc-hmac-sha1- "
" rfc3686-ctr-aes-caam " ,
. cra_blocksize = 1 ,
} ,
2014-10-31 12:45:37 +02:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
2016-08-29 14:52:14 +03:00
. decrypt = aead_decrypt ,
2014-10-31 12:45:37 +02:00
. ivsize = CTR_RFC3686_IV_SIZE ,
2015-07-30 17:53:17 +08:00
. maxauthsize = SHA1_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128 ,
. class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP ,
. rfc3686 = true ,
. geniv = true ,
} ,
2014-10-31 12:45:37 +02:00
} ,
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " authenc(hmac(sha224), "
" rfc3686(ctr(aes))) " ,
. cra_driver_name = " authenc-hmac-sha224- "
" rfc3686-ctr-aes-caam " ,
. cra_blocksize = 1 ,
} ,
2014-10-31 12:45:37 +02:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
2014-10-31 12:45:37 +02:00
. ivsize = CTR_RFC3686_IV_SIZE ,
2015-07-30 17:53:17 +08:00
. maxauthsize = SHA224_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128 ,
. class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP ,
. rfc3686 = true ,
} ,
2014-10-31 12:45:37 +02:00
} ,
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " seqiv(authenc( "
" hmac(sha224),rfc3686(ctr(aes)))) " ,
. cra_driver_name = " seqiv-authenc-hmac-sha224- "
" rfc3686-ctr-aes-caam " ,
. cra_blocksize = 1 ,
} ,
2014-10-31 12:45:37 +02:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
2016-08-29 14:52:14 +03:00
. decrypt = aead_decrypt ,
2014-10-31 12:45:37 +02:00
. ivsize = CTR_RFC3686_IV_SIZE ,
2015-07-30 17:53:17 +08:00
. maxauthsize = SHA224_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128 ,
. class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP ,
. rfc3686 = true ,
. geniv = true ,
} ,
2011-07-15 11:21:42 +08:00
} ,
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " authenc(hmac(sha256), "
" rfc3686(ctr(aes))) " ,
. cra_driver_name = " authenc-hmac-sha256- "
" rfc3686-ctr-aes-caam " ,
. cra_blocksize = 1 ,
2011-07-15 11:21:42 +08:00
} ,
2015-07-30 17:53:17 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
. ivsize = CTR_RFC3686_IV_SIZE ,
. maxauthsize = SHA256_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128 ,
. class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP ,
. rfc3686 = true ,
} ,
2011-07-15 11:21:42 +08:00
} ,
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " seqiv(authenc(hmac(sha256), "
" rfc3686(ctr(aes)))) " ,
. cra_driver_name = " seqiv-authenc-hmac-sha256- "
" rfc3686-ctr-aes-caam " ,
. cra_blocksize = 1 ,
2011-07-15 11:21:42 +08:00
} ,
2015-07-30 17:53:17 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
2016-08-29 14:52:14 +03:00
. decrypt = aead_decrypt ,
2015-07-30 17:53:17 +08:00
. ivsize = CTR_RFC3686_IV_SIZE ,
. maxauthsize = SHA256_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128 ,
. class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP ,
. rfc3686 = true ,
. geniv = true ,
} ,
2014-10-31 12:45:35 +02:00
} ,
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " authenc(hmac(sha384), "
" rfc3686(ctr(aes))) " ,
. cra_driver_name = " authenc-hmac-sha384- "
" rfc3686-ctr-aes-caam " ,
. cra_blocksize = 1 ,
2014-10-31 12:45:35 +02:00
} ,
2015-07-30 17:53:17 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
2014-10-31 12:45:36 +02:00
. ivsize = CTR_RFC3686_IV_SIZE ,
2015-07-30 17:53:17 +08:00
. maxauthsize = SHA384_DIGEST_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128 ,
. class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP ,
. rfc3686 = true ,
} ,
} ,
2015-06-16 13:54:23 +08:00
{
. aead = {
. base = {
2015-07-30 17:53:17 +08:00
. cra_name = " seqiv(authenc(hmac(sha384), "
" rfc3686(ctr(aes)))) " ,
. cra_driver_name = " seqiv-authenc-hmac-sha384- "
" rfc3686-ctr-aes-caam " ,
2015-06-16 13:54:23 +08:00
. cra_blocksize = 1 ,
} ,
2015-07-30 17:53:17 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
2016-08-29 14:52:14 +03:00
. decrypt = aead_decrypt ,
2015-07-30 17:53:17 +08:00
. ivsize = CTR_RFC3686_IV_SIZE ,
. maxauthsize = SHA384_DIGEST_SIZE ,
2015-06-16 13:54:23 +08:00
} ,
. caam = {
2015-07-30 17:53:17 +08:00
. class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128 ,
. class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP ,
. rfc3686 = true ,
. geniv = true ,
2015-06-16 13:54:23 +08:00
} ,
} ,
{
. aead = {
. base = {
2015-07-30 17:53:17 +08:00
. cra_name = " authenc(hmac(sha512), "
" rfc3686(ctr(aes))) " ,
. cra_driver_name = " authenc-hmac-sha512- "
" rfc3686-ctr-aes-caam " ,
2015-06-16 13:54:23 +08:00
. cra_blocksize = 1 ,
} ,
2015-07-30 17:53:17 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
. ivsize = CTR_RFC3686_IV_SIZE ,
. maxauthsize = SHA512_DIGEST_SIZE ,
2015-06-16 13:54:23 +08:00
} ,
. caam = {
2015-07-30 17:53:17 +08:00
. class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128 ,
. class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP ,
. rfc3686 = true ,
2015-06-16 13:54:23 +08:00
} ,
} ,
{
. aead = {
. base = {
2015-07-30 17:53:17 +08:00
. cra_name = " seqiv(authenc(hmac(sha512), "
" rfc3686(ctr(aes)))) " ,
. cra_driver_name = " seqiv-authenc-hmac-sha512- "
" rfc3686-ctr-aes-caam " ,
2015-06-16 13:54:23 +08:00
. cra_blocksize = 1 ,
} ,
2015-07-30 17:53:17 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
2016-08-29 14:52:14 +03:00
. decrypt = aead_decrypt ,
2015-07-30 17:53:17 +08:00
. ivsize = CTR_RFC3686_IV_SIZE ,
. maxauthsize = SHA512_DIGEST_SIZE ,
2015-06-16 13:54:23 +08:00
} ,
. caam = {
2015-07-30 17:53:17 +08:00
. class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128 ,
. class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP ,
. rfc3686 = true ,
. geniv = true ,
2015-06-16 13:54:23 +08:00
} ,
} ,
} ;
2017-12-19 12:16:07 +02:00
/*
 * caam_init_common - common transform-init path shared by skcipher and AEAD
 * @ctx:      per-transform context to set up
 * @caam:     template entry carrying the CLASS1/CLASS2 algorithm selectors
 * @uses_dkp: true when the shared descriptors use split-key derivation
 *            (DKP), which requires the descriptor memory to remain
 *            device-writable
 *
 * Allocates a job ring for this transform, DMA-maps the contiguous
 * sh_desc_enc/sh_desc_dec/key region of the context in one mapping, and
 * records the per-field bus addresses plus the descriptor header templates.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			    bool uses_dkp)
{
	struct caam_drv_private *priv;
	dma_addr_t dma_addr;

	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/*
	 * On Era 6+ devices DKP rewrites the key material inside the
	 * descriptor, so the mapping must be bidirectional; otherwise the
	 * CPU is the only writer.
	 */
	priv = dev_get_drvdata(ctx->jrdev->parent);
	if (priv->era >= 6 && uses_dkp)
		ctx->dir = DMA_BIDIRECTIONAL;
	else
		ctx->dir = DMA_TO_DEVICE;

	/* One mapping covers everything from sh_desc_enc up to the DMA fields. */
	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
					offsetof(struct caam_ctx,
						 sh_desc_enc_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_enc_dma = dma_addr;
	ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
						   sh_desc_dec);
	ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	return 0;
}
static int caam_cra_init ( struct crypto_skcipher * tfm )
2011-03-13 16:54:26 +08:00
{
2018-08-06 15:43:59 +03:00
struct skcipher_alg * alg = crypto_skcipher_alg ( tfm ) ;
struct caam_skcipher_alg * caam_alg =
container_of ( alg , typeof ( * caam_alg ) , skcipher ) ;
2011-03-13 16:54:26 +08:00
2018-08-06 15:43:59 +03:00
return caam_init_common ( crypto_skcipher_ctx ( tfm ) , & caam_alg - > caam ,
false ) ;
2015-06-16 13:54:23 +08:00
}
static int caam_aead_init ( struct crypto_aead * tfm )
{
struct aead_alg * alg = crypto_aead_alg ( tfm ) ;
struct caam_aead_alg * caam_alg =
container_of ( alg , struct caam_aead_alg , aead ) ;
struct caam_ctx * ctx = crypto_aead_ctx ( tfm ) ;
2017-12-19 12:16:07 +02:00
return caam_init_common ( ctx , & caam_alg - > caam ,
alg - > setkey = = aead_setkey ) ;
2015-06-16 13:54:23 +08:00
}
static void caam_exit_common ( struct caam_ctx * ctx )
{
2017-02-10 14:07:22 +02:00
dma_unmap_single_attrs ( ctx - > jrdev , ctx - > sh_desc_enc_dma ,
offsetof ( struct caam_ctx , sh_desc_enc_dma ) ,
2017-12-19 12:16:07 +02:00
ctx - > dir , DMA_ATTR_SKIP_CPU_SYNC ) ;
2013-10-25 12:01:03 +05:30
caam_jr_free ( ctx - > jrdev ) ;
2011-03-13 16:54:26 +08:00
}
2018-08-06 15:43:59 +03:00
/* Transform-exit hook for the skcipher algorithms. */
static void caam_cra_exit(struct crypto_skcipher *tfm)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);

	caam_exit_common(ctx);
}
/* Transform-exit hook for the AEAD algorithms. */
static void caam_aead_exit(struct crypto_aead *tfm)
{
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	caam_exit_common(ctx);
}
/*
 * Module unload: unregister every algorithm that was successfully
 * registered at init time (entries that failed registration, or were
 * skipped for lack of hardware support, have ->registered == false).
 */
static void __exit caam_algapi_exit(void)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(driver_aeads); idx++) {
		struct caam_aead_alg *alg = &driver_aeads[idx];

		if (alg->registered)
			crypto_unregister_aead(&alg->aead);
	}

	for (idx = 0; idx < ARRAY_SIZE(driver_algs); idx++) {
		struct caam_skcipher_alg *alg = &driver_algs[idx];

		if (alg->registered)
			crypto_unregister_skcipher(&alg->skcipher);
	}
}
static void caam_skcipher_alg_init ( struct caam_skcipher_alg * t_alg )
2011-03-13 16:54:26 +08:00
{
2018-08-06 15:43:59 +03:00
struct skcipher_alg * alg = & t_alg - > skcipher ;
2011-03-13 16:54:26 +08:00
2018-08-06 15:43:59 +03:00
alg - > base . cra_module = THIS_MODULE ;
alg - > base . cra_priority = CAAM_CRA_PRIORITY ;
alg - > base . cra_ctxsize = sizeof ( struct caam_ctx ) ;
alg - > base . cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY ;
2011-03-13 16:54:26 +08:00
2018-08-06 15:43:59 +03:00
alg - > init = caam_cra_init ;
alg - > exit = caam_cra_exit ;
2011-03-13 16:54:26 +08:00
}
2015-06-16 13:54:23 +08:00
static void caam_aead_alg_init ( struct caam_aead_alg * t_alg )
{
struct aead_alg * alg = & t_alg - > aead ;
alg - > base . cra_module = THIS_MODULE ;
alg - > base . cra_priority = CAAM_CRA_PRIORITY ;
alg - > base . cra_ctxsize = sizeof ( struct caam_ctx ) ;
2015-08-13 17:29:06 +08:00
alg - > base . cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY ;
2015-06-16 13:54:23 +08:00
alg - > init = caam_aead_init ;
alg - > exit = caam_aead_exit ;
}
2011-03-13 16:54:26 +08:00
/*
 * Module init: locate the CAAM controller, probe which CHA blocks (DES,
 * AES, MD) the device instantiates, and register every algorithm in
 * driver_algs[] / driver_aeads[] that the hardware can support.
 *
 * Fix vs. previous version: of_find_device_by_node() takes a reference on
 * the returned platform device which was never dropped; release it with
 * put_device() on every exit path once @priv is no longer needed.
 *
 * Return: 0 (or the last registration error) on the normal path,
 *         -ENODEV when no (initialized) CAAM controller is found.
 */
static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;
	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv) {
		err = -ENODEV;
		goto out_put_dev;
	}

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
		md_limit = SHA256_DIGEST_SIZE;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;
		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES modes not available
		 * on LP devices.
		 */
		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
			if ((t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) ==
			     OP_ALG_AAI_XTS)
				continue;

		caam_skcipher_alg_init(t_alg);

		err = crypto_register_skcipher(&t_alg->skcipher);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->skcipher.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
			if (alg_aai == OP_ALG_AAI_GCM)
				continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		pr_info("caam algorithms registered in /proc/crypto\n");

out_put_dev:
	/* Drop the reference taken by of_find_device_by_node(). */
	put_device(ctrldev);
	return err;
}
module_init ( caam_algapi_init ) ;
module_exit ( caam_algapi_exit ) ;
MODULE_LICENSE ( " GPL " ) ;
MODULE_DESCRIPTION ( " FSL CAAM support for crypto API " ) ;
MODULE_AUTHOR ( " Freescale Semiconductor - NMG/STC " ) ;