/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |      |------------->|  (PDB)      |
 * ---------------      |              |  (hashKey)  |
 *       .              |              |  (cipherKey)|
 *       .              |    |-------->|  (operation)|
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place.  So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with.  Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
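/*
 * A minimal sketch of how a job descriptor of the above shape might be
 * built with the desc_constr.h append helpers used throughout this file
 * (illustrative only; the real job descriptors are assembled by the
 * driver's init_*_job helpers, not shown in this excerpt, and the DMA
 * addresses and lengths below are placeholders):
 *
 *	init_job_desc_shared(desc, ctx->sh_desc_enc_dma, sh_desc_len,
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, out_len, out_options);
 *	append_seq_in_ptr(desc, src_dma, in_len, in_options);
 */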
# include "compat.h"
# include "regs.h"
# include "intern.h"
# include "desc_constr.h"
# include "jr.h"
# include "error.h"
2012-06-22 19:48:46 -05:00
# include "sg_sw_sec4.h"
2012-06-22 19:48:45 -05:00
# include "key_gen.h"
2011-03-13 16:54:26 +08:00
/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)

/* Note: Nonce is counted in enckeylen */
#define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)

#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
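/*
 * Worked example (a sketch, assuming CAAM_CMD_SZ = 4, a 64-word
 * descriptor buffer so that CAAM_DESC_BYTES_MAX = 256, and 32-bit CAAM
 * pointers so that DESC_JOB_IO_LEN = 5 * 4 + 3 * 4 = 32 bytes):
 * DESC_MAX_USED_BYTES = 256 - 32 = 224, giving DESC_MAX_USED_LEN = 56
 * command words available to each shared descriptor below.
 */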
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head alg_list;

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};
/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}
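
/*
 * Control flow of the fragment emitted above for AES (a sketch):
 *
 *	JUMP if shared ------> dk
 *	OPERATION decrypt          (descriptor not shared)
 *	JUMP ----------------> done
 * dk:	OPERATION decrypt | DK     (descriptor shared)
 * done:
 */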
/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};
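
/*
 * Layout of ctx->key for authenc-style transforms, as assembled in
 * aead_setkey() below (a sketch; the trailing 4-byte nonce is present
 * only for RFC3686 CTR transforms, and enckeylen then covers both the
 * encryption key and the nonce):
 *
 *	+-----------------------------+----------------+-----------+
 *	| MDHA split key, padded to   | encryption key | nonce     |
 *	| split_key_pad_len bytes     |                | (4 bytes) |
 *	+-----------------------------+----------------+-----------+
 */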

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline, bool is_rfc3686)
{
	u32 *nonce;
	unsigned int enckeylen = ctx->enckeylen;

	/*
	 * RFC3686 specific:
	 *	| ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
	 *	| enckeylen = encryption key size + nonce size
	 */
	if (is_rfc3686)
		enckeylen -= CTR_RFC3686_NONCE_SIZE;

	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, enckeylen,
				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
			       enckeylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc,
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline, bool is_rfc3686)
{
	u32 *key_jump_cmd;

	/* Note: Context registers are saved. */
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	set_jump_tgt_here(desc, key_jump_cmd);
}
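
/*
 * Shared-descriptor prologue produced by init_sh_desc_key_aead() (a
 * sketch):
 *
 *	SHR HDR  (serial sharing, save context)
 *	JUMP if shared ---------------------------> after_keys
 *	KEY   class 2 MDHA split key (encrypted)
 *	KEY   class 1 cipher key
 *	[ LOAD imm nonce + MOVE to CONTEXT1, RFC3686 only ]
 * after_keys:
 */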

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline;
	u32 geniv, moveiv;
	u32 ctx1_iv_off = 0;
	u32 *desc;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
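
	/*
	 * Resulting CONTEXT1 byte layout for RFC3686 (a sketch, derived
	 * from the offsets used below): bytes 16-19 hold the nonce,
	 * bytes 20-27 the 8-byte IV, and bytes 28-31 the 32-bit block
	 * counter, initialised to 1.
	 */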

	if (alg->caam.geniv)
		goto skip_enc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     FIFOLDST_VLF);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_enc:
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	if (!alg->caam.geniv)
		goto skip_givenc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	if (is_rfc3686)
		goto copy_iv;

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

copy_iv:
	/* Copy generated IV from class 1 context to OFIFO */
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from outfifo to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *zero_payload_jump_cmd,
	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD | JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* if assoclen + cryptlen is ZERO, skip to ICV write */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqinlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* if cryptlen is ZERO jump to zero-payload commands */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* jump the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 2);
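
	/*
	 * The immediate value 2 above is a relative jump offset in
	 * descriptor command words: it hops over the single-word
	 * zero-payload FIFO LOAD that follows (a reading of the code,
	 * not a quote from the reference manual).
	 */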

	/* zero-payload commands */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

	/* There is no input data */
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

	/* write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD |
				   JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* jump to zero-payload command if cryptlen is zero */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* store decrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* zero-payload command */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
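
	/*
	 * REG3 is assumed to hold assoclen, loaded by the job descriptor;
	 * for rfc4106 that count includes the 8-byte IV, so subtracting 8
	 * below leaves only the AAD proper to be read (see the "Skip IV"
	 * FIFO LOAD further down).
	 */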
	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);

	/* Write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* Will write cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read encrypted data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *read_move_cmd, *write_move_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read and write assoclen + cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* In-snoop assoclen + cryptlen data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
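
	/*
	 * Example (a sketch): for hmac(sha1) the MDHA pad length is 20, so
	 * split_key_len = 2 * 20 = 40 bytes and split_key_pad_len =
	 * ALIGN(40, 16) = 48 bytes; for hmac(sha256) both come to 64 bytes.
	 */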

	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
	if (ret) {
		goto badkey;
	}

	/* append encryption key to auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

	ctx->enckeylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4106_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4543_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd;
	u32 *desc;
	u32 *nonce;
	u32 geniv;
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode &&
				 (strstr(alg_name, "rfc3686") != NULL));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}
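
	/*
	 * Note: after the adjustment above, the 4-byte nonce still sits at
	 * key + keylen in the caller's buffer; the RFC3686 blocks below
	 * read it from there rather than from ctx->key.
	 */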

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)(key + keylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load iv */
	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)(key + keylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}

	set_jump_tgt_here(desc, key_jump_cmd);

	/* load IV */
	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load Nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)(key + keylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM ) ;
append_cmd ( desc , CMD_LOAD | DISABLE_AUTO_INFO_FIFO ) ;
append_move ( desc , MOVE_WAITCOMP |
MOVE_SRC_INFIFO |
MOVE_DEST_CLASS1CTX |
( crt - > ivsize < < MOVE_LEN_SHIFT ) |
( ctx1_iv_off < < MOVE_OFFSET_SHIFT ) ) ;
append_cmd ( desc , CMD_LOAD | ENABLE_AUTO_INFO_FIFO ) ;
/* Copy generated IV to memory */
append_seq_store ( desc , crt - > ivsize ,
LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
( ctx1_iv_off < < LDST_OFFSET_SHIFT ) ) ;
/* Load Counter into CONTEXT1 reg */
if ( is_rfc3686 )
append_load_imm_u32 ( desc , ( u32 ) 1 , LDST_IMM |
LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT |
( ( ctx1_iv_off + CTR_RFC3686_IV_SIZE ) < <
LDST_OFFSET_SHIFT ) ) ;
if ( ctx1_iv_off )
append_jump ( desc , JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
( 1 < < JUMP_OFFSET_SHIFT ) ) ;
/* Load operation */
append_operation ( desc , ctx - > class1_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT ) ;
/* Perform operation */
ablkcipher_append_src_dst ( desc ) ;
ctx - > sh_desc_givenc_dma = dma_map_single ( jrdev , desc ,
desc_bytes ( desc ) ,
DMA_TO_DEVICE ) ;
if ( dma_mapping_error ( jrdev , ctx - > sh_desc_givenc_dma ) ) {
dev_err ( jrdev , " unable to map shared descriptor \n " ) ;
return - ENOMEM ;
}
# ifdef DEBUG
print_hex_dump ( KERN_ERR ,
" ablkcipher givenc shdesc@ " __stringify ( __LINE__ ) " : " ,
DUMP_PREFIX_ADDRESS , 16 , 4 , desc ,
desc_bytes ( desc ) , 1 ) ;
# endif
2011-07-15 11:21:42 +08:00
return ret ;
}
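
/*
 * Example (illustrative sketch, not part of the driver): for the
 * "rfc3686(ctr(aes))" transform, ablkcipher_setkey() above expects the
 * nonce to be appended to the AES key, since it trims
 * CTR_RFC3686_NONCE_SIZE off keylen and then reads the nonce from
 * key + keylen.  A hypothetical caller would therefore pack the key
 * material like this:
 *
 *	u8 blob[AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE];
 *
 *	memcpy(blob, aes_key, aes_keylen);
 *	memcpy(blob + aes_keylen, nonce, CTR_RFC3686_NONCE_SIZE);
 *	crypto_ablkcipher_setkey(tfm, blob,
 *				 aes_keylen + CTR_RFC3686_NONCE_SIZE);
 */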

static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
				 const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *key_jump_cmd, *desc;
	__be64 sector_size = cpu_to_be64(512);

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(ablkcipher,
					    CRYPTO_TFM_RES_BAD_KEY_LEN);
		dev_err(jrdev, "key size mismatch\n");
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* xts_ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 keys only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);

	/* Load sector size with index 40 bytes (0x28) */
	append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
	append_data(desc, (void *)&sector_size, 8);

	set_jump_tgt_here(desc, key_jump_cmd);

	/*
	 * create sequence for loading the sector index
	 * Upper 8B of IV - will be used as sector index
	 * Lower 8B of IV - will be discarded
	 */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
			 OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* xts_ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);

	/* Load sector size with index 40 bytes (0x28) */
	append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
	append_data(desc, (void *)&sector_size, 8);

	set_jump_tgt_here(desc, key_jump_cmd);

	/*
	 * create sequence for loading the sector index
	 * Upper 8B of IV - will be used as sector index
	 * Lower 8B of IV - will be discarded
	 */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Load operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	return 0;
}
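
/*
 * Example (illustrative sketch, not part of the driver): the XTS
 * descriptors above hard-code a 512-byte sector size and consume only
 * the leading 8 bytes of the 16-byte IV (the "upper 8B" in the comment
 * above) as the sector index, discarding the rest.  A hypothetical
 * caller would therefore encode the sector number in the first 8 bytes
 * of the IV; the exact byte order is an assumption that depends on the
 * IV generator in use:
 *
 *	u8 iv[AES_BLOCK_SIZE];
 *
 *	memcpy(iv, &sector_index, 8);	// consumed as the sector index
 *	memset(iv + 8, 0, 8);		// skipped by the descriptor
 */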

/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
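
/*
 * Layout note (descriptive, inferred from the *_edesc_alloc() helpers
 * below): both extended descriptors are carved out of a single
 * allocation, with the h/w job descriptor and the sec4 S/G link table
 * trailing the s/w bookkeeping fields:
 *
 *	+-------------------+ <- kzalloc(sizeof(*edesc) + desc_bytes +
 *	| struct *_edesc    |            sec4_sg_bytes, GFP_DMA | flags)
 *	+-------------------+
 *	| hw_desc[]         |   desc_bytes of job descriptor commands
 *	+-------------------+
 *	| sec4_sg entries   |   sec4_sg_bytes, dma-mapped separately
 *	+-------------------+
 */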

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents, 0, 0,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify that the h/w auth (ICV) check passed; if it did not,
	 * return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(struct aead_request *req,
			  struct aead_edesc *edesc,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	dma_addr_t ptr;
	u32 *sh_desc;

	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = sg_dma_address(req->src);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
			  in_options);

	dst_dma = src_dma;
	out_options = in_options;

	if (unlikely(req->src != req->dst)) {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	if (encrypt)
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen - authsize,
				   out_options);

	/* REG3 = assoclen */
	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
}

static void init_gcm_job(struct aead_request *req,
			 struct aead_edesc *edesc,
			 bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc = edesc->hw_desc;
	bool generic_gcm = (ivsize == 12);
	unsigned int last;

	init_aead_job(req, edesc, all_contig, encrypt);

	/* BUG This should not be specific to generic GCM. */
	last = 0;
	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
		last = FIFOLD_TYPE_LAST1;

	/* Read GCM IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
	/* Append Salt */
	if (!generic_gcm)
		append_data(desc, ctx->key + ctx->enckeylen, 4);
	/* Append IV */
	append_data(desc, req->iv, ivsize);
	/* End of blank commands */
}
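
/*
 * Note (descriptive, inferred from init_gcm_job() above): the FIFO LOAD
 * always feeds the engine a 12-byte GCM IV.  For generic gcm(aes)
 * (ivsize == 12) that is req->iv verbatim; for the rfc4106/rfc4543
 * variants (ivsize == 8) it is the 4-byte salt stored right after the
 * AES key at setkey time, concatenated with the 8-byte per-request IV:
 *
 *	12-byte GCM IV = salt[4] (at ctx->key + ctx->enckeylen) || req->iv[8]
 */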

static void init_authenc_job(struct aead_request *req,
			     struct aead_edesc *edesc,
			     bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	u32 *desc = edesc->hw_desc;
	u32 ivoffset = 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ivoffset = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;

	init_aead_job(req, edesc, all_contig, encrypt);

	if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt)))
		append_load_as_imm(desc, req->iv, ivsize,
				   LDST_CLASS_1_CCB |
				   LDST_SRCDST_BYTE_CONTEXT |
				   (ivoffset << LDST_OFFSET_SHIFT));
}

/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents + 1;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}

/*
 * Fill in ablkcipher givencrypt job descriptor
 */
static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
				    struct ablkcipher_edesc *edesc,
				    struct ablkcipher_request *req,
				    bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (!edesc->src_nents) {
		src_dma = sg_dma_address(req->src);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);

	if (iv_contig) {
		dst_dma = edesc->iv_dma;
		out_options = 0;
	} else {
		dst_dma = edesc->sec4_sg_dma +
			  sec4_sg_index * sizeof(struct sec4_sg_entry);
		out_options = LDST_SGF;
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
}
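
/*
 * Note (descriptive, inferred from init_ablkcipher_giv_job() above): the
 * output sequence is req->nbytes + ivsize long and starts at the IV's
 * DMA address (or at an S/G entry pointing to it), so the generated IV
 * lands immediately in front of the ciphertext in the output buffer.
 */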

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	int sgc;
	bool all_contig = true;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
		dst_nents = sg_count(req->dst,
				     req->assoclen + req->cryptlen +
					(encrypt ? authsize : (-authsize)));
	} else {
		src_nents = sg_count(req->src,
				     req->assoclen + req->cryptlen +
					(encrypt ? authsize : 0));
	}

	/* Check if data are contiguous. */
	all_contig = !src_nents;
	if (!all_contig) {
		src_nents = src_nents ? : 1;
		sec4_sg_len = src_nents;
	}

	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}

		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
				     DMA_TO_DEVICE);
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*all_contig_ptr = all_contig;

	sec4_sg_index = 0;
	if (!all_contig) {
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
		sec4_sg_index += src_nents;
	}
	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;
}
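
/*
 * Note (descriptive): when the data is fully contiguous no link table
 * is needed, so sec4_sg_bytes stays zero and aead_edesc_alloc() returns
 * early without dma-mapping an S/G table.  That is safe because the
 * edesc is zero-allocated and caam_unmap() only unmaps the table when
 * sec4_sg_bytes is non-zero.
 */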

static int gcm_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_encrypt(req);
}
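
/*
 * Note (descriptive): the 8-byte lower bound on req->assoclen above
 * matches the ESP header fields that rfc4106/rfc4543 treat as
 * associated data, a 4-byte SPI plus a 4-byte sequence number, as also
 * reflected in the "(SPI+Seq)" wording of the aead_edesc documentation
 * earlier in this file.
 */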

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int gcm_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_decrypt(req);
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->assoclen + req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int aead_givdecrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int ivsize = crypto_aead_ivsize(aead);

	if (req->cryptlen < ivsize)
		return -EINVAL;

	req->cryptlen -= ivsize;
	req->assoclen += ivsize;

	return aead_decrypt(req);
}
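
/*
 * Note (descriptive, an assumption about the geniv convention): for the
 * echainiv-wrapped algorithms below, the transmitted IV sits between
 * the associated data and the ciphertext.  aead_givdecrypt() therefore
 * folds the IV into the associated data, shrinking cryptlen and growing
 * assoclen by ivsize, before handing off to aead_decrypt(), so the IV
 * is authenticated but not decrypted.
 */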

/*
 * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes);

	if (req->dst != req->src)
		dst_nents = sg_count(req->dst, req->nbytes);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
	}

	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;
	else
		src_nents = src_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + 1, 0);
		sec4_sg_index += 1 + src_nents;
	}

	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_enc,
			    ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_dec,
			    ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the ablkcipher extended descriptor
 * for ablkcipher givencrypt
 */
static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
				struct skcipher_givcrypt_request *greq,
				int desc_bytes,
				bool *iv_contig_out)
{
	struct ablkcipher_request *req = &greq->creq;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->nbytes);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
		iv_contig = true;
	else
		dst_nents = dst_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		sec4_sg_index += src_nents;
	}

	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}
	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}

static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
					   CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
				edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
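
/*
 * Note (descriptive): the givencrypt path reuses ablkcipher_encrypt_done()
 * as its completion callback; once the descriptor has written the
 * generated IV and the ciphertext back to memory, the unmap/free work is
 * identical to that of a plain encrypt.
 */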

#define template_aead		template_u.aead
#define template_ablkcipher	template_u.ablkcipher

struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
};

static struct caam_alg_template driver_algs[] = {
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "ctr(aes)",
		.driver_name = "ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "chainiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "rfc3686(ctr(aes))",
		.driver_name = "rfc3686-ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "xts(aes)",
		.driver_name = "xts-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = xts_ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};
static struct caam_aead_alg driver_aeads [ ] = {
{
. aead = {
. base = {
. cra_name = " rfc4106(gcm(aes)) " ,
. cra_driver_name = " rfc4106-gcm-aes-caam " ,
. cra_blocksize = 1 ,
} ,
. setkey = rfc4106_setkey ,
. setauthsize = rfc4106_setauthsize ,
. encrypt = ipsec_gcm_encrypt ,
. decrypt = ipsec_gcm_decrypt ,
. ivsize = 8 ,
. maxauthsize = AES_BLOCK_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM ,
} ,
} ,
{
. aead = {
. base = {
. cra_name = " rfc4543(gcm(aes)) " ,
. cra_driver_name = " rfc4543-gcm-aes-caam " ,
. cra_blocksize = 1 ,
} ,
. setkey = rfc4543_setkey ,
. setauthsize = rfc4543_setauthsize ,
. encrypt = ipsec_gcm_encrypt ,
. decrypt = ipsec_gcm_decrypt ,
. ivsize = 8 ,
. maxauthsize = AES_BLOCK_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM ,
} ,
} ,
/* Galois Counter Mode */
{
. aead = {
. base = {
. cra_name = " gcm(aes) " ,
. cra_driver_name = " gcm-aes-caam " ,
. cra_blocksize = 1 ,
} ,
. setkey = gcm_setkey ,
. setauthsize = gcm_setauthsize ,
. encrypt = gcm_encrypt ,
. decrypt = gcm_decrypt ,
. ivsize = 12 ,
. maxauthsize = AES_BLOCK_SIZE ,
} ,
. caam = {
. class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM ,
} ,
} ,
/* single-pass ipsec_esp descriptor */
{
. aead = {
. base = {
. cra_name = " authenc(hmac(md5), "
" ecb(cipher_null)) " ,
. cra_driver_name = " authenc-hmac-md5- "
" ecb-cipher_null-caam " ,
. cra_blocksize = NULL_BLOCK_SIZE ,
} ,
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
2014-03-14 17:46:52 +02:00
. ivsize = NULL_IV_SIZE ,
2015-07-30 17:53:17 +08:00
. maxauthsize = MD5_DIGEST_SIZE ,
} ,
. caam = {
. class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP ,
. alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC ,
} ,
} ,
{
. aead = {
. base = {
. cra_name = " authenc(hmac(sha1), "
" ecb(cipher_null)) " ,
. cra_driver_name = " authenc-hmac-sha1- "
" ecb-cipher_null-caam " ,
. cra_blocksize = NULL_BLOCK_SIZE ,
2014-03-14 17:46:52 +02:00
} ,
2015-07-30 17:53:17 +08:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
. ivsize = NULL_IV_SIZE ,
. maxauthsize = SHA1_DIGEST_SIZE ,
} ,
. caam = {
. class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP ,
. alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC ,
} ,
2014-03-14 17:46:52 +02:00
} ,
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " authenc(hmac(sha224), "
" ecb(cipher_null)) " ,
. cra_driver_name = " authenc-hmac-sha224- "
" ecb-cipher_null-caam " ,
. cra_blocksize = NULL_BLOCK_SIZE ,
} ,
2014-03-14 17:46:52 +02:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
2014-03-14 17:46:52 +02:00
. ivsize = NULL_IV_SIZE ,
. maxauthsize = SHA224_DIGEST_SIZE ,
2015-07-30 17:53:17 +08:00
} ,
. caam = {
. class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP ,
. alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC ,
} ,
2014-03-14 17:46:52 +02:00
} ,
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " authenc(hmac(sha256), "
" ecb(cipher_null)) " ,
. cra_driver_name = " authenc-hmac-sha256- "
" ecb-cipher_null-caam " ,
. cra_blocksize = NULL_BLOCK_SIZE ,
} ,
2014-03-14 17:46:52 +02:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
2014-03-14 17:46:52 +02:00
. ivsize = NULL_IV_SIZE ,
. maxauthsize = SHA256_DIGEST_SIZE ,
2015-07-30 17:53:17 +08:00
} ,
. caam = {
. class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP ,
. alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC ,
} ,
2014-03-14 17:46:52 +02:00
} ,
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " authenc(hmac(sha384), "
" ecb(cipher_null)) " ,
. cra_driver_name = " authenc-hmac-sha384- "
" ecb-cipher_null-caam " ,
. cra_blocksize = NULL_BLOCK_SIZE ,
} ,
2014-03-14 17:46:52 +02:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
2014-03-14 17:46:52 +02:00
. ivsize = NULL_IV_SIZE ,
. maxauthsize = SHA384_DIGEST_SIZE ,
2015-07-30 17:53:17 +08:00
} ,
. caam = {
. class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP ,
. alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC ,
} ,
2014-03-14 17:46:52 +02:00
} ,
{
2015-07-30 17:53:17 +08:00
. aead = {
. base = {
. cra_name = " authenc(hmac(sha512), "
" ecb(cipher_null)) " ,
. cra_driver_name = " authenc-hmac-sha512- "
" ecb-cipher_null-caam " ,
. cra_blocksize = NULL_BLOCK_SIZE ,
} ,
2014-03-14 17:46:52 +02:00
. setkey = aead_setkey ,
. setauthsize = aead_setauthsize ,
2015-07-30 17:53:17 +08:00
. encrypt = aead_encrypt ,
. decrypt = aead_decrypt ,
2014-03-14 17:46:52 +02:00
. ivsize = NULL_IV_SIZE ,
. maxauthsize = SHA512_DIGEST_SIZE ,
2015-07-30 17:53:17 +08:00
} ,
. caam = {
. class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP ,
. alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC ,
} ,
} ,
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
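	/*
	 * The same HMAC ladder repeats over cbc(des3_ede); only the class1
	 * opcode (OP_ALG_ALGSEL_3DES) and the block/IV sizes change.
	 */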
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
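	/*
	 * The cbc(des) ladder below mirrors the 3DES entries, with
	 * DES_BLOCK_SIZE and OP_ALG_ALGSEL_DES substituted throughout.
	 */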
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
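	/*
	 * CTR-mode (rfc3686) entries follow. cra_blocksize is 1 because
	 * CTR turns AES into a stream cipher, and IV generation uses
	 * seqiv() rather than echainiv(); .rfc3686 = true flags the
	 * descriptor-building code to handle the RFC 3686 nonce.
	 */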
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(md5),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-md5-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(sha1),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(sha224),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha256),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha384),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha512),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
};
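/*
 * Illustrative sketch (not part of the driver): once registered, these
 * AEADs are reached through the generic crypto API by cra_name. A caller
 * such as an IPsec stack might do, with error handling elided and "key"/
 * "keylen" assumed for illustration only:
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 *	... submit work via aead_request_alloc()/crypto_aead_encrypt() ...
 *	crypto_free_aead(tfm);
 *
 * The crypto core resolves the name to the highest-priority matching
 * implementation; CAAM_CRA_PRIORITY is what lets these entries win over
 * software fallbacks.
 */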
struct caam_crypto_alg {
	struct crypto_alg crypto_alg;
	struct list_head entry;
	struct caam_alg_entry caam;
};
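/*
 * caam_init_common - per-tfm setup shared by the crypto_alg and aead_alg
 * entry points: reserve a job ring for this transform and seed the
 * OPERATION-command templates used when its shared descriptors are built
 * (e.g. a cbc(aes) entry ends up with OP_TYPE_CLASS1_ALG |
 * OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC in ctx->class1_alg_type).
 */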
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
{
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;

	return 0;
}
static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg =
		 container_of(alg, struct caam_aead_alg, aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}
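/*
 * caam_exit_common - undo caam_init_common: unmap any shared descriptors
 * and key material this tfm mapped for DMA, then release its job ring.
 * Every unmap is guarded so a partially initialized context (e.g. one
 * whose setkey never ran) tears down cleanly.
 */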
static void caam_exit_common(struct caam_ctx *ctx)
{
	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
	if (ctx->key_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
		dma_unmap_single(ctx->jrdev, ctx->key_dma,
				 ctx->enckeylen + ctx->split_key_pad_len,
				 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	caam_exit_common(crypto_tfm_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}
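/*
 * caam_algapi_exit - mirror of caam_algapi_init: unregister whichever
 * driver_aeads entries were registered, then walk alg_list and free the
 * dynamically allocated crypto_alg wrappers.
 */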
static void __exit caam_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
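/*
 * caam_alg_alloc - wrap one driver_algs template in a heap-allocated
 * caam_crypto_alg so the ablkcipher/givcipher half of the driver can go
 * through the legacy crypto_alg registration path.
 */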
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_GIVCIPHER:
		alg->cra_type = &crypto_givcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	}

	t_alg->caam.class1_alg_type = template->class1_alg_type;
	t_alg->caam.class2_alg_type = template->class2_alg_type;
	t_alg->caam.alg_op = template->alg_op;

	return t_alg;
}
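/*
 * caam_aead_alg_init - fill in the boilerplate half of a driver_aeads
 * entry at registration time; the table above only spells out what
 * differs per algorithm (names, sizes, and CAAM opcode words).
 */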
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}
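/*
 * caam_algapi_init - module entry point. The controller driver must
 * already have attached its private data to the "fsl,sec-v4.0" platform
 * device; the CHA version/instantiation registers read below decide
 * which table entries are actually registered on a given part.
 */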
static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;
	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	INIT_LIST_HEAD(&alg_list);

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
		md_limit = SHA256_DIGEST_SIZE;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_crypto_alg *t_alg;
		struct caam_alg_template *alg = driver_algs + i;
		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		t_alg = caam_alg_alloc(alg);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
			continue;
		}

		list_add_tail(&t_alg->entry, &alg_list);
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
			if (alg_aai == OP_ALG_AAI_GCM)
				continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");