/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place.  So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with.  Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16
/* length of descriptors text */
#define DESC_JOB_IO_LEN			(CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)

#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
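
/*
 * Worked example of the inline-key budget (a sketch, assuming 32-bit command
 * words and 64-bit pointers, i.e. CAAM_CMD_SZ == 4 and CAAM_PTR_SZ == 8):
 * DESC_JOB_IO_LEN = 5 * 4 + 3 * 8 = 44 bytes and DESC_AEAD_GIVENC_LEN =
 * (4 + 16 + 7) * 4 = 108 bytes.  For authenc(hmac(sha1),cbc(aes)) with a
 * 128-bit cipher key, the key material is 48 (padded split key) + 16 bytes,
 * so 108 + 44 + 48 + 16 = 216 fits in a 256-byte descriptor buffer and the
 * keys can be inlined; for hmac(sha512) with aes-256, 108 + 44 + 128 + 32 =
 * 312 exceeds it, and the setup code below falls back to referencing the
 * keys by DMA address instead.
 */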

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define xstr(s) str(s)
#define str(s) #s
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}
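
/*
 * The jump pair above is an if/else in descriptor form (a sketch of the
 * control flow, not additional commands):
 *
 *	if (descriptor is shared)		// JUMP_COND_SHRD taken
 *		OPERATION decrypt | AAI_DK;	// key already in decrypt form
 *	else
 *		OPERATION decrypt;		// expand the raw key first
 *
 * The DK (decrypt key) bit tells the CHA that the class 1 key register
 * already holds the decryption key schedule, which is the case when a
 * previous job using the same shared descriptor has run.
 */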
/*
 * Wait for completion of class 1 key loading before allowing
 * error propagation
 */
static inline void append_dec_shr_done(u32 *desc)
{
	u32 *jump_cmd;

	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | ivsize);
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)
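
/*
 * Illustration of the DMA layouts these flags describe (a sketch):
 *
 *	GIV_SRC_CONTIG:	[ assoc ][ iv ][ payload ]  as one contiguous run
 *	GIV_DST_CONTIG:	[ iv ][ payload ]           iv immediately precedes dst
 *
 * When a flag is clear, the corresponding side is handed to the hardware
 * through a link table instead of a single pointer.
 */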

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};
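
/*
 * Layout of ctx->key for aead algorithms (a sketch; ablkcipher stores only
 * the raw cipher key at offset 0), as consumed by append_key_aead() below:
 *
 *	offset 0              split_key_pad_len
 *	| MDHA split key | pad | class 1 cipher key |
 *
 * split_key_len bytes of actual split key, padded out to split_key_pad_len,
 * followed by enckeylen bytes of encryption key.
 */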

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline)
{
	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_WAIT);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd;
	u32 geniv, moveiv;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);
	aead_append_ld_iv(desc, tfm->ivsize);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_WAIT);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	/* Only propagate error immediately if shared */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, key_jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
	set_jump_tgt_here(desc, jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	aead_append_ld_iv(desc, tfm->ivsize);

	append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
	append_dec_shr_done(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy generated IV from class 1 context to the output fifo */
	append_move(desc, MOVE_SRC_CLASS1CTX |
		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from class 1 ctx to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;

	aead_set_sh_desc(authenc);

	return 0;
}

struct split_key_result {
	struct completion completion;
	int err;
};

static void split_key_done(struct device *dev, u32 *desc, u32 err,
			   void *context)
{
	struct split_key_result *res = context;

#ifdef DEBUG
	dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	res->err = err;

	complete(&res->completion);
}

/*
 * get a split ipad/opad key
 *
 * Split key generation-----------------------------------------------
 *
 * [00] 0xb0810008    jobdesc: stidx=1 share=never len=8
 * [01] 0x04000014        key: class2->keyreg len=20
 *			@0xffe01000
 * [03] 0x84410014  operation: cls2-op sha1 hmac init dec
 * [04] 0x24940000     fifold: class2 msgdata-last2 len=0 imm
 * [05] 0xa4000001       jump: class2 local all ->1 [06]
 * [06] 0x64260028    fifostr: class2 mdsplit-jdk len=40
 *			@0xffe04000
 */
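
/*
 * Sizing example (illustrative): for hmac(sha1) the split key is two
 * 20-byte SHA-1 intermediate states (the ipad and opad halves), so
 * split_key_len = 20 * 2 = 40 and split_key_pad_len = ALIGN(40, 16) = 48.
 * The descriptor dump above shows exactly this case: the key command loads
 * a 20-byte raw key (len=20) and the fifostr command stores the 40-byte
 * split result (len=40).
 */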
static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t dma_addr_in, dma_addr_out;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc)
		return -ENOMEM;

	init_job_desc(desc, 0);

	dma_addr_in = dma_map_single(jrdev, (void *)key_in, authkeylen,
				     DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, dma_addr_in)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	append_key(desc, dma_addr_in, authkeylen, CLASS_2 |
		   KEY_DEST_CLASS_REG);

	/* Sets MDHA up into an HMAC-INIT */
	append_operation(desc, ctx->alg_op | OP_ALG_DECRYPT |
			 OP_ALG_AS_INIT);

	/*
	 * do a FIFO_LOAD of zero, this will trigger the internal key expansion
	 * into both pads inside MDHA
	 */
	append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
				FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);

	/*
	 * FIFO_STORE with the explicit split-key content store
	 * (0x26 output type)
	 */
	dma_addr_out = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dma_addr_out)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, dma_addr_in, authkeylen,
				 DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}
	append_fifo_store(desc, dma_addr_out, ctx->split_key_len,
			  LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, authkeylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			       ctx->split_key_pad_len, 1);
#endif
	}

	dma_unmap_single(jrdev, dma_addr_out, ctx->split_key_pad_len,
			 DMA_FROM_DEVICE);
	dma_unmap_single(jrdev, dma_addr_in, authkeylen, DMA_TO_DEVICE);

	kfree(desc);

	return ret;
}

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct rtattr *rta = (void *)key;
	struct crypto_authenc_key_param *param;
	unsigned int authkeylen;
	unsigned int enckeylen;
	int ret = 0;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < enckeylen)
		goto badkey;

	authkeylen = keylen - enckeylen;

	if (keylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keylen, enckeylen, authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_key(ctx, key, authkeylen);
	if (ret)
		goto badkey;

	/* append encryption key to the auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + enckeylen, 1);
#endif

	ctx->enckeylen = enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd, *jump_cmd;
	u32 *desc;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_WAIT);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);

	/* Load iv */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_WAIT);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* For aead, only propagate error immediately if shared */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, key_jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
	set_jump_tgt_here(desc, jump_cmd);

	/* load IV */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Choose operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	/* Wait for key to load before allowing propagating error */
	append_dec_shr_done(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}

struct link_tbl_entry {
	u64 ptr;
	u32 len;
	u8 reserved;
	u8 buf_pool_id;
	u16 offset;
};
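
/*
 * Each entry is 16 bytes and mirrors the h/w scatter/gather format: a
 * 64-bit pointer, a 32-bit length whose bit 30 marks the final entry
 * (see sg_to_link_tbl_last() below), and a buffer pool id/offset pair
 * that this driver leaves at zero.
 */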

/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 * @link_tbl_bytes: length of dma mapped link_tbl space
 * @link_tbl_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int link_tbl_bytes;
	dma_addr_t link_tbl_dma;
	struct link_tbl_entry *link_tbl;
	u32 hw_desc[0];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 * @link_tbl_bytes: length of dma mapped link_tbl space
 * @link_tbl_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int link_tbl_bytes;
	dma_addr_t link_tbl_dma;
	struct link_tbl_entry *link_tbl;
	u32 hw_desc[0];
};

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents, int dst_nents,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t link_tbl_dma,
		       int link_tbl_bytes)
{
	if (unlikely(dst != src)) {
		dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (link_tbl_bytes)
		dma_unmap_single(dev, link_tbl_dma, link_tbl_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->link_tbl_dma,
		   edesc->link_tbl_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->link_tbl_dma,
		   edesc->link_tbl_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen +
		       ctx->authsize + 4, 1);
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
		       req->cryptlen, 1);
#endif

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
		       sizeof(struct iphdr) + req->assoclen +
		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
		       ctx->authsize + 36, 1);
	if (!err && edesc->link_tbl_bytes) {
		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);

		print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
			       sg->length + ctx->authsize + 16, 1);
	}
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void sg_to_link_tbl_one(struct link_tbl_entry *link_tbl_ptr,
			       dma_addr_t dma, u32 len, u32 offset)
{
	link_tbl_ptr->ptr = dma;
	link_tbl_ptr->len = len;
	link_tbl_ptr->reserved = 0;
	link_tbl_ptr->buf_pool_id = 0;
	link_tbl_ptr->offset = offset;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "link_tbl_ptr@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, link_tbl_ptr,
		       sizeof(struct link_tbl_entry), 1);
#endif
}

/*
 * convert scatterlist to h/w link table format
 * but does not have final bit; instead, returns last entry
 */
static struct link_tbl_entry *sg_to_link_tbl(struct scatterlist *sg,
					     int sg_count, struct link_tbl_entry
					     *link_tbl_ptr, u32 offset)
{
	while (sg_count) {
		sg_to_link_tbl_one(link_tbl_ptr, sg_dma_address(sg),
				   sg_dma_len(sg), offset);
		link_tbl_ptr++;
		sg = sg_next(sg);
		sg_count--;
	}
	return link_tbl_ptr - 1;
}

/*
 * convert scatterlist to h/w link table format
 * scatterlist must have been previously dma mapped
 */
static void sg_to_link_tbl_last(struct scatterlist *sg, int sg_count,
				struct link_tbl_entry *link_tbl_ptr, u32 offset)
{
	link_tbl_ptr = sg_to_link_tbl(sg, sg_count, link_tbl_ptr, offset);
	link_tbl_ptr->len |= 0x40000000;
}
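
/*
 * 0x40000000 sets bit 30 of the length word, which the CAAM DMA engine
 * interprets as the "final" flag terminating a link table.  For example,
 * for a 3-segment scatterlist:
 *
 *	sg_to_link_tbl_last(sg, 3, tbl, 0);
 *	// tbl[0].len = len0, tbl[1].len = len1, tbl[2].len = len2 | BIT(30)
 */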

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
			  struct aead_edesc *edesc,
			  struct aead_request *req,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, link_tbl_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->link_tbl_dma;
		link_tbl_index += (edesc->assoc_nents ? : 1) + 1 +
				  (edesc->src_nents ? : 1);
		in_options = LDST_SGF;
	}
	if (encrypt)
		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
				  req->cryptlen - authsize, in_options);
	else
		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
				  req->cryptlen, in_options);

	if (likely(req->src == req->dst)) {
		if (all_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = src_dma + sizeof(struct link_tbl_entry) *
				  ((edesc->assoc_nents ? : 1) + 1);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->link_tbl_dma +
				  link_tbl_index *
				  sizeof(struct link_tbl_entry);
			out_options = LDST_SGF;
		}
	}
	if (encrypt)
		append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
	else
		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
				   out_options);
}
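
/*
 * Note on the length math above: aead_encrypt() adds authsize to
 * req->cryptlen before this is called, so for encryption the input
 * sequence covers assoc + iv + plaintext (cryptlen - authsize) and the
 * output sequence covers ciphertext + ICV (cryptlen).  For decryption
 * the input covers assoc + iv + ciphertext + ICV and the output only
 * the recovered plaintext (cryptlen - authsize).
 */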

/*
 * Fill in aead givencrypt job descriptor
 */
static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
			      struct aead_edesc *edesc,
			      struct aead_request *req,
			      int contig)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, link_tbl_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (contig & GIV_SRC_CONTIG) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->link_tbl_dma;
		link_tbl_index += edesc->assoc_nents + 1 + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
			  req->cryptlen - authsize, in_options);

	if (contig & GIV_DST_CONTIG) {
		dst_dma = edesc->iv_dma;
	} else {
		if (likely(req->src == req->dst)) {
			dst_dma = src_dma + sizeof(struct link_tbl_entry) *
				  edesc->assoc_nents;
			out_options = LDST_SGF;
		} else {
			dst_dma = edesc->link_tbl_dma +
				  link_tbl_index *
				  sizeof(struct link_tbl_entry);
			out_options = LDST_SGF;
		}
	}

	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
}

/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, link_tbl_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->link_tbl_dma;
		link_tbl_index += (iv_contig ? 0 : 1) + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->link_tbl_dma +
				  sizeof(struct link_tbl_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->link_tbl_dma +
				  link_tbl_index * sizeof(struct link_tbl_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}

/*
 * derive number of elements in scatterlist
 */
static int sg_count(struct scatterlist *sg_list, int nbytes)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	while (nbytes > 0) {
		sg_nents++;
		nbytes -= sg->length;
		if (!sg_is_last(sg) && (sg + 1)->length == 0)
			BUG(); /* chained scatterlists are not supported */
		sg = scatterwalk_sg_next(sg);
	}

	if (likely(sg_nents == 1))
		return 0;

	return sg_nents;
}
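
/*
 * Note the return convention: 0 means "single contiguous segment" (callers
 * test this to skip the link table entirely), otherwise the segment count
 * is returned.  E.g. a two-segment scatterlist covering nbytes yields 2,
 * while one segment yields 0, and callers use the "?: 1" idiom below to
 * recover the real count for dma_map_sg().
 */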

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	bool all_contig = true;
	int ivsize = crypto_aead_ivsize(aead);
	int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;

	assoc_nents = sg_count(req->assoc, req->assoclen);
	src_nents = sg_count(req->src, req->cryptlen);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen);

	sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
			 DMA_BIDIRECTIONAL);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
	}

	/* Check if data are contiguous */
	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize !=
	    sg_dma_address(req->src)) {
		all_contig = false;
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		link_tbl_len = assoc_nents + 1 + src_nents;
	}
	link_tbl_len += dst_nents;

	link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			link_tbl_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->link_tbl_bytes = link_tbl_bytes;
	edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
			  desc_bytes;
	edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
					     link_tbl_bytes, DMA_TO_DEVICE);
	*all_contig_ptr = all_contig;

	link_tbl_index = 0;
	if (!all_contig) {
		sg_to_link_tbl(req->assoc,
			       (assoc_nents ? : 1),
			       edesc->link_tbl +
			       link_tbl_index, 0);
		link_tbl_index += assoc_nents ? : 1;
		sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
				   iv_dma, ivsize, 0);
		link_tbl_index += 1;
		sg_to_link_tbl_last(req->src,
				    (src_nents ? : 1),
				    edesc->link_tbl +
				    link_tbl_index, 0);
		link_tbl_index += src_nents ? : 1;
	}
	if (dst_nents) {
		sg_to_link_tbl_last(req->dst, dst_nents,
				    edesc->link_tbl + link_tbl_index, 0);
	}

	return edesc;
}
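
/*
 * Resulting link table layout when the data is not contiguous (a sketch):
 *
 *	[ assoc segments ][ iv ][ src segments (final) ][ dst segments (final) ]
 *
 * The sequence-in pointer covers the first three groups; when req->dst
 * differs from req->src, the sequence-out pointer starts at the dst group
 * (see init_aead_job() above).
 */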

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	req->cryptlen += ctx->authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
		      all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_dec,
		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the aead extended descriptor for aead givencrypt
 */
static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
					       *greq, int desc_bytes,
					       u32 *contig_ptr)
{
	struct aead_request *req = &greq->areq;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
	int ivsize = crypto_aead_ivsize(aead);
	int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;

	assoc_nents = sg_count(req->assoc, req->assoclen);
	src_nents = sg_count(req->src, req->cryptlen);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen);

	sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
			 DMA_BIDIRECTIONAL);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
	}

	/* Check if data are contiguous */
	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
		contig &= ~GIV_SRC_CONTIG;
	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
		contig &= ~GIV_DST_CONTIG;
	if (unlikely(req->src != req->dst)) {
		dst_nents = dst_nents ? : 1;
		link_tbl_len += 1;
	}
	if (!(contig & GIV_SRC_CONTIG)) {
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		link_tbl_len += assoc_nents + 1 + src_nents;
		if (likely(req->src == req->dst))
			contig &= ~GIV_DST_CONTIG;
	}
	link_tbl_len += dst_nents;

	link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			link_tbl_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->link_tbl_bytes = link_tbl_bytes;
	edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
			  desc_bytes;
	edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
					     link_tbl_bytes, DMA_TO_DEVICE);
	*contig_ptr = contig;

	link_tbl_index = 0;
	if (!(contig & GIV_SRC_CONTIG)) {
		sg_to_link_tbl(req->assoc, assoc_nents,
			       edesc->link_tbl +
			       link_tbl_index, 0);
		link_tbl_index += assoc_nents;
		sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
				   iv_dma, ivsize, 0);
		link_tbl_index += 1;
		sg_to_link_tbl_last(req->src, src_nents,
				    edesc->link_tbl +
				    link_tbl_index, 0);
		link_tbl_index += src_nents;
	}
	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
		sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
				   iv_dma, ivsize, 0);
		link_tbl_index += 1;
		sg_to_link_tbl_last(req->dst, dst_nents,
				    edesc->link_tbl + link_tbl_index, 0);
	}

	return edesc;
}

static int aead_givencrypt(struct aead_givcrypt_request *areq)
{
	struct aead_request *req = &areq->areq;
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 contig;
	u32 *desc;
	int ret = 0;

	req->cryptlen += ctx->authsize;

	/* allocate extended descriptor */
	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
				     CAAM_CMD_SZ, &contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_giv_job(ctx->sh_desc_givenc,
			  ctx->sh_desc_givenc_dma, edesc, req, contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
/*
 * allocate and map the extended descriptor for an ablkcipher request
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, link_tbl_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int link_tbl_index;

	src_nents = sg_count(req->src, req->nbytes);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->nbytes);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;
	else
		src_nents = src_nents ? : 1;
	link_tbl_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			 sizeof(struct link_tbl_entry);
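
	/*
	 * Worked example (illustrative): with a single flat source buffer
	 * that happens to sit immediately after the DMA-mapped IV, the
	 * hardware can be fed one contiguous region and no link table is
	 * needed (iv_contig == true).  Otherwise the table holds:
	 *
	 *   [ IV entry ][ src sg entries... (last) ][ dst sg entries... (last) ]
	 *
	 * with the dst entries present only when req->dst != req->src.
	 */
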
	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
			link_tbl_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->link_tbl_bytes = link_tbl_bytes;
	edesc->link_tbl = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			  desc_bytes;

	link_tbl_index = 0;
	if (!iv_contig) {
		sg_to_link_tbl_one(edesc->link_tbl, iv_dma, ivsize, 0);
		sg_to_link_tbl_last(req->src, src_nents,
				    edesc->link_tbl + 1, 0);
		link_tbl_index += 1 + src_nents;
	}

	if (unlikely(dst_nents)) {
		sg_to_link_tbl_last(req->dst, dst_nents,
				    edesc->link_tbl + link_tbl_index, 0);
	}

	edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
					     link_tbl_bytes, DMA_TO_DEVICE);
	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher link_tbl@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl,
		       link_tbl_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_enc,
			    ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_dec,
			    ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

#define template_aead		template_u.aead
#define template_ablkcipher	template_u.ablkcipher

struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
		struct aead_alg aead;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
		struct rng_alg rng;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
};

static struct caam_alg_template driver_algs[] = {
	/* single-pass ipsec_esp descriptor */
	{
		.name = "authenc(hmac(md5),cbc(aes))",
		.driver_name = "authenc-hmac-md5-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(aes))",
		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(aes))",
		.driver_name = "authenc-hmac-sha224-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(aes))",
		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(aes))",
		.driver_name = "authenc-hmac-sha384-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(aes))",
		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(des3_ede))",
		.driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(des))",
		.driver_name = "authenc-hmac-md5-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des))",
		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(des))",
		.driver_name = "authenc-hmac-sha224-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des))",
		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des))",
		.driver_name = "authenc-hmac-sha384-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des))",
		.driver_name = "authenc-hmac-sha512-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	}
};
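
/*
 * Consumer-side usage sketch (illustrative only, not part of this
 * driver): kernel code reaches the algorithms above through the
 * generic crypto API by cra_name, and CAAM_CRA_PRIORITY makes these
 * "-caam" implementations win over software ones.  The function name
 * and key handling below are assumptions made for the example.
 */
#if 0	/* example only, never built */
static int example_cbc_aes_caam(const u8 *key, unsigned int keylen)
{
	struct crypto_ablkcipher *tfm;
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, keylen);
	/* ...build an ablkcipher_request and call
	 * crypto_ablkcipher_encrypt() here... */

	crypto_free_ablkcipher(tfm);
	return ret;
}
#endif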

struct caam_crypto_alg {
	struct list_head entry;
	struct device *ctrldev;
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	struct crypto_alg crypto_alg;
};

static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
	struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
	int tgt_jr = atomic_inc_return(&priv->tfm_count);

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = priv->algapi_jr[(tgt_jr / 2) % priv->num_jrs_for_algapi];
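
	/*
	 * Worked example (illustrative): tfm_count starts at -1, so
	 * tgt_jr takes the values 0, 1, 2, 3, ...  With two job rings,
	 * (tgt_jr / 2) % 2 assigns tfms #0 and #1 to ring 0, #2 and #3
	 * to ring 1, #4 and #5 back to ring 0, and so on: two tfms per
	 * ring per round, each tfm pinned to exactly one ring.
	 */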

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;

	return 0;
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
}

static void __exit caam_algapi_exit(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	struct caam_crypto_alg *t_alg, *n;
	int i, err;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev)
		return;

	ctrldev = &pdev->dev;
	of_node_put(dev_node);
	priv = dev_get_drvdata(ctrldev);

	if (!priv->alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	for (i = 0; i < priv->total_jobrs; i++) {
		err = caam_jr_deregister(priv->algapi_jr[i]);
		if (err < 0)
			break;
	}
	kfree(priv->algapi_jr);
}

static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
					      struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
	if (!t_alg) {
		dev_err(ctrldev, "failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead = template->template_aead;
		break;
	}

	t_alg->class1_alg_type = template->class1_alg_type;
	t_alg->class2_alg_type = template->class2_alg_type;
	t_alg->alg_op = template->alg_op;
	t_alg->ctrldev = ctrldev;

	return t_alg;
}

static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev, **jrdev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);

	INIT_LIST_HEAD(&priv->alg_list);

	jrdev = kmalloc(sizeof(*jrdev) * priv->total_jobrs, GFP_KERNEL);
	if (!jrdev) {
		of_node_put(dev_node);
		return -ENOMEM;
	}

	for (i = 0; i < priv->total_jobrs; i++) {
		err = caam_jr_register(ctrldev, &jrdev[i]);
		if (err < 0)
			break;
	}
	if (err < 0 && i == 0) {
		dev_err(ctrldev, "algapi error in job ring registration: %d\n",
			err);
		kfree(jrdev);
		of_node_put(dev_node);
		return err;
	}

	priv->num_jrs_for_algapi = i;
	priv->algapi_jr = jrdev;
	atomic_set(&priv->tfm_count, -1);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_crypto_alg *t_alg;

		t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(ctrldev, "%s alg allocation failed\n",
				 driver_algs[i].driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &priv->alg_list);
	}

	if (!list_empty(&priv->alg_list))
		dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(dev_node, "compatible", NULL));

	/*
	 * dev_node is still needed by the of_get_property() call above,
	 * so drop the reference only now.
	 */
	of_node_put(dev_node);

	return err;
}

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");