/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior <sebastian at breakpoint dot cc>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/aes.h>
#include <crypto/des.h>

#include "cesa.h"

struct mv_cesa_des_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES_KEY_SIZE];
};

struct mv_cesa_des3_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES3_EDE_KEY_SIZE];
};

struct mv_cesa_aes_ctx {
	struct mv_cesa_ctx base;
	struct crypto_aes_ctx aes;
};

struct mv_cesa_skcipher_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
	struct mv_cesa_sg_dma_iter dst;
};

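/*
 * The DMA iterator helpers below walk the source and destination
 * scatterlists in SRAM-payload-sized chunks; each chunk is handled by one
 * crypto operation in the TDMA chain.
 */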
static inline void
mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter,
			       struct skcipher_request *req)
{
	mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter)
{
	iter->src.op_offset = 0;
	iter->dst.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline void
mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

	if (req->dst != req->src) {
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_TO_DEVICE);
	} else {
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_BIDIRECTIONAL);
	}
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_skcipher_dma_cleanup(req);
}

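/*
 * Standard (non-DMA) mode: the CPU copies the operation descriptor and one
 * chunk of payload into the engine SRAM, then starts the accelerator and
 * lets the ACCEL0 completion interrupt drive the next step.
 */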
static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len = min_t(size_t, req->cryptlen - sreq->offset,
			   CESA_SA_SRAM_PAYLOAD_SIZE);

	mv_cesa_adjust_op(engine, &sreq->op);
	memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));

	len = sg_pcopy_to_buffer(req->src, creq->src_nents,
				 engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				 len, sreq->offset);

	sreq->size = len;
	mv_cesa_set_crypt_op_len(&sreq->op, len);

	/* FIXME: only update enc_len field */
	if (!sreq->skip_ctx) {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
		sreq->skip_ctx = true;
	} else {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
	}

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

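/*
 * Copy the chunk processed by the engine back from SRAM; returning
 * -EINPROGRESS tells the caller that more data remains and another step
 * is required.
 */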
static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
					u32 status)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len;

	len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
				   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				   sreq->size, sreq->offset);

	sreq->offset += len;
	if (sreq->offset < req->cryptlen)
		return -EINPROGRESS;

	return 0;
}

static int mv_cesa_skcipher_process(struct crypto_async_request *req,
				    u32 status)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
	struct mv_cesa_req *basereq = &creq->base;

	if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
		return mv_cesa_skcipher_std_process(skreq, status);

	return mv_cesa_dma_process(basereq, status);
}

static void mv_cesa_skcipher_step(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->base);
	else
		mv_cesa_skcipher_std_step(skreq);
}

static inline void
mv_cesa_skcipher_dma_prepare(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static inline void
mv_cesa_skcipher_std_prepare(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;

	sreq->size = 0;
	sreq->offset = 0;
}

static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
					    struct mv_cesa_engine *engine)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_skcipher_dma_prepare(skreq);
	else
		mv_cesa_skcipher_std_prepare(skreq);
}

static inline void
mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);

	mv_cesa_skcipher_cleanup(skreq);
}

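/*
 * On completion, release the load accounted to this request and copy the
 * output IV back into the request: from the last TDMA op context in DMA
 * mode, or from the engine SRAM in standard mode.
 */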
static void
mv_cesa_skcipher_complete(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int ivsize;

	atomic_sub(skreq->cryptlen, &engine->load);
	ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
		struct mv_cesa_req *basereq;

		basereq = &creq->base;
		memcpy(skreq->iv, basereq->chain.last->op->ctx.blkcipher.iv,
		       ivsize);
	} else {
		memcpy_fromio(skreq->iv,
			      engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
			      ivsize);
	}
}

static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
	.step = mv_cesa_skcipher_step,
	.process = mv_cesa_skcipher_process,
	.cleanup = mv_cesa_skcipher_req_cleanup,
	.complete = mv_cesa_skcipher_complete,
};

static void mv_cesa_skcipher_cra_exit(struct crypto_tfm *tfm)
{
	void *ctx = crypto_tfm_ctx(tfm);

	memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize);
}

static int mv_cesa_skcipher_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->ops = &mv_cesa_skcipher_req_ops;

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct mv_cesa_skcipher_req));

	return 0;
}

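/*
 * The engine wants the decryption key material laid out starting from the
 * end of the expanded encryption schedule, so the tail of key_enc is
 * mirrored into key_dec for 192- and 256-bit keys.
 */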
static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	int remaining;
	int offset;
	int ret;
	int i;

	ret = crypto_aes_expand_key(&ctx->aes, key, len);
	if (ret) {
		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	remaining = (ctx->aes.key_length - 16) / 4;
	offset = ctx->aes.key_length + 24 - remaining;
	for (i = 0; i < remaining; i++)
		ctx->aes.key_dec[4 + i] =
			cpu_to_le32(ctx->aes.key_enc[offset + i]);

	return 0;
}

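/* DES setkey: reject weak keys when the transform forbids them. */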
static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 tmp[DES_EXPKEY_WORDS];
	int ret;

	if (len != DES_KEY_SIZE) {
		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ret = des_ekey(tmp, key);
	if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	memcpy(ctx->key, key, DES_KEY_SIZE);

	return 0;
}

static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
				   const u8 *key, unsigned int len)
{
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(crypto_skcipher_tfm(cipher));

	if (len != DES3_EDE_KEY_SIZE) {
		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);

	return 0;
}

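/*
 * DMA mode: map the scatterlists, then build a TDMA descriptor chain that,
 * for each SRAM-sized chunk, copies the input data in, launches the crypto
 * operation and copies the result out, ending with a transfer that saves
 * the output IV.
 */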
static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
					 const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_skcipher_dma_iter iter;
	bool skip_ctx = false;
	int ret;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (req->src != req->dst) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret)
			return -ENOMEM;

		ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
				 DMA_FROM_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err_unmap_src;
		}
	} else {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_BIDIRECTIONAL);
		if (!ret)
			return -ENOMEM;
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_skcipher_req_iter_init(&iter, req);

	do {
		struct mv_cesa_op_ctx *op;

		op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx,
					flags);
		if (IS_ERR(op)) {
			ret = PTR_ERR(op);
			goto err_free_tdma;
		}
		skip_ctx = true;

		mv_cesa_set_crypt_op_len(op, iter.base.op_len);

		/* Add input transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.src, flags);
		if (ret)
			goto err_free_tdma;

		/* Add dummy desc to launch the crypto operation */
		ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;

		/* Add output transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.dst, flags);
		if (ret)
			goto err_free_tdma;

	} while (mv_cesa_skcipher_req_iter_next_op(&iter));

	/* Add output data for IV */
	ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET,
					CESA_SA_DATA_SRAM_OFFSET,
					CESA_TDMA_SRC_IN_SRAM, flags);
	if (ret)
		goto err_free_tdma;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	if (req->dst != req->src)
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);

err_unmap_src:
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
		     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	return ret;
}

static inline int
mv_cesa_skcipher_std_req_init(struct skcipher_request *req,
			      const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_req *basereq = &creq->base;

	sreq->op = *op_templ;
	sreq->skip_ctx = false;
	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	return 0;
}

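/*
 * Common request initialization: check that the request length is block
 * aligned, count the source and destination SG entries, then hand over to
 * the DMA or standard backend depending on the engine capabilities.
 */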
static int mv_cesa_skcipher_req_init(struct skcipher_request *req,
				     struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int blksize = crypto_skcipher_blocksize(tfm);
	int ret;

	if (!IS_ALIGNED(req->cryptlen, blksize))
		return -EINVAL;

	creq->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}
	creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
	if (creq->dst_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of dst SG");
		return creq->dst_nents;
	}

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
			      CESA_SA_DESC_CFG_OP_MSK);

	if (cesa_dev->caps->has_tdma)
		ret = mv_cesa_skcipher_dma_req_init(req, tmpl);
	else
		ret = mv_cesa_skcipher_std_req_init(req, tmpl);

	return ret;
}

static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
				      struct mv_cesa_op_ctx *tmpl)
{
	int ret;
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_engine *engine;

	ret = mv_cesa_skcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	engine = mv_cesa_select_engine(req->cryptlen);
	mv_cesa_skcipher_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_skcipher_cleanup(req);

	return ret;
}

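/*
 * The per-algorithm entry points below all follow the same pattern: fill
 * the operation template with the cipher mode, direction and key (plus the
 * IV for CBC), then queue the request on an engine selected by load.
 */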
static int mv_cesa_des_op(struct skcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des_alg = {
	.setkey = mv_cesa_des_setkey,
	.encrypt = mv_cesa_ecb_des_encrypt,
	.decrypt = mv_cesa_ecb_des_decrypt,
	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.base = {
		.cra_name = "ecb(des)",
		.cra_driver_name = "mv-ecb-des",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_des_op(struct skcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);

	memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES_BLOCK_SIZE);

	return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des_alg = {
	.setkey = mv_cesa_des_setkey,
	.encrypt = mv_cesa_cbc_des_encrypt,
	.decrypt = mv_cesa_cbc_des_decrypt,
	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(des)",
		.cra_driver_name = "mv-cbc-des",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_des3_op(struct skcipher_request *req,
			   struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des3_op(req, &tmpl);
}

static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
	.setkey = mv_cesa_des3_ede_setkey,
	.encrypt = mv_cesa_ecb_des3_ede_encrypt,
	.decrypt = mv_cesa_ecb_des3_ede_decrypt,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.base = {
		.cra_name = "ecb(des3_ede)",
		.cra_driver_name = "mv-ecb-des3-ede",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
			       struct mv_cesa_op_ctx *tmpl)
{
	memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);

	return mv_cesa_des3_op(req, tmpl);
}

static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des3_ede_alg = {
	.setkey = mv_cesa_des3_ede_setkey,
	.encrypt = mv_cesa_cbc_des3_ede_encrypt,
	.decrypt = mv_cesa_cbc_des3_ede_decrypt,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES3_EDE_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(des3_ede)",
		.cra_driver_name = "mv-cbc-des3-ede",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

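/*
 * AES: load the encryption or decryption key schedule depending on the
 * requested direction, and encode the key length in the op configuration.
 */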
static int mv_cesa_aes_op(struct skcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int i;
	u32 *key;
	u32 cfg;

	cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

	if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
		key = ctx->aes.key_dec;
	else
		key = ctx->aes.key_enc;

	for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
		tmpl->ctx.blkcipher.key[i] = cpu_to_le32(key[i]);

	if (ctx->aes.key_length == 24)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
	else if (ctx->aes.key_length == 32)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

	mv_cesa_update_op_cfg(tmpl, cfg,
			      CESA_SA_DESC_CFG_CRYPTM_MSK |
			      CESA_SA_DESC_CFG_AES_LEN_MSK);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_aes_alg = {
	.setkey = mv_cesa_aes_setkey,
	.encrypt = mv_cesa_ecb_aes_encrypt,
	.decrypt = mv_cesa_ecb_aes_decrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.base = {
		.cra_name = "ecb(aes)",
		.cra_driver_name = "mv-ecb-aes",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);

	memcpy(tmpl->ctx.blkcipher.iv, req->iv, AES_BLOCK_SIZE);

	return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_aes_alg = {
	.setkey = mv_cesa_aes_setkey,
	.encrypt = mv_cesa_cbc_aes_encrypt,
	.decrypt = mv_cesa_cbc_aes_decrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "mv-cbc-aes",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};