// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Driver for EIP97 AES acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * Some ideas are from the atmel-aes.c driver.
 */
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include "mtk-platform.h"

#define AES_QUEUE_SIZE		512
#define AES_BUF_ORDER		2
#define AES_BUF_SIZE		((PAGE_SIZE << AES_BUF_ORDER) \
				& ~(AES_BLOCK_SIZE - 1))
#define AES_MAX_STATE_BUF_SIZE	SIZE_IN_WORDS(AES_KEYSIZE_256 + \
				AES_BLOCK_SIZE * 2)
#define AES_MAX_CT_SIZE		6

#define AES_CT_CTRL_HDR		cpu_to_le32(0x00220000)

/* AES-CBC/ECB/CTR command token */
#define AES_CMD0		cpu_to_le32(0x05000000)
#define AES_CMD1		cpu_to_le32(0x2d060000)
#define AES_CMD2		cpu_to_le32(0xe4a63806)

/* AES-GCM command token */
#define AES_GCM_CMD0		cpu_to_le32(0x0b000000)
#define AES_GCM_CMD1		cpu_to_le32(0xa0800000)
#define AES_GCM_CMD2		cpu_to_le32(0x25000010)
#define AES_GCM_CMD3		cpu_to_le32(0x0f020000)
#define AES_GCM_CMD4		cpu_to_le32(0x21e60000)
#define AES_GCM_CMD5		cpu_to_le32(0x40e60000)
#define AES_GCM_CMD6		cpu_to_le32(0xd0070000)

/* AES transform information word 0 fields */
#define AES_TFM_BASIC_OUT	cpu_to_le32(0x4 << 0)
#define AES_TFM_BASIC_IN	cpu_to_le32(0x5 << 0)
#define AES_TFM_GCM_OUT		cpu_to_le32(0x6 << 0)
#define AES_TFM_GCM_IN		cpu_to_le32(0xf << 0)
#define AES_TFM_SIZE(x)		cpu_to_le32((x) << 8)
#define AES_TFM_128BITS		cpu_to_le32(0xb << 16)
#define AES_TFM_192BITS		cpu_to_le32(0xd << 16)
#define AES_TFM_256BITS		cpu_to_le32(0xf << 16)
#define AES_TFM_GHASH_DIGEST	cpu_to_le32(0x2 << 21)
#define AES_TFM_GHASH		cpu_to_le32(0x4 << 23)

/* AES transform information word 1 fields */
#define AES_TFM_ECB		cpu_to_le32(0x0 << 0)
#define AES_TFM_CBC		cpu_to_le32(0x1 << 0)
#define AES_TFM_CTR_INIT	cpu_to_le32(0x2 << 0)	/* init counter to 1 */
#define AES_TFM_CTR_LOAD	cpu_to_le32(0x6 << 0)	/* load/reuse counter */
#define AES_TFM_3IV		cpu_to_le32(0x7 << 5)	/* using IV 0-2 */
#define AES_TFM_FULL_IV		cpu_to_le32(0xf << 5)	/* using IV 0-3 */
#define AES_TFM_IV_CTR_MODE	cpu_to_le32(0x1 << 10)
#define AES_TFM_ENC_HASH	cpu_to_le32(0x1 << 17)

/* AES flags */
#define AES_FLAGS_CIPHER_MSK	GENMASK(2, 0)
#define AES_FLAGS_ECB		BIT(0)
#define AES_FLAGS_CBC		BIT(1)
#define AES_FLAGS_CTR		BIT(2)
#define AES_FLAGS_GCM		BIT(3)
#define AES_FLAGS_ENCRYPT	BIT(4)
#define AES_FLAGS_BUSY		BIT(5)

#define AES_AUTH_TAG_ERR	cpu_to_le32(BIT(26))
/**
 * mtk_aes_info - hardware information of AES
 * @cmd:	command token, hardware instruction
 * @tfm:	transform state of cipher algorithm.
 * @state:	contains keys and initial vectors.
 *
 * Memory layout of GCM buffer:
 * /-----------\
 * |  AES KEY  | 128/192/256 bits
 * |-----------|
 * |  HASH KEY | a string of 128 zero bits encrypted using the block cipher
 * |-----------|
 * |    IVs    | 4 * 4 bytes
 * \-----------/
 *
 * The engine requires all of this information to:
 * - decode the command tokens and control the engine's data path,
 * - coordinate hardware data fetch and store operations,
 * - construct and output the result token.
 */
struct mtk_aes_info {
	__le32 cmd[AES_MAX_CT_SIZE];
	__le32 tfm[2];
	__le32 state[AES_MAX_STATE_BUF_SIZE];
};

struct mtk_aes_reqctx {
	u64 mode;
};
struct mtk_aes_base_ctx {
	struct mtk_cryp *cryp;
	u32 keylen;
	__le32 keymode;

	mtk_aes_fn start;

	struct mtk_aes_info info;
	dma_addr_t ct_dma;
	dma_addr_t tfm_dma;

	__le32 ct_hdr;
	u32 ct_size;
};

struct mtk_aes_ctx {
	struct mtk_aes_base_ctx	base;
};

struct mtk_aes_ctr_ctx {
	struct mtk_aes_base_ctx base;

	u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
	size_t offset;
	struct scatterlist src[2];
	struct scatterlist dst[2];
};

struct mtk_aes_gcm_ctx {
	struct mtk_aes_base_ctx base;

	u32 authsize;
	size_t textlen;

	struct crypto_skcipher *ctr;
};

struct mtk_aes_drv {
	struct list_head dev_list;
	/* Device list lock */
	spinlock_t lock;
};

static struct mtk_aes_drv mtk_aes = {
	.dev_list = LIST_HEAD_INIT(mtk_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(mtk_aes.lock),
};

static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset)
{
	return readl_relaxed(cryp->base + offset);
}

static inline void mtk_aes_write(struct mtk_cryp *cryp,
				 u32 offset, u32 value)
{
	writel_relaxed(value, cryp->base + offset);
}
static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx)
{
	struct mtk_cryp *cryp = NULL;
	struct mtk_cryp *tmp;

	spin_lock_bh(&mtk_aes.lock);
	if (!ctx->cryp) {
		list_for_each_entry(tmp, &mtk_aes.dev_list, aes_list) {
			cryp = tmp;
			break;
		}
		ctx->cryp = cryp;
	} else {
		cryp = ctx->cryp;
	}
	spin_unlock_bh(&mtk_aes.lock);

	return cryp;
}

static inline size_t mtk_aes_padlen(size_t len)
{
	len &= AES_BLOCK_SIZE - 1;
	return len ? AES_BLOCK_SIZE - len : 0;
}
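
/*
 * Check that each scatterlist segment is 32-bit aligned and a whole number
 * of AES blocks, so it can be handed to the DMA engine directly. On success,
 * record the number of entries needed and trim the last segment to the
 * requested length, remembering the trimmed remainder for later restore.
 */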
static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len,
				  struct mtk_aes_dma *dma)
{
	int nents;

	if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
		return false;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
				return false;

			dma->nents = nents + 1;
			dma->remainder = sg->length - len;
			sg->length = len;
			return true;
		}

		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;

		len -= sg->length;
	}

	return false;
}
static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes,
				    const struct mtk_aes_reqctx *rctx)
{
	/* Clear all but persistent flags and set request flags. */
	aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode;
}

static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
{
	struct scatterlist *sg = dma->sg;
	int nents = dma->nents;

	if (!dma->remainder)
		return;

	while (--nents > 0 && sg)
		sg = sg_next(sg);

	if (!sg)
		return;

	sg->length += dma->remainder;
}
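
/*
 * Copy key or IV material into the transform state buffer, converting each
 * 32-bit word to the endianness expected by the hardware.
 */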
static inline void mtk_aes_write_state_le(__le32 *dst, const u32 *src, u32 size)
{
	int i;

	for (i = 0; i < SIZE_IN_WORDS(size); i++)
		dst[i] = cpu_to_le32(src[i]);
}

static inline void mtk_aes_write_state_be(__be32 *dst, const u32 *src, u32 size)
{
	int i;

	for (i = 0; i < SIZE_IN_WORDS(size); i++)
		dst[i] = cpu_to_be32(src[i]);
}
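
/*
 * Finish the current request: clear the busy flag, run its completion
 * callback and kick the queue tasklet so the next queued request can start.
 */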
static inline int mtk_aes_complete(struct mtk_cryp *cryp,
				   struct mtk_aes_rec *aes,
				   int err)
{
	aes->flags &= ~AES_FLAGS_BUSY;
	aes->areq->complete(aes->areq, err);
	/* Handle new request */
	tasklet_schedule(&aes->queue_task);
	return err;
}

/*
 * Write descriptors for processing. This will configure the engine, load
 * the transform information and then start the packet processing.
 */
static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_ring *ring = cryp->ring[aes->id];
	struct mtk_desc *cmd = NULL, *res = NULL;
	struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg;
	u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len;
	int nents;

	/* Write command descriptors */
	for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
		cmd = ring->cmd_next;
		cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
		cmd->buf = cpu_to_le32(sg_dma_address(ssg));

		if (nents == 0) {
			cmd->hdr |= MTK_DESC_FIRST |
				    MTK_DESC_CT_LEN(aes->ctx->ct_size);
			cmd->ct = cpu_to_le32(aes->ctx->ct_dma);
			cmd->ct_hdr = aes->ctx->ct_hdr;
			cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
		}

		/* Shift ring buffer and check boundary */
		if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
			ring->cmd_next = ring->cmd_base;
	}
	cmd->hdr |= MTK_DESC_LAST;

	/* Prepare result descriptors */
	for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
		res = ring->res_next;
		res->hdr = MTK_DESC_BUF_LEN(dsg->length);
		res->buf = cpu_to_le32(sg_dma_address(dsg));

		if (nents == 0)
			res->hdr |= MTK_DESC_FIRST;

		/* Shift ring buffer and check boundary */
		if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
			ring->res_next = ring->res_base;
	}
	res->hdr |= MTK_DESC_LAST;

	/* Pointer to current result descriptor */
	ring->res_prev = res;

	/* Prepare enough space for authenticated tag */
	if (aes->flags & AES_FLAGS_GCM)
		res->hdr += AES_BLOCK_SIZE;

	/*
	 * Make sure that all changes to the DMA ring are done before we
	 * start engine.
	 */
	wmb();
	/* Start DMA transfer */
	mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen));
	mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen));

	return -EINPROGRESS;
}
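
/*
 * Unmap the transform info and the source/destination scatterlists. If the
 * bounce buffer was used for the destination, copy the result back into the
 * caller's scatterlist.
 */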
static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;

	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
			 DMA_TO_DEVICE);

	if (aes->src.sg == aes->dst.sg) {
		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_BIDIRECTIONAL);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	} else {
		dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents,
			     DMA_FROM_DEVICE);

		if (aes->dst.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->dst);

		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_TO_DEVICE);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	}

	if (aes->dst.sg == &aes->aligned_sg)
		sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst),
				    aes->buf, aes->total);
}
static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;

	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
				     DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
		goto exit;

	ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);

	if (aes->src.sg == aes->dst.sg) {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents,
					     DMA_BIDIRECTIONAL);
		aes->dst.sg_len = aes->src.sg_len;
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;
	} else {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents, DMA_TO_DEVICE);
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;

		aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
					     aes->dst.nents, DMA_FROM_DEVICE);
		if (unlikely(!aes->dst.sg_len)) {
			dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
				     DMA_TO_DEVICE);
			goto sg_map_err;
		}
	}

	return mtk_aes_xmit(cryp, aes);

sg_map_err:
	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
exit:
	return mtk_aes_complete(cryp, aes, -EINVAL);
}

/* Initialize transform information of CBC/ECB/CTR mode */
static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			      size_t len)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD1;

	info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
	if (aes->flags & AES_FLAGS_ENCRYPT)
		info->tfm[0] |= AES_TFM_BASIC_OUT;
	else
		info->tfm[0] |= AES_TFM_BASIC_IN;

	switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
	case AES_FLAGS_CBC:
		info->tfm[1] = AES_TFM_CBC;
		break;
	case AES_FLAGS_ECB:
		info->tfm[1] = AES_TFM_ECB;
		goto ecb;
	case AES_FLAGS_CTR:
		info->tfm[1] = AES_TFM_CTR_LOAD;
		goto ctr;
	default:
		/* Should not happen... */
		return;
	}

	mtk_aes_write_state_le(info->state + ctx->keylen, req->info,
			       AES_BLOCK_SIZE);
ctr:
	info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE));
	info->tfm[1] |= AES_TFM_FULL_IV;
	info->cmd[cnt++] = AES_CMD2;
ecb:
	ctx->ct_size = cnt;
}
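
/*
 * Prepare the DMA transfer: if either scatterlist is not suitably aligned,
 * fall back to the pre-allocated bounce buffer (padded to a whole number of
 * AES blocks), then build the transform info and map everything for DMA.
 */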
static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
		       struct scatterlist *src, struct scatterlist *dst,
		       size_t len)
{
	size_t padlen = 0;
	bool src_aligned, dst_aligned;

	aes->total = len;
	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		padlen = mtk_aes_padlen(len);

		if (len + padlen > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen);
	}

	mtk_aes_info_init(cryp, aes, len + padlen);

	return mtk_aes_map(cryp, aes);
}
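
/*
 * Enqueue a new request on the record's queue and, if the record is idle,
 * dequeue the next request and hand it to the mode-specific start routine.
 */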
static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct crypto_async_request *new_areq)
{
	struct mtk_aes_rec *aes = cryp->aes[id];
	struct crypto_async_request *areq, *backlog;
	struct mtk_aes_base_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&aes->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&aes->queue, new_areq);
	if (aes->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&aes->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&aes->queue);
	areq = crypto_dequeue_request(&aes->queue);
	if (areq)
		aes->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&aes->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);

	aes->areq = areq;
	aes->ctx = ctx;

	return ctx->start(cryp, aes);
}

static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
				     struct mtk_aes_rec *aes)
{
	return mtk_aes_complete(cryp, aes, 0);
}

static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);
	aes->resume = mtk_aes_transfer_complete;

	return mtk_aes_dma(cryp, aes, req->src, req->dst, req->nbytes);
}

static inline struct mtk_aes_ctr_ctx *
mtk_aes_ctr_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_ctr_ctx, base);
}
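
/*
 * CTR requests are processed in chunks: each chunk ends either at the end of
 * the request or where the hardware's 32-bit block counter would overflow,
 * in which case the IV is advanced in software before the next chunk starts.
 */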
static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct scatterlist *src, *dst;
	u32 start, end, ctr, blocks;
	size_t datalen;
	bool fragmented = false;

	/* Check for transfer completion. */
	cctx->offset += aes->total;
	if (cctx->offset >= req->nbytes)
		return mtk_aes_transfer_complete(cryp, aes);

	/* Compute data length. */
	datalen = req->nbytes - cctx->offset;
	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
	ctr = be32_to_cpu(cctx->iv[3]);

	/* Check 32bit counter overflow. */
	start = ctr;
	end = start + blocks - 1;
	if (end < start) {
		ctr |= 0xffffffff;
		datalen = AES_BLOCK_SIZE * -start;
		fragmented = true;
	}

	/* Jump to offset. */
	src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));

	/* Write IVs into transform state buffer. */
	mtk_aes_write_state_le(ctx->info.state + ctx->keylen, cctx->iv,
			       AES_BLOCK_SIZE);

	if (unlikely(fragmented)) {
		/*
		 * Increment the counter manually to cope with the hardware
		 * counter overflow.
		 */
		cctx->iv[3] = cpu_to_be32(ctr);
		crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
	}

	return mtk_aes_dma(cryp, aes, src, dst, datalen);
}
static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);

	memcpy(cctx->iv, req->info, AES_BLOCK_SIZE);
	cctx->offset = 0;
	aes->total = 0;
	aes->resume = mtk_aes_ctr_transfer;

	return mtk_aes_ctr_transfer(cryp, aes);
}

/* Check and set the AES key to transform state buffer */
static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
			  const u8 *key, u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;
	default:
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);
	mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);

	return 0;
}

static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode)
{
	struct mtk_aes_base_ctx *ctx;
	struct mtk_aes_reqctx *rctx;

	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx = ablkcipher_request_ctx(req);
	rctx->mode = mode;

	return mtk_aes_handle_queue(ctx->cryp, !(mode & AES_FLAGS_ENCRYPT),
				    &req->base);
}

static int mtk_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
}

static int mtk_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ECB);
}

static int mtk_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int mtk_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CBC);
}

static int mtk_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int mtk_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CTR);
}
static int mtk_aes_cra_init(struct crypto_tfm *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_aes_find_dev(&ctx->base);
	if (!cryp) {
		pr_err("can't find crypto device\n");
		return -ENODEV;
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
	ctx->base.start = mtk_aes_start;
	return 0;
}

static int mtk_aes_ctr_cra_init(struct crypto_tfm *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_aes_find_dev(&ctx->base);
	if (!cryp) {
		pr_err("can't find crypto device\n");
		return -ENODEV;
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
	ctx->base.start = mtk_aes_ctr_start;
	return 0;
}

static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_cra_init,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_cbc_encrypt,
		.decrypt	= mtk_aes_cbc_decrypt,
		.ivsize		= AES_BLOCK_SIZE,
	}
},
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_cra_init,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_ecb_encrypt,
		.decrypt	= mtk_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_ctr_cra_init,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctr_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_ctr_encrypt,
		.decrypt	= mtk_aes_ctr_decrypt,
	}
},
};
static inline struct mtk_aes_gcm_ctx *
mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_gcm_ctx, base);
}

/*
 * The engine verifies and compares the tag automatically, so we just need
 * to check the status returned in the result descriptor.
 */
static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes)
{
	u32 status = cryp->ring[aes->id]->res_prev->ct;

	return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
				-EBADMSG : 0);
}

/* Initialize transform information of GCM mode */
static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes,
				  size_t len)
{
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_info *info = &ctx->info;
	u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | len;

	info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD2;
	info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_OUT;
	} else {
		info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
		info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_IN;
	}
	ctx->ct_size = cnt;

	info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE(
			ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
			ctx->keymode;
	info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
		       AES_TFM_ENC_HASH;

	mtk_aes_write_state_le(info->state + ctx->keylen + SIZE_IN_WORDS(
			       AES_BLOCK_SIZE), (const u32 *)req->iv, ivsize);
}

static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			   struct scatterlist *src, struct scatterlist *dst,
			   size_t len)
{
	bool src_aligned, dst_aligned;

	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		if (aes->total > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, aes->total);
	}

	mtk_aes_gcm_info_init(cryp, aes, len);

	return mtk_aes_map(cryp, aes);
}

/* Todo: GMAC */
static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx);
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
	u32 len = req->assoclen + req->cryptlen;

	mtk_aes_set_mode(aes, rctx);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		u32 tag[4];

		aes->resume = mtk_aes_transfer_complete;
		/* Compute total process length. */
		aes->total = len + gctx->authsize;
		/* Compute text length. */
		gctx->textlen = req->cryptlen;
		/* Hardware will append authenticated tag to output buffer */
		scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
	} else {
		aes->resume = mtk_aes_gcm_tag_verify;
		aes->total = len;
		gctx->textlen = req->cryptlen - gctx->authsize;
	}

	return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
}

static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);

	/* Empty messages are not supported yet */
	if (!gctx->textlen && !req->assoclen)
		return -EINVAL;

	rctx->mode = AES_FLAGS_GCM | mode;

	return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT),
				    &req->base);
}
/*
 * Because of a hardware limitation, we need to pre-calculate the key(H)
 * for the GHASH operation. The result of that encryption needs to be
 * stored in the transform state buffer.
 */
static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			      u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct crypto_skcipher *ctr = gctx->ctr;
	struct {
		u32 hash[4];
		u8 iv[8];

		struct crypto_wait wait;

		struct scatterlist sg[1];
		struct skcipher_request req;
	} *data;
	int err;

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;
	default:
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);

	/* Same as crypto_gcm_setkey() from crypto/gcm.c */
	crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(ctr, key, keylen);
	crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
			      CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr),
		       GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	crypto_init_wait(&data->wait);
	sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE);
	skcipher_request_set_tfm(&data->req, ctr);
	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &data->wait);
	skcipher_request_set_crypt(&data->req, data->sg, data->sg,
				   AES_BLOCK_SIZE, data->iv);

	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
			      &data->wait);
	if (err)
		goto out;

	/* Write key into state buffer */
	mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);
	/* Write key(H) into state buffer */
	mtk_aes_write_state_be(ctx->info.state + ctx->keylen, data->hash,
			       AES_BLOCK_SIZE);
out:
	kzfree(data);
	return err;
}

static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead,
				   u32 authsize)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);

	/* Same as crypto_gcm_authsize() from crypto/gcm.c */
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	gctx->authsize = authsize;
	return 0;
}

static int mtk_aes_gcm_encrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
}

static int mtk_aes_gcm_decrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, 0);
}

static int mtk_aes_gcm_init(struct crypto_aead *aead)
{
	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_aes_find_dev(&ctx->base);
	if (!cryp) {
		pr_err("can't find crypto device\n");
		return -ENODEV;
	}

	ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0,
					 CRYPTO_ALG_ASYNC);
	if (IS_ERR(ctx->ctr)) {
		pr_err("Error allocating ctr(aes)\n");
		return PTR_ERR(ctx->ctr);
	}

	crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx));
	ctx->base.start = mtk_aes_gcm_start;
	return 0;
}

static void mtk_aes_gcm_exit(struct crypto_aead *aead)
{
	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);

	crypto_free_skcipher(ctx->ctr);
}

static struct aead_alg aes_gcm_alg = {
	.setkey		= mtk_aes_gcm_setkey,
	.setauthsize	= mtk_aes_gcm_setauthsize,
	.encrypt	= mtk_aes_gcm_encrypt,
	.decrypt	= mtk_aes_gcm_decrypt,
	.init		= mtk_aes_gcm_init,
	.exit		= mtk_aes_gcm_exit,
	.ivsize		= GCM_AES_IV_SIZE,
	.maxauthsize	= AES_BLOCK_SIZE,

	.base = {
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-mtk",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct mtk_aes_gcm_ctx),
		.cra_alignmask		= 0xf,
		.cra_module		= THIS_MODULE,
	},
};
static void mtk_aes_queue_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;

	mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
}

static void mtk_aes_done_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
	struct mtk_cryp *cryp = aes->cryp;

	mtk_aes_unmap(cryp, aes);
	aes->resume(cryp, aes);
}

static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id;
	struct mtk_cryp *cryp = aes->cryp;
	u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));

	mtk_aes_write(cryp, RDR_STAT(aes->id), val);

	if (likely(AES_FLAGS_BUSY & aes->flags)) {
		mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
		mtk_aes_write(cryp, RDR_THRESH(aes->id),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&aes->done_task);
	} else {
		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
	}
	return IRQ_HANDLED;
}
/*
 * The purpose of creating encryption and decryption records is
 * to process outbound/inbound data in parallel. This can improve
 * performance in most use cases, such as IPsec VPN, especially
 * under heavy network traffic.
 */
static int mtk_aes_record_init(struct mtk_cryp *cryp)
{
	struct mtk_aes_rec **aes = cryp->aes;
	int i, err = -ENOMEM;

	for (i = 0; i < MTK_REC_NUM; i++) {
		aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL);
		if (!aes[i])
			goto err_cleanup;

		aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL,
						       AES_BUF_ORDER);
		if (!aes[i]->buf)
			goto err_cleanup;

		aes[i]->cryp = cryp;

		spin_lock_init(&aes[i]->lock);
		crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);

		tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
			     (unsigned long)aes[i]);
		tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
			     (unsigned long)aes[i]);
	}

	/* Link to ring0 and ring1 respectively */
	aes[0]->id = MTK_RING0;
	aes[1]->id = MTK_RING1;

	return 0;

err_cleanup:
	for (; i--; ) {
		free_page((unsigned long)aes[i]->buf);
		kfree(aes[i]);
	}

	return err;
}

static void mtk_aes_record_free(struct mtk_cryp *cryp)
{
	int i;

	for (i = 0; i < MTK_REC_NUM; i++) {
		tasklet_kill(&cryp->aes[i]->done_task);
		tasklet_kill(&cryp->aes[i]->queue_task);

		free_page((unsigned long)cryp->aes[i]->buf);
		kfree(cryp->aes[i]);
	}
}

static void mtk_aes_unregister_algs(void)
{
	int i;

	crypto_unregister_aead(&aes_gcm_alg);

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);
}

static int mtk_aes_register_algs(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	err = crypto_register_aead(&aes_gcm_alg);
	if (err)
		goto err_aes_algs;

	return 0;

err_aes_algs:
	for (; i--; )
		crypto_unregister_alg(&aes_algs[i]);

	return err;
}
int mtk_cipher_alg_register(struct mtk_cryp *cryp)
{
	int ret;

	INIT_LIST_HEAD(&cryp->aes_list);

	/* Initialize two cipher records */
	ret = mtk_aes_record_init(cryp);
	if (ret)
		goto err_record;

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[0]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[1]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	/* Enable ring0 and ring1 interrupt */
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0);
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1);

	spin_lock(&mtk_aes.lock);
	list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
	spin_unlock(&mtk_aes.lock);

	ret = mtk_aes_register_algs();
	if (ret)
		goto err_algs;

	return 0;

err_algs:
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);
err_res:
	mtk_aes_record_free(cryp);
err_record:
	dev_err(cryp->dev, "mtk-aes initialization failed.\n");
	return ret;
}

void mtk_cipher_alg_release(struct mtk_cryp *cryp)
{
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);

	mtk_aes_unregister_algs();
	mtk_aes_record_free(cryp);
}