// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for OMAP AES GCM HW acceleration.
 *
 * Copyright (c) 2016 Texas Instruments Incorporated
 */

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>

#include "omap-crypto.h"
#include "omap-aes.h"

static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
				     struct aead_request *req);
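
/*
 * Complete the current AEAD request: report the result to the crypto
 * engine and release the runtime PM reference taken for the transfer.
 */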
static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
{
	struct aead_request *req = dd->aead_req;

	dd->in_sg = NULL;
	dd->out_sg = NULL;

	crypto_finalize_aead_request(dd->engine, req, ret);

	pm_runtime_mark_last_busy(dd->dev);
	pm_runtime_put_autosuspend(dd->dev);
}
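
/*
 * Post-transfer cleanup: sync and unmap the DMA scatterlists and release
 * any alignment buffers. For decryption, the tag words were already
 * XORed against the expected tag in omap_aes_gcm_dma_out_callback(), so
 * any non-zero byte here means authentication failed.
 */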
static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
{
	u8 *tag;
	int alen, clen, i, ret = 0, nsg;
	struct omap_aes_reqctx *rctx;

	alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);
	clen = ALIGN(dd->total, AES_BLOCK_SIZE);
	rctx = aead_request_ctx(dd->aead_req);

	nsg = !!(dd->assoc_len && dd->total);

	dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
			       DMA_FROM_DEVICE);
	dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
	dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
	omap_aes_crypt_dma_stop(dd);

	omap_crypto_cleanup(dd->out_sg, dd->orig_out,
			    dd->aead_req->assoclen, dd->total,
			    FLAGS_OUT_DATA_ST_SHIFT, dd->flags);

	if (dd->flags & FLAGS_ENCRYPT)
		scatterwalk_map_and_copy(rctx->auth_tag,
					 dd->aead_req->dst,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 1);

	omap_crypto_cleanup(&dd->in_sgl[0], NULL, 0, alen,
			    FLAGS_ASSOC_DATA_ST_SHIFT, dd->flags);

	omap_crypto_cleanup(&dd->in_sgl[nsg], NULL, 0, clen,
			    FLAGS_IN_DATA_ST_SHIFT, dd->flags);

	if (!(dd->flags & FLAGS_ENCRYPT)) {
		tag = (u8 *)rctx->auth_tag;
		for (i = 0; i < dd->authsize; i++) {
			if (tag[i])
				ret = -EBADMSG;
		}
	}

	omap_aes_gcm_finish_req(dd, ret);
}
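
/*
 * Build the DMA-ready scatterlists: the associated data and the payload
 * are padded to AES block size and bounced into zeroed, block-aligned
 * buffers as needed (omap_crypto_align_sg() makes the copy decision
 * based on the flags passed in).
 */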
static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
				     struct aead_request *req)
{
	int alen, clen, cryptlen, assoclen, ret;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int authlen = crypto_aead_authsize(aead);
	struct scatterlist *tmp, sg_arr[2];
	int nsg;
	u16 flags;

	assoclen = req->assoclen;
	cryptlen = req->cryptlen;

	if (dd->flags & FLAGS_RFC4106_GCM)
		assoclen -= 8;

	if (!(dd->flags & FLAGS_ENCRYPT))
		cryptlen -= authlen;

	alen = ALIGN(assoclen, AES_BLOCK_SIZE);
	clen = ALIGN(cryptlen, AES_BLOCK_SIZE);

	nsg = !!(assoclen && cryptlen);

	omap_aes_clear_copy_flags(dd);

	sg_init_table(dd->in_sgl, nsg + 1);
	if (assoclen) {
		tmp = req->src;
		ret = omap_crypto_align_sg(&tmp, assoclen,
					   AES_BLOCK_SIZE, dd->in_sgl,
					   OMAP_CRYPTO_COPY_DATA |
					   OMAP_CRYPTO_ZERO_BUF |
					   OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
					   FLAGS_ASSOC_DATA_ST_SHIFT,
					   &dd->flags);
		if (ret)
			return ret;
	}

	if (cryptlen) {
		tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen);

		if (nsg)
			sg_unmark_end(dd->in_sgl);

		ret = omap_crypto_align_sg(&tmp, cryptlen,
					   AES_BLOCK_SIZE, &dd->in_sgl[nsg],
					   OMAP_CRYPTO_COPY_DATA |
					   OMAP_CRYPTO_ZERO_BUF |
					   OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
					   FLAGS_IN_DATA_ST_SHIFT,
					   &dd->flags);
		if (ret)
			return ret;
	}

	dd->in_sg = dd->in_sgl;
	dd->total = cryptlen;
	dd->assoc_len = assoclen;
	dd->authsize = authlen;

	dd->out_sg = req->dst;
	dd->orig_out = req->dst;

	dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, req->assoclen);

	flags = 0;
	if (req->src == req->dst || dd->out_sg == sg_arr)
		flags |= OMAP_CRYPTO_FORCE_COPY;

	if (cryptlen) {
		ret = omap_crypto_align_sg(&dd->out_sg, cryptlen,
					   AES_BLOCK_SIZE, &dd->out_sgl,
					   flags,
					   FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
		if (ret)
			return ret;
	}

	dd->in_sg_len = sg_nents_for_len(dd->in_sg, alen + clen);
	dd->out_sg_len = sg_nents_for_len(dd->out_sg, clen);

	return 0;
}
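
/*
 * Software-encrypt the initial counter block (96-bit IV with a 32-bit
 * counter of 1) using the library AES implementation;
 * omap_aes_gcm_dma_out_callback() later XORs this value into the
 * hardware tag to form the final GCM tag.
 */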
static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
{
	struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));

	aes_encrypt(&ctx->actx, (u8 *)tag, (u8 *)iv);
	return 0;
}
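
/*
 * DMA completion callback: fold the hardware tag registers into the
 * pre-computed encrypted counter block from do_encrypt_iv(). For
 * decryption, the expected tag from the source buffer is XORed in as
 * well, so a valid message leaves all tag bytes zero for
 * omap_aes_gcm_done_task() to verify.
 */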
void omap_aes_gcm_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;
	struct omap_aes_reqctx *rctx;
	int i, val;
	u32 *auth_tag, tag[4];

	if (!(dd->flags & FLAGS_ENCRYPT))
		scatterwalk_map_and_copy(tag, dd->aead_req->src,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 0);

	rctx = aead_request_ctx(dd->aead_req);
	auth_tag = (u32 *)rctx->auth_tag;
	for (i = 0; i < 4; i++) {
		val = omap_aes_read(dd, AES_REG_TAG_N(dd, i));
		auth_tag[i] = val ^ auth_tag[i];
		if (!(dd->flags & FLAGS_ENCRYPT))
			auth_tag[i] = auth_tag[i] ^ tag[i];
	}

	omap_aes_gcm_done_task(dd);
}
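
/*
 * Hand the request off to the crypto engine queue; the engine invokes
 * omap_aes_gcm_prepare_req() and omap_aes_gcm_crypt_req() in turn.
 */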
static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
				     struct aead_request *req)
{
	if (req)
		return crypto_transfer_aead_request_to_engine(dd->engine, req);

	return 0;
}
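
/*
 * ->prepare_request() hook for the crypto engine: bind the request to
 * the device, stage the aligned buffers and program the control
 * registers for the selected GCM mode.
 */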
static int omap_aes_gcm_prepare_req(struct crypto_engine *engine, void *areq)
{
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
	struct omap_aes_dev *dd = rctx->dd;
	struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	int err;

	dd->aead_req = req;

	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	err = omap_aes_gcm_copy_buffers(dd, req);
	if (err)
		return err;

	dd->ctx = &ctx->octx;

	return omap_aes_write_ctrl(dd);
}
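
/*
 * Common entry point for all GCM variants: pre-compute the encrypted
 * initial counter block, short-circuit the zero-length case (where the
 * GCM tag is exactly that encrypted block), and queue everything else
 * on the engine.
 */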
static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int authlen = crypto_aead_authsize(aead);
	struct omap_aes_dev *dd;
	__be32 counter = cpu_to_be32(1);
	int err, assoclen;

	memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag));
	memcpy(rctx->iv + GCM_AES_IV_SIZE, &counter, 4);

	err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv);
	if (err)
		return err;

	if (mode & FLAGS_RFC4106_GCM)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
	if (assoclen + req->cryptlen == 0) {
		scatterwalk_map_and_copy(rctx->auth_tag, req->dst, 0, authlen,
					 1);
		return 0;
	}

	dd = omap_aes_find_dev(rctx);
	if (!dd)
		return -ENODEV;
	rctx->mode = mode;

	return omap_aes_gcm_handle_queue(dd, req);
}
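
/* Plain GCM: the request carries the full 12-byte IV. */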
int omap_aes_gcm_encrypt(struct aead_request *req)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
	return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM);
}

int omap_aes_gcm_decrypt(struct aead_request *req)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
	return omap_aes_gcm_crypt(req, FLAGS_GCM);
}
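
/*
 * RFC4106 (IPsec) variants: the IV is the 4-byte nonce saved at setkey
 * time followed by the 8-byte explicit IV from the request, and
 * req->assoclen includes those 8 IV bytes (subtracted again in
 * omap_aes_gcm_copy_buffers()).
 */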
int omap_aes_4106gcm_encrypt(struct aead_request *req)
{
	struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, ctx->octx.nonce, 4);
	memcpy(rctx->iv + 4, req->iv, 8);
	return crypto_ipsec_check_assoclen(req->assoclen) ?:
	       omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
				  FLAGS_RFC4106_GCM);
}

int omap_aes_4106gcm_decrypt(struct aead_request *req)
{
	struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, ctx->octx.nonce, 4);
	memcpy(rctx->iv + 4, req->iv, 8);
	return crypto_ipsec_check_assoclen(req->assoclen) ?:
	       omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
}
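
/*
 * Expand the key for the software AES used by do_encrypt_iv() and keep
 * a raw copy for programming the hardware; aes_expandkey() also
 * validates the key length.
 */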
int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	ret = aes_expandkey(&ctx->actx, key, keylen);
	if (ret)
		return ret;

	memcpy(ctx->octx.key, key, keylen);
	ctx->octx.keylen = keylen;

	return 0;
}
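
/*
 * RFC4106 keys carry a 4-byte nonce (salt) after the AES key proper;
 * split it off and store it for IV construction.
 */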
int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	if (keylen < 4)
		return -EINVAL;
	keylen -= 4;

	ret = aes_expandkey(&ctx->actx, key, keylen);
	if (ret)
		return ret;

	memcpy(ctx->octx.key, key, keylen);
	memcpy(ctx->octx.nonce, key + keylen, 4);
	ctx->octx.keylen = keylen;

	return 0;
}
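
/*
 * Tag-length validation is delegated to the generic helpers; RFC4106
 * permits fewer tag sizes than plain GCM.
 */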
int omap_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	return crypto_gcm_check_authsize(authsize);
}

int omap_aes_4106gcm_setauthsize(struct crypto_aead *parent,
				 unsigned int authsize)
{
	return crypto_rfc4106_check_authsize(authsize);
}
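
/*
 * ->do_one_request() hook for the crypto engine: start the DMA
 * transfer, or complete via the callback directly when there is
 * nothing for the DMA engine to move.
 */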
static int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq)
{
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
	struct omap_aes_dev *dd = rctx->dd;
	int ret = 0;

	if (!dd)
		return -ENODEV;

	if (dd->in_sg_len)
		ret = omap_aes_crypt_dma_start(dd);
	else
		omap_aes_gcm_dma_out_callback(dd);

	return ret;
}
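
/*
 * AEAD transform init: wire up the crypto engine callbacks and size
 * the per-request context.
 */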
int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->enginectx.op.prepare_request = omap_aes_gcm_prepare_req;
	ctx->enginectx.op.unprepare_request = NULL;
	ctx->enginectx.op.do_one_request = omap_aes_gcm_crypt_req;

	crypto_aead_set_reqsize(tfm, sizeof(struct omap_aes_reqctx));

	return 0;
}