/**
 * AES GCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8 *in_key,
			      unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	case AES_KEYSIZE_192:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
		break;
	case AES_KEYSIZE_256:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
	memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);

	return 0;
}

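/*
 * RFC 4106 key material carries a 4-byte nonce (salt) appended to the AES
 * key; split it off here and stash it for use as the leading bytes of the IV.
 */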
static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8 *in_key,
				  unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	char *nonce = nx_ctx->priv.gcm.nonce;
	int rc;

	if (key_len < 4)
		return -EINVAL;

	key_len -= 4;

	rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
	if (rc)
		goto out;

	memcpy(nonce, in_key + key_len, 4);
out:
	return rc;
}

static int gcm_aes_nx_setauthsize(struct crypto_aead *tfm,
				  unsigned int authsize)
{
	if (authsize > crypto_aead_alg(tfm)->maxauthsize)
		return -EINVAL;

	crypto_aead_crt(tfm)->authsize = authsize;

	return 0;
}

static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	crypto_aead_crt(tfm)->authsize = authsize;

	return 0;
}

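/*
 * Run the associated data through the NX GCA function, walking the AAD
 * scatterlist in chunks bounded by the coprocessor's scatter/gather and
 * byte limits. AAD of a single block or less is simply copied out for
 * direct use as the input pattern.
 */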
static int nx_gca(struct nx_crypto_ctx *nx_ctx,
		  struct aead_request *req,
		  u8 *out)
{
	int rc;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_ctx->in_sg;
	unsigned int nbytes = req->assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	if (nbytes <= AES_BLOCK_SIZE) {
		scatterwalk_start(&walk, req->assoc);
		scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
		return 0;
	}

	NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len / sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen / NX_PAGE_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->assoc, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
		       csbcpb_aead->cpb.aes_gca.out_pat,
		       AES_BLOCK_SIZE);
		NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

	return rc;
}

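/*
 * Authenticate-only (GMAC) path, used when there is associated data but no
 * payload: run the hardware in GMAC mode with bit_length_data forced to
 * zero, then restore GCM mode on the way out.
 */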
static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *nx_sg;
	unsigned int nbytes = req->assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	/* Set GMAC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len / sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen / NX_PAGE_SIZE);

	/* Copy IV */
	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->assoc, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		csbcpb->cpb.aes_gcm.bit_length_data = 0;
		csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	return rc;
}

static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
		     int enc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	char out[AES_BLOCK_SIZE];
	struct nx_sg *in_sg, *out_sg;
	int len;

	/* For scenarios where the input message is zero length, AES CTR mode
	 * may be used. Set the source data to be a single block (16B) of all
	 * zeros, and set the input IV value to be the same as the GMAC IV
	 * value. - nx_wb 4.8.1.3 */

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
	       sizeof(csbcpb->cpb.aes_ecb.key));
	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	len = AES_BLOCK_SIZE;

	/* Encrypt the counter/IV */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
				 &len, nx_ctx->ap->sglen);

	if (len != AES_BLOCK_SIZE)
		return -EINVAL;

	len = sizeof(out);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
				  nx_ctx->ap->sglen);

	if (len != sizeof(out))
		return -EINVAL;

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* Copy out the auth tag */
	memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
	       crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;

	/*
	 * The ECB key uses the same region as the GCM AAD and counter, so
	 * it's safe to just fill it with zeroes.
	 */
	memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));

	return rc;
}

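/*
 * Main GCM routine: initialize the 32-bit counter portion of the IV to 1,
 * hash any associated data via nx_gca(), then walk the payload in
 * coprocessor-sized chunks, carrying the counter, hash and S0 state across
 * calls. Zero-length requests are routed to gcm_empty() or gmac() instead.
 */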
static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct blkcipher_desc desc;
	unsigned int nbytes = req->cryptlen;
	unsigned int processed = 0, to_process;
	unsigned long irq_flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	desc.info = nx_ctx->priv.gcm.iv;
	/* initialize the counter */
	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

	if (nbytes == 0) {
		if (req->assoclen == 0)
			rc = gcm_empty(req, &desc, enc);
		else
			rc = gmac(req, &desc);
		if (rc)
			goto out;
		else
			goto mac;
	}

	/* Process associated data */
	csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;
	if (req->assoclen) {
		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
		if (rc)
			goto out;
	}

	/* Set flags for encryption */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	if (enc) {
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
	}

	do {
		to_process = nbytes - processed;

		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
		desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
				       req->src, &to_process, processed,
				       csbcpb->cpb.aes_gcm.iv_or_cnt);
		if (rc)
			goto out;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

mac:
	if (enc) {
		/* copy out the auth tag */
		scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac,
				req->dst, nbytes,
				crypto_aead_authsize(crypto_aead_reqtfm(req)),
				SCATTERWALK_TO_SG);
	} else {
		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

		scatterwalk_map_and_copy(itag, req->src, nbytes,
				crypto_aead_authsize(crypto_aead_reqtfm(req)),
				SCATTERWALK_FROM_SG);
		rc = memcmp(itag, otag,
			    crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
		     -EBADMSG : 0;
	}
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

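/* Plain gcm(aes): the caller supplies the full 96-bit (12-byte) IV. */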
static int gcm_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	char *iv = nx_ctx->priv.gcm.iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 1);
}

static int gcm_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	char *iv = nx_ctx->priv.gcm.iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 0);
}

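/*
 * rfc4106: the 8-byte per-request IV is prepended with the 4-byte nonce
 * saved at setkey time to form the full 12-byte GCM IV.
 */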
static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	char *iv = nx_ctx->priv.gcm.iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	return gcm_aes_nx_crypt(req, 1);
}

static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	char *iv = nx_ctx->priv.gcm.iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	return gcm_aes_nx_crypt(req, 0);
}

/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses this tfm->blocksize. */
struct crypto_alg nx_gcm_aes_alg = {
	.cra_name        = "gcm(aes)",
	.cra_driver_name = "gcm-aes-nx",
	.cra_priority    = 300,
	.cra_flags       = CRYPTO_ALG_TYPE_AEAD,
	.cra_blocksize   = 1,
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_type        = &crypto_aead_type,
	.cra_module      = THIS_MODULE,
	.cra_init        = nx_crypto_ctx_aes_gcm_init,
	.cra_exit        = nx_crypto_ctx_exit,
	.cra_aead = {
		.ivsize      = AES_BLOCK_SIZE,
		.maxauthsize = AES_BLOCK_SIZE,
		.setkey      = gcm_aes_nx_set_key,
		.setauthsize = gcm_aes_nx_setauthsize,
		.encrypt     = gcm_aes_nx_encrypt,
		.decrypt     = gcm_aes_nx_decrypt,
	}
};

struct crypto_alg nx_gcm4106_aes_alg = {
	.cra_name        = "rfc4106(gcm(aes))",
	.cra_driver_name = "rfc4106-gcm-aes-nx",
	.cra_priority    = 300,
	.cra_flags       = CRYPTO_ALG_TYPE_AEAD,
	.cra_blocksize   = 1,
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_type        = &crypto_nivaead_type,
	.cra_module      = THIS_MODULE,
	.cra_init        = nx_crypto_ctx_aes_gcm_init,
	.cra_exit        = nx_crypto_ctx_exit,
	.cra_aead = {
		.ivsize      = 8,
		.maxauthsize = AES_BLOCK_SIZE,
		.geniv       = "seqiv",
		.setkey      = gcm4106_aes_nx_set_key,
		.setauthsize = gcm4106_aes_nx_setauthsize,
		.encrypt     = gcm4106_aes_nx_encrypt,
		.decrypt     = gcm4106_aes_nx_decrypt,
	}
};