/*
 * Accelerated GHASH implementation with ARMv8 PMULL instructions.
 *
 * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <asm/neon.h>
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>

MODULE_DESCRIPTION("GHASH and AES-GCM using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ghash");

#define GHASH_BLOCK_SIZE        16
#define GHASH_DIGEST_SIZE       16
#define GCM_IV_SIZE             12
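
/*
 * Note: the hash key H is kept in two forms. 'a'/'b' hold H premultiplied
 * by 'x' (see __ghash_setkey() below) for consumption by the PMULL
 * assembly, while 'k' keeps the raw key for the scalar gf128mul fallback
 * path.
 */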
struct ghash_key {
        u64     a;
        u64     b;
        be128   k;
};

struct ghash_desc_ctx {
        u64 digest[GHASH_DIGEST_SIZE / sizeof(u64)];
        u8 buf[GHASH_BLOCK_SIZE];
        u32 count;
};

struct gcm_aes_ctx {
        struct crypto_aes_ctx   aes_key;
        u64                     h2[2];
        struct ghash_key        ghash_key;
};
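
/*
 * Core transforms, implemented in the accompanying NEON assembly
 * (ghash-ce-core.S in the upstream tree): the _p64 variant uses the
 * 64x64->128 bit PMULL instruction from the Crypto Extensions, while the
 * _p8 variant is a slower fallback composed of 8-bit polynomial multiplies
 * for CPUs that only implement plain ASIMD.
 */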
asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src,
                                       struct ghash_key const *k,
                                       const char *head);

asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src,
                                      struct ghash_key const *k,
                                      const char *head);

static void (*pmull_ghash_update)(int blocks, u64 dg[], const char *src,
                                  struct ghash_key const *k,
                                  const char *head);

asmlinkage void pmull_gcm_encrypt(int blocks, u64 dg[], u8 dst[],
                                  const u8 src[], u64 const *k, u8 ctr[],
                                  u32 const rk[], int rounds, u8 ks[]);

asmlinkage void pmull_gcm_decrypt(int blocks, u64 dg[], u8 dst[],
                                  const u8 src[], u64 const *k,
                                  u8 ctr[], u32 const rk[], int rounds);

asmlinkage void pmull_gcm_encrypt_block(u8 dst[], u8 const src[],
                                        u32 const rk[], int rounds);

asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);

static int ghash_init(struct shash_desc *desc)
{
        struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);

        *ctx = (struct ghash_desc_ctx){};
        return 0;
}
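
/*
 * Process a batch of GHASH blocks. If the NEON unit is available, defer to
 * the PMULL-based assembly under kernel_neon_begin()/end(); otherwise (e.g.
 * when called from a context where SIMD use is not allowed) fall back to
 * the generic gf128mul_lle() multiply, with 'head' as an optional extra
 * leading block.
 */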
static void ghash_do_update(int blocks, u64 dg[], const char *src,
                            struct ghash_key *key, const char *head)
{
        if (likely(may_use_simd())) {
                kernel_neon_begin();
                pmull_ghash_update(blocks, dg, src, key, head);
                kernel_neon_end();
        } else {
                be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) };

                do {
                        const u8 *in = src;

                        if (head) {
                                in = head;
                                blocks++;
                                head = NULL;
                        } else {
                                src += GHASH_BLOCK_SIZE;
                        }

                        crypto_xor((u8 *)&dst, in, GHASH_BLOCK_SIZE);
                        gf128mul_lle(&dst, &key->k);
                } while (--blocks);

                dg[0] = be64_to_cpu(dst.b);
                dg[1] = be64_to_cpu(dst.a);
        }
}

/* avoid hogging the CPU for too long */
#define MAX_BLOCKS      (SZ_64K / GHASH_BLOCK_SIZE)
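
/*
 * With 16-byte blocks this works out to 4096 blocks per call: each
 * kernel_neon_begin()/end() section in ghash_do_update() then covers at
 * most 64 KiB of input before preemption is possible again.
 */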

static int ghash_update(struct shash_desc *desc, const u8 *src,
                        unsigned int len)
{
        struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
        unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;

        ctx->count += len;

        if ((partial + len) >= GHASH_BLOCK_SIZE) {
                struct ghash_key *key = crypto_shash_ctx(desc->tfm);
                int blocks;

                if (partial) {
                        int p = GHASH_BLOCK_SIZE - partial;

                        memcpy(ctx->buf + partial, src, p);
                        src += p;
                        len -= p;
                }

                blocks = len / GHASH_BLOCK_SIZE;
                len %= GHASH_BLOCK_SIZE;

                do {
                        int chunk = min(blocks, MAX_BLOCKS);

                        ghash_do_update(chunk, ctx->digest, src, key,
                                        partial ? ctx->buf : NULL);

                        blocks -= chunk;
                        src += chunk * GHASH_BLOCK_SIZE;
                        partial = 0;
                } while (unlikely(blocks > 0));
        }

        if (len)
                memcpy(ctx->buf + partial, src, len);

        return 0;
}

static int ghash_final(struct shash_desc *desc, u8 *dst)
{
        struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
        unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;

        if (partial) {
                struct ghash_key *key = crypto_shash_ctx(desc->tfm);

                memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);

                ghash_do_update(1, ctx->digest, ctx->buf, key, NULL);
        }
        put_unaligned_be64(ctx->digest[1], dst);
        put_unaligned_be64(ctx->digest[0], dst + 8);

        *ctx = (struct ghash_desc_ctx){};
        return 0;
}
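
/*
 * The PMULL code consumes the key premultiplied by 'x' in GF(2^128).
 * Shifting H left by one bit may overflow the field, in which case the
 * result is reduced by the field polynomial; 0xc200000000000000 is that
 * polynomial expressed in the bit ordering used here (the same constant
 * appears in most PMULL-based GHASH implementations).
 */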
static int __ghash_setkey(struct ghash_key *key,
                          const u8 *inkey, unsigned int keylen)
{
        u64 a, b;

        /* needed for the fallback */
        memcpy(&key->k, inkey, GHASH_BLOCK_SIZE);

        /* perform multiplication by 'x' in GF(2^128) */
        b = get_unaligned_be64(inkey);
        a = get_unaligned_be64(inkey + 8);

        key->a = (a << 1) | (b >> 63);
        key->b = (b << 1) | (a >> 63);

        if (b >> 63)
                key->b ^= 0xc200000000000000UL;

        return 0;
}

static int ghash_setkey(struct crypto_shash *tfm,
                        const u8 *inkey, unsigned int keylen)
{
        struct ghash_key *key = crypto_shash_ctx(tfm);

        if (keylen != GHASH_BLOCK_SIZE) {
                crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        return __ghash_setkey(key, inkey, keylen);
}

static struct shash_alg ghash_alg = {
        .base.cra_name          = "ghash",
        .base.cra_driver_name   = "ghash-ce",
        .base.cra_priority      = 200,
        .base.cra_blocksize     = GHASH_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct ghash_key),
        .base.cra_module        = THIS_MODULE,

        .digestsize             = GHASH_DIGEST_SIZE,
        .init                   = ghash_init,
        .update                 = ghash_update,
        .final                  = ghash_final,
        .setkey                 = ghash_setkey,
        .descsize               = sizeof(struct ghash_desc_ctx),
};

static int num_rounds(struct crypto_aes_ctx *ctx)
{
        /*
         * # of rounds specified by AES:
         * 128 bit key          10 rounds
         * 192 bit key          12 rounds
         * 256 bit key          14 rounds
         * => n byte key        => 6 + (n/4) rounds
         */
        return 6 + ctx->key_length / 4;
}
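
/*
 * Derive the GHASH key H by encrypting the all-zeroes block with the AES
 * key, then precompute H^2 (shifted the same way as the base key) so the
 * assembly can aggregate two blocks per reduction.
 */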
static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey,
                      unsigned int keylen)
{
        struct gcm_aes_ctx *ctx = crypto_aead_ctx(tfm);
        be128 h1, h2;
        u8 *key = (u8 *)&h1;
        int ret;

        ret = crypto_aes_expand_key(&ctx->aes_key, inkey, keylen);
        if (ret) {
                tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        __aes_arm64_encrypt(ctx->aes_key.key_enc, key, (u8[AES_BLOCK_SIZE]){},
                            num_rounds(&ctx->aes_key));

        __ghash_setkey(&ctx->ghash_key, key, sizeof(be128));

        /* calculate H^2 (used for 2-way aggregation) */
        h2 = h1;
        gf128mul_lle(&h2, &h1);

        ctx->h2[0] = (be64_to_cpu(h2.b) << 1) | (be64_to_cpu(h2.a) >> 63);
        ctx->h2[1] = (be64_to_cpu(h2.a) << 1) | (be64_to_cpu(h2.b) >> 63);

        if (be64_to_cpu(h2.a) >> 63)
                ctx->h2[1] ^= 0xc200000000000000UL;

        return 0;
}

static int gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
        switch (authsize) {
        case 4:
        case 8:
        case 12 ... 16:
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
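
/*
 * Fold 'count' bytes of associated data into the GHASH state. The
 * scatterlist walk in gcm_calculate_auth_mac() delivers data in arbitrary
 * sized pieces, so partial blocks are staged in 'buf' between calls.
 */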
static void gcm_update_mac(u64 dg[], const u8 *src, int count, u8 buf[],
                           int *buf_count, struct gcm_aes_ctx *ctx)
{
        if (*buf_count > 0) {
                int buf_added = min(count, GHASH_BLOCK_SIZE - *buf_count);

                memcpy(&buf[*buf_count], src, buf_added);

                *buf_count += buf_added;
                src += buf_added;
                count -= buf_added;
        }

        if (count >= GHASH_BLOCK_SIZE || *buf_count == GHASH_BLOCK_SIZE) {
                int blocks = count / GHASH_BLOCK_SIZE;

                ghash_do_update(blocks, dg, src, &ctx->ghash_key,
                                *buf_count ? buf : NULL);

                src += blocks * GHASH_BLOCK_SIZE;
                count %= GHASH_BLOCK_SIZE;
                *buf_count = 0;
        }

        if (count > 0) {
                memcpy(buf, src, count);
                *buf_count = count;
        }
}

static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[])
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
        u8 buf[GHASH_BLOCK_SIZE];
        struct scatter_walk walk;
        u32 len = req->assoclen;
        int buf_count = 0;

        scatterwalk_start(&walk, req->src);

        do {
                u32 n = scatterwalk_clamp(&walk, len);
                u8 *p;

                if (!n) {
                        scatterwalk_start(&walk, sg_next(walk.sg));
                        n = scatterwalk_clamp(&walk, len);
                }
                p = scatterwalk_map(&walk);

                gcm_update_mac(dg, p, n, buf, &buf_count, ctx);
                len -= n;

                scatterwalk_unmap(p);
                scatterwalk_advance(&walk, n);
                scatterwalk_done(&walk, 0, len);
        } while (len);

        if (buf_count) {
                memset(&buf[buf_count], 0, GHASH_BLOCK_SIZE - buf_count);
                ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL);
        }
}
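
/*
 * Close out the tag: hash in the final GHASH block holding the bit lengths
 * of the associated data and the ciphertext, then XOR the digest into
 * 'tag', which already contains the encrypted initial counter block.
 */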
static void gcm_final(struct aead_request *req, struct gcm_aes_ctx *ctx,
                      u64 dg[], u8 tag[], int cryptlen)
{
        u8 mac[AES_BLOCK_SIZE];
        u128 lengths;

        lengths.a = cpu_to_be64(req->assoclen * 8);
        lengths.b = cpu_to_be64(cryptlen * 8);

        ghash_do_update(1, dg, (void *)&lengths, &ctx->ghash_key, NULL);

        put_unaligned_be64(dg[1], mac);
        put_unaligned_be64(dg[0], mac + 8);

        crypto_xor(tag, mac, AES_BLOCK_SIZE);
}
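
/*
 * Encrypt path. On the SIMD branch, the first counter block (which seeds
 * the tag) and two keystream blocks for the tail are generated up front;
 * the bulk is then handled two blocks per call by pmull_gcm_encrypt().
 * 'rk' starts out NULL, which appears to signal to the assembly that the
 * round keys loaded by the pmull_gcm_encrypt_block() calls are still live
 * in the NEON registers; once kernel_neon_end() has yielded the unit, 'rk'
 * points at the round keys so each later iteration can reload them.
 */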
static int gcm_encrypt(struct aead_request *req)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
        struct skcipher_walk walk;
        u8 iv[AES_BLOCK_SIZE];
        u8 ks[2 * AES_BLOCK_SIZE];
        u8 tag[AES_BLOCK_SIZE];
        u64 dg[2] = {};
        int nrounds = num_rounds(&ctx->aes_key);
        int err;

        if (req->assoclen)
                gcm_calculate_auth_mac(req, dg);

        memcpy(iv, req->iv, GCM_IV_SIZE);
        put_unaligned_be32(1, iv + GCM_IV_SIZE);

        err = skcipher_walk_aead_encrypt(&walk, req, false);

        if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
                u32 const *rk = NULL;

                kernel_neon_begin();
                pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
                put_unaligned_be32(2, iv + GCM_IV_SIZE);
                pmull_gcm_encrypt_block(ks, iv, NULL, nrounds);
                put_unaligned_be32(3, iv + GCM_IV_SIZE);
                pmull_gcm_encrypt_block(ks + AES_BLOCK_SIZE, iv, NULL, nrounds);
                put_unaligned_be32(4, iv + GCM_IV_SIZE);

                do {
                        int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;

                        if (rk)
                                kernel_neon_begin();

                        pmull_gcm_encrypt(blocks, dg, walk.dst.virt.addr,
                                          walk.src.virt.addr, ctx->h2, iv,
                                          rk, nrounds, ks);
                        kernel_neon_end();

                        err = skcipher_walk_done(&walk,
                                        walk.nbytes % (2 * AES_BLOCK_SIZE));

                        rk = ctx->aes_key.key_enc;
                } while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
        } else {
                __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
                put_unaligned_be32(2, iv + GCM_IV_SIZE);

                while (walk.nbytes >= AES_BLOCK_SIZE) {
                        int blocks = walk.nbytes / AES_BLOCK_SIZE;
                        u8 *dst = walk.dst.virt.addr;
                        u8 *src = walk.src.virt.addr;

                        do {
                                __aes_arm64_encrypt(ctx->aes_key.key_enc,
                                                    ks, iv, nrounds);
                                crypto_xor_cpy(dst, src, ks, AES_BLOCK_SIZE);
                                crypto_inc(iv, AES_BLOCK_SIZE);

                                dst += AES_BLOCK_SIZE;
                                src += AES_BLOCK_SIZE;
                        } while (--blocks > 0);

                        ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg,
                                        walk.dst.virt.addr, &ctx->ghash_key,
                                        NULL);

                        err = skcipher_walk_done(&walk,
                                                 walk.nbytes % AES_BLOCK_SIZE);
                }
                if (walk.nbytes)
                        __aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv,
                                            nrounds);
        }

        /* handle the tail */
        if (walk.nbytes) {
                u8 buf[GHASH_BLOCK_SIZE];
                unsigned int nbytes = walk.nbytes;
                u8 *dst = walk.dst.virt.addr;
                u8 *head = NULL;

                crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, ks,
                               walk.nbytes);

                if (walk.nbytes > GHASH_BLOCK_SIZE) {
                        head = dst;
                        dst += GHASH_BLOCK_SIZE;
                        nbytes %= GHASH_BLOCK_SIZE;
                }

                memcpy(buf, dst, nbytes);
                memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes);
                ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head);

                err = skcipher_walk_done(&walk, 0);
        }

        if (err)
                return err;

        gcm_final(req, ctx, dg, tag, req->cryptlen);

        /* copy authtag to end of dst */
        scatterwalk_map_and_copy(tag, req->dst, req->assoclen + req->cryptlen,
                                 crypto_aead_authsize(aead), 1);

        return 0;
}
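
/*
 * Decrypt path. Mirrors gcm_encrypt(), except that GHASH is computed over
 * the ciphertext input, and the keystream needed for a sub-block tail is
 * produced inside the final NEON section: a second counter block (iv2) is
 * set up when more than one block of keystream remains to be generated.
 */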
static int gcm_decrypt(struct aead_request *req)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
        unsigned int authsize = crypto_aead_authsize(aead);
        struct skcipher_walk walk;
        u8 iv[2 * AES_BLOCK_SIZE];
        u8 tag[AES_BLOCK_SIZE];
        u8 buf[2 * GHASH_BLOCK_SIZE];
        u64 dg[2] = {};
        int nrounds = num_rounds(&ctx->aes_key);
        int err;

        if (req->assoclen)
                gcm_calculate_auth_mac(req, dg);

        memcpy(iv, req->iv, GCM_IV_SIZE);
        put_unaligned_be32(1, iv + GCM_IV_SIZE);

        err = skcipher_walk_aead_decrypt(&walk, req, false);

        if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
                u32 const *rk = NULL;

                kernel_neon_begin();
                pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
                put_unaligned_be32(2, iv + GCM_IV_SIZE);

                do {
                        int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
                        int rem = walk.total - blocks * AES_BLOCK_SIZE;

                        if (rk)
                                kernel_neon_begin();

                        pmull_gcm_decrypt(blocks, dg, walk.dst.virt.addr,
                                          walk.src.virt.addr, ctx->h2, iv,
                                          rk, nrounds);

                        /* check if this is the final iteration of the loop */
                        if (rem < (2 * AES_BLOCK_SIZE)) {
                                u8 *iv2 = iv + AES_BLOCK_SIZE;

                                if (rem > AES_BLOCK_SIZE) {
                                        memcpy(iv2, iv, AES_BLOCK_SIZE);
                                        crypto_inc(iv2, AES_BLOCK_SIZE);
                                }

                                pmull_gcm_encrypt_block(iv, iv, NULL, nrounds);

                                if (rem > AES_BLOCK_SIZE)
                                        pmull_gcm_encrypt_block(iv2, iv2, NULL,
                                                                nrounds);
                        }

                        kernel_neon_end();

                        err = skcipher_walk_done(&walk,
                                        walk.nbytes % (2 * AES_BLOCK_SIZE));

                        rk = ctx->aes_key.key_enc;
                } while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
        } else {
                __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
                put_unaligned_be32(2, iv + GCM_IV_SIZE);

                while (walk.nbytes >= AES_BLOCK_SIZE) {
                        int blocks = walk.nbytes / AES_BLOCK_SIZE;
                        u8 *dst = walk.dst.virt.addr;
                        u8 *src = walk.src.virt.addr;

                        ghash_do_update(blocks, dg, walk.src.virt.addr,
                                        &ctx->ghash_key, NULL);

                        do {
                                __aes_arm64_encrypt(ctx->aes_key.key_enc,
                                                    buf, iv, nrounds);
                                crypto_xor_cpy(dst, src, buf, AES_BLOCK_SIZE);
                                crypto_inc(iv, AES_BLOCK_SIZE);

                                dst += AES_BLOCK_SIZE;
                                src += AES_BLOCK_SIZE;
                        } while (--blocks > 0);

                        err = skcipher_walk_done(&walk,
                                                 walk.nbytes % AES_BLOCK_SIZE);
                }
                if (walk.nbytes)
                        __aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv,
                                            nrounds);
        }

        /* handle the tail */
        if (walk.nbytes) {
                const u8 *src = walk.src.virt.addr;
                const u8 *head = NULL;
                unsigned int nbytes = walk.nbytes;

                if (walk.nbytes > GHASH_BLOCK_SIZE) {
                        head = src;
                        src += GHASH_BLOCK_SIZE;
                        nbytes %= GHASH_BLOCK_SIZE;
                }

                memcpy(buf, src, nbytes);
                memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes);
                ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head);

                crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, iv,
                               walk.nbytes);

                err = skcipher_walk_done(&walk, 0);
        }

        if (err)
                return err;

        gcm_final(req, ctx, dg, tag, req->cryptlen - authsize);

        /* compare calculated auth tag with the stored one */
        scatterwalk_map_and_copy(buf, req->src,
                                 req->assoclen + req->cryptlen - authsize,
                                 authsize, 0);

        if (crypto_memneq(tag, buf, authsize))
                return -EBADMSG;
        return 0;
}

static struct aead_alg gcm_aes_alg = {
        .ivsize                 = GCM_IV_SIZE,
        .chunksize              = 2 * AES_BLOCK_SIZE,
        .maxauthsize            = AES_BLOCK_SIZE,
        .setkey                 = gcm_setkey,
        .setauthsize            = gcm_setauthsize,
        .encrypt                = gcm_encrypt,
        .decrypt                = gcm_decrypt,

        .base.cra_name          = "gcm(aes)",
        .base.cra_driver_name   = "gcm-aes-ce",
        .base.cra_priority      = 300,
        .base.cra_blocksize     = 1,
        .base.cra_ctxsize       = sizeof(struct gcm_aes_ctx),
        .base.cra_module        = THIS_MODULE,
};
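
/*
 * The GHASH shash only needs ASIMD (it can use the _p8 fallback), but the
 * GCM AEAD relies on the 64-bit PMULL instruction, so it is registered
 * only when HWCAP_PMULL is present.
 */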
static int __init ghash_ce_mod_init(void)
{
        int ret;

        if (!(elf_hwcap & HWCAP_ASIMD))
                return -ENODEV;

        if (elf_hwcap & HWCAP_PMULL)
                pmull_ghash_update = pmull_ghash_update_p64;
        else
                pmull_ghash_update = pmull_ghash_update_p8;

        ret = crypto_register_shash(&ghash_alg);
        if (ret)
                return ret;

        if (elf_hwcap & HWCAP_PMULL) {
                ret = crypto_register_aead(&gcm_aes_alg);
                if (ret)
                        crypto_unregister_shash(&ghash_alg);
        }
        return ret;
}

static void __exit ghash_ce_mod_exit(void)
{
        crypto_unregister_shash(&ghash_alg);
        crypto_unregister_aead(&gcm_aes_alg);
}

static const struct cpu_feature ghash_cpu_feature[] = {
        { cpu_feature(PMULL) }, { }
};
MODULE_DEVICE_TABLE(cpu, ghash_cpu_feature);

module_init(ghash_ce_mod_init);
module_exit(ghash_ce_mod_exit);