/*
 * echainiv: Encrypted Chain IV Generator
 *
 * This generator generates an IV based on a sequence number by xoring it
 * with a salt and then encrypting it with the same key as used to encrypt
 * the plain text.  This algorithm requires that the block size be equal
 * to the IV size.  It is mainly useful for CBC.
 *
 * This generator can only be used by algorithms where authentication
 * is performed after encryption (i.e., authenc).
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
2015-05-27 09:37:33 +03:00
# include <crypto/internal/geniv.h>
2015-05-21 10:11:15 +03:00
# include <crypto/null.h>
# include <crypto/rng.h>
# include <crypto/scatterwalk.h>
# include <linux/err.h>
# include <linux/init.h>
# include <linux/kernel.h>
# include <linux/mm.h>
# include <linux/module.h>
# include <linux/percpu.h>
# include <linux/spinlock.h>
# include <linux/string.h>
# define MAX_IV_SIZE 16
struct echainiv_ctx {
2015-05-27 09:37:33 +03:00
/* aead_geniv_ctx must be first the element */
struct aead_geniv_ctx geniv ;
2015-05-21 10:11:15 +03:00
struct crypto_blkcipher * null ;
u8 salt [ ] __attribute__ ( ( aligned ( __alignof__ ( u32 ) ) ) ) ;
} ;
/* Per-cpu store for the running chained IV, in u32 words. */
static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);
/* We don't care if we get preempted and read/write IVs from the next CPU. */
2015-05-23 06:22:47 +03:00
static void echainiv_read_iv ( u8 * dst , unsigned size )
2015-05-21 10:11:15 +03:00
{
u32 * a = ( u32 * ) dst ;
u32 __percpu * b = echainiv_iv ;
for ( ; size > = 4 ; size - = 4 ) {
* a + + = this_cpu_read ( * b ) ;
b + + ;
}
}
2015-05-23 06:22:47 +03:00
static void echainiv_write_iv ( const u8 * src , unsigned size )
2015-05-21 10:11:15 +03:00
{
const u32 * a = ( const u32 * ) src ;
u32 __percpu * b = echainiv_iv ;
for ( ; size > = 4 ; size - = 4 ) {
this_cpu_write ( * b , * a ) ;
a + + ;
b + + ;
}
}
static void echainiv_encrypt_complete2 ( struct aead_request * req , int err )
{
struct aead_request * subreq = aead_request_ctx ( req ) ;
struct crypto_aead * geniv ;
unsigned int ivsize ;
if ( err = = - EINPROGRESS )
return ;
if ( err )
goto out ;
geniv = crypto_aead_reqtfm ( req ) ;
ivsize = crypto_aead_ivsize ( geniv ) ;
echainiv_write_iv ( subreq - > iv , ivsize ) ;
if ( req - > iv ! = subreq - > iv )
memcpy ( req - > iv , subreq - > iv , ivsize ) ;
out :
if ( req - > iv ! = subreq - > iv )
kzfree ( subreq - > iv ) ;
}
static void echainiv_encrypt_complete ( struct crypto_async_request * base ,
int err )
{
struct aead_request * req = base - > data ;
echainiv_encrypt_complete2 ( req , err ) ;
aead_request_complete ( req , err ) ;
}
static int echainiv_encrypt ( struct aead_request * req )
{
struct crypto_aead * geniv = crypto_aead_reqtfm ( req ) ;
struct echainiv_ctx * ctx = crypto_aead_ctx ( geniv ) ;
struct aead_request * subreq = aead_request_ctx ( req ) ;
crypto_completion_t compl ;
void * data ;
u8 * info ;
2015-05-23 10:41:54 +03:00
unsigned int ivsize = crypto_aead_ivsize ( geniv ) ;
2015-05-21 10:11:15 +03:00
int err ;
2015-05-23 10:41:54 +03:00
if ( req - > cryptlen < ivsize )
return - EINVAL ;
2015-05-27 09:37:33 +03:00
aead_request_set_tfm ( subreq , ctx - > geniv . child ) ;
2015-05-21 10:11:15 +03:00
compl = echainiv_encrypt_complete ;
data = req ;
info = req - > iv ;
if ( req - > src ! = req - > dst ) {
struct blkcipher_desc desc = {
. tfm = ctx - > null ,
} ;
err = crypto_blkcipher_encrypt (
2015-05-27 09:37:31 +03:00
& desc , req - > dst , req - > src ,
req - > assoclen + req - > cryptlen ) ;
2015-05-21 10:11:15 +03:00
if ( err )
return err ;
}
if ( unlikely ( ! IS_ALIGNED ( ( unsigned long ) info ,
crypto_aead_alignmask ( geniv ) + 1 ) ) ) {
info = kmalloc ( ivsize , req - > base . flags &
CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC ) ;
if ( ! info )
return - ENOMEM ;
memcpy ( info , req - > iv , ivsize ) ;
}
aead_request_set_callback ( subreq , req - > base . flags , compl , data ) ;
aead_request_set_crypt ( subreq , req - > dst , req - > dst ,
req - > cryptlen - ivsize , info ) ;
2015-05-23 10:41:57 +03:00
aead_request_set_ad ( subreq , req - > assoclen + ivsize ) ;
2015-05-21 10:11:15 +03:00
crypto_xor ( info , ctx - > salt , ivsize ) ;
scatterwalk_map_and_copy ( info , req - > dst , req - > assoclen , ivsize , 1 ) ;
echainiv_read_iv ( info , ivsize ) ;
err = crypto_aead_encrypt ( subreq ) ;
echainiv_encrypt_complete2 ( req , err ) ;
return err ;
}
static int echainiv_decrypt ( struct aead_request * req )
{
struct crypto_aead * geniv = crypto_aead_reqtfm ( req ) ;
struct echainiv_ctx * ctx = crypto_aead_ctx ( geniv ) ;
struct aead_request * subreq = aead_request_ctx ( req ) ;
crypto_completion_t compl ;
void * data ;
2015-05-23 10:41:54 +03:00
unsigned int ivsize = crypto_aead_ivsize ( geniv ) ;
if ( req - > cryptlen < ivsize + crypto_aead_authsize ( geniv ) )
return - EINVAL ;
2015-05-21 10:11:15 +03:00
2015-05-27 09:37:33 +03:00
aead_request_set_tfm ( subreq , ctx - > geniv . child ) ;
2015-05-21 10:11:15 +03:00
compl = req - > base . complete ;
data = req - > base . data ;
aead_request_set_callback ( subreq , req - > base . flags , compl , data ) ;
aead_request_set_crypt ( subreq , req - > src , req - > dst ,
req - > cryptlen - ivsize , req - > iv ) ;
2015-05-23 10:41:57 +03:00
aead_request_set_ad ( subreq , req - > assoclen + ivsize ) ;
2015-05-21 10:11:15 +03:00
scatterwalk_map_and_copy ( req - > iv , req - > src , req - > assoclen , ivsize , 0 ) ;
if ( req - > src ! = req - > dst )
scatterwalk_map_and_copy ( req - > iv , req - > dst ,
req - > assoclen , ivsize , 1 ) ;
return crypto_aead_decrypt ( subreq ) ;
}
static int echainiv_init ( struct crypto_tfm * tfm )
{
struct crypto_aead * geniv = __crypto_aead_cast ( tfm ) ;
struct echainiv_ctx * ctx = crypto_aead_ctx ( geniv ) ;
int err ;
2015-05-27 09:37:33 +03:00
spin_lock_init ( & ctx - > geniv . lock ) ;
2015-05-21 10:11:15 +03:00
crypto_aead_set_reqsize ( geniv , sizeof ( struct aead_request ) ) ;
2015-06-21 14:11:50 +03:00
err = crypto_get_default_rng ( ) ;
if ( err )
goto out ;
2015-06-03 09:49:24 +03:00
err = crypto_rng_get_bytes ( crypto_default_rng , ctx - > salt ,
crypto_aead_ivsize ( geniv ) ) ;
2015-06-21 14:11:50 +03:00
crypto_put_default_rng ( ) ;
2015-06-03 09:49:24 +03:00
if ( err )
goto out ;
2015-05-21 10:11:15 +03:00
ctx - > null = crypto_get_default_null_skcipher ( ) ;
err = PTR_ERR ( ctx - > null ) ;
if ( IS_ERR ( ctx - > null ) )
goto out ;
err = aead_geniv_init ( tfm ) ;
if ( err )
goto drop_null ;
2015-05-27 09:37:33 +03:00
ctx - > geniv . child = geniv - > child ;
2015-05-21 10:11:15 +03:00
geniv - > child = geniv ;
out :
return err ;
drop_null :
crypto_put_default_null_skcipher ( ) ;
goto out ;
}
static void echainiv_exit ( struct crypto_tfm * tfm )
{
struct echainiv_ctx * ctx = crypto_tfm_ctx ( tfm ) ;
2015-05-27 09:37:33 +03:00
crypto_free_aead ( ctx - > geniv . child ) ;
2015-05-21 10:11:15 +03:00
crypto_put_default_null_skcipher ( ) ;
}
2015-05-23 10:41:52 +03:00
static int echainiv_aead_create ( struct crypto_template * tmpl ,
struct rtattr * * tb )
2015-05-21 10:11:15 +03:00
{
struct aead_instance * inst ;
struct crypto_aead_spawn * spawn ;
struct aead_alg * alg ;
2015-05-23 10:41:52 +03:00
int err ;
2015-05-21 10:11:15 +03:00
2015-05-23 10:41:52 +03:00
inst = aead_geniv_alloc ( tmpl , tb , 0 , 0 ) ;
2015-05-21 10:11:15 +03:00
if ( IS_ERR ( inst ) )
2015-05-23 10:41:52 +03:00
return PTR_ERR ( inst ) ;
2015-05-21 10:11:15 +03:00
2015-05-27 09:37:33 +03:00
spawn = aead_instance_ctx ( inst ) ;
alg = crypto_spawn_aead_alg ( spawn ) ;
if ( alg - > base . cra_aead . encrypt )
goto done ;
2015-05-23 10:41:52 +03:00
err = - EINVAL ;
2015-05-27 09:37:33 +03:00
if ( inst - > alg . ivsize & ( sizeof ( u32 ) - 1 ) | |
2015-05-23 10:41:52 +03:00
inst - > alg . ivsize > MAX_IV_SIZE )
goto free_inst ;
2015-05-21 10:11:15 +03:00
2015-06-03 09:49:24 +03:00
inst - > alg . encrypt = echainiv_encrypt ;
2015-05-21 10:11:15 +03:00
inst - > alg . decrypt = echainiv_decrypt ;
inst - > alg . base . cra_init = echainiv_init ;
inst - > alg . base . cra_exit = echainiv_exit ;
inst - > alg . base . cra_alignmask | = __alignof__ ( u32 ) - 1 ;
inst - > alg . base . cra_ctxsize = sizeof ( struct echainiv_ctx ) ;
2015-05-27 09:37:34 +03:00
inst - > alg . base . cra_ctxsize + = inst - > alg . ivsize ;
2015-05-21 10:11:15 +03:00
2015-05-27 09:37:33 +03:00
done :
2015-05-23 10:41:52 +03:00
err = aead_register_instance ( tmpl , inst ) ;
if ( err )
goto free_inst ;
2015-05-21 10:11:15 +03:00
out :
2015-05-23 10:41:52 +03:00
return err ;
free_inst :
aead_geniv_free ( inst ) ;
goto out ;
2015-05-21 10:11:15 +03:00
}
/* Template free hook: delegate to the generic geniv destructor. */
static void echainiv_free(struct crypto_instance *inst)
{
	aead_geniv_free(aead_instance(inst));
}
static struct crypto_template echainiv_tmpl = {
. name = " echainiv " ,
2015-06-21 14:11:50 +03:00
. create = echainiv_aead_create ,
2015-05-21 10:11:15 +03:00
. free = echainiv_free ,
. module = THIS_MODULE ,
} ;
static int __init echainiv_module_init(void)
{
	return crypto_register_template(&echainiv_tmpl);
}
static void __exit echainiv_module_exit(void)
{
	crypto_unregister_template(&echainiv_tmpl);
}
module_init(echainiv_module_init);
module_exit(echainiv_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Encrypted Chain IV Generator");
MODULE_ALIAS_CRYPTO("echainiv");