// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CBC: Cipher Block Chaining mode
 *
 * Copyright (c) 2006-2016 Herbert Xu <herbert@gondor.apana.org.au>
 */
2017-02-27 15:38:25 +03:00
# include <crypto/algapi.h>
2020-12-11 15:27:15 +03:00
# include <crypto/internal/cipher.h>
2016-11-22 15:08:39 +03:00
# include <crypto/internal/skcipher.h>
2006-09-21 05:44:08 +04:00
# include <linux/err.h>
# include <linux/init.h>
# include <linux/kernel.h>
2007-11-20 12:36:00 +03:00
# include <linux/log2.h>
2006-09-21 05:44:08 +04:00
# include <linux/module.h>
2020-09-01 14:49:11 +03:00
static int crypto_cbc_encrypt_segment ( struct skcipher_walk * walk ,
struct crypto_skcipher * skcipher )
2016-11-22 15:08:39 +03:00
{
2020-09-01 14:49:11 +03:00
unsigned int bsize = crypto_skcipher_blocksize ( skcipher ) ;
void ( * fn ) ( struct crypto_tfm * , u8 * , const u8 * ) ;
unsigned int nbytes = walk - > nbytes ;
u8 * src = walk - > src . virt . addr ;
u8 * dst = walk - > dst . virt . addr ;
struct crypto_cipher * cipher ;
struct crypto_tfm * tfm ;
u8 * iv = walk - > iv ;
cipher = skcipher_cipher_simple ( skcipher ) ;
tfm = crypto_cipher_tfm ( cipher ) ;
fn = crypto_cipher_alg ( cipher ) - > cia_encrypt ;
do {
crypto_xor ( iv , src , bsize ) ;
fn ( tfm , dst , iv ) ;
memcpy ( iv , dst , bsize ) ;
src + = bsize ;
dst + = bsize ;
} while ( ( nbytes - = bsize ) > = bsize ) ;
return nbytes ;
}
static int crypto_cbc_encrypt_inplace ( struct skcipher_walk * walk ,
struct crypto_skcipher * skcipher )
{
unsigned int bsize = crypto_skcipher_blocksize ( skcipher ) ;
void ( * fn ) ( struct crypto_tfm * , u8 * , const u8 * ) ;
unsigned int nbytes = walk - > nbytes ;
u8 * src = walk - > src . virt . addr ;
struct crypto_cipher * cipher ;
struct crypto_tfm * tfm ;
u8 * iv = walk - > iv ;
cipher = skcipher_cipher_simple ( skcipher ) ;
tfm = crypto_cipher_tfm ( cipher ) ;
fn = crypto_cipher_alg ( cipher ) - > cia_encrypt ;
do {
crypto_xor ( src , iv , bsize ) ;
fn ( tfm , src , src ) ;
iv = src ;
src + = bsize ;
} while ( ( nbytes - = bsize ) > = bsize ) ;
memcpy ( walk - > iv , iv , bsize ) ;
return nbytes ;
2016-11-22 15:08:39 +03:00
}
static int crypto_cbc_encrypt ( struct skcipher_request * req )
2006-09-21 05:44:08 +04:00
{
2020-09-01 14:49:11 +03:00
struct crypto_skcipher * skcipher = crypto_skcipher_reqtfm ( req ) ;
struct skcipher_walk walk ;
int err ;
err = skcipher_walk_virt ( & walk , req , false ) ;
while ( walk . nbytes ) {
if ( walk . src . virt . addr = = walk . dst . virt . addr )
err = crypto_cbc_encrypt_inplace ( & walk , skcipher ) ;
else
err = crypto_cbc_encrypt_segment ( & walk , skcipher ) ;
err = skcipher_walk_done ( & walk , err ) ;
}
return err ;
}
static int crypto_cbc_decrypt_segment ( struct skcipher_walk * walk ,
struct crypto_skcipher * skcipher )
{
unsigned int bsize = crypto_skcipher_blocksize ( skcipher ) ;
void ( * fn ) ( struct crypto_tfm * , u8 * , const u8 * ) ;
unsigned int nbytes = walk - > nbytes ;
u8 * src = walk - > src . virt . addr ;
u8 * dst = walk - > dst . virt . addr ;
struct crypto_cipher * cipher ;
struct crypto_tfm * tfm ;
u8 * iv = walk - > iv ;
cipher = skcipher_cipher_simple ( skcipher ) ;
tfm = crypto_cipher_tfm ( cipher ) ;
fn = crypto_cipher_alg ( cipher ) - > cia_decrypt ;
do {
fn ( tfm , dst , src ) ;
crypto_xor ( dst , iv , bsize ) ;
iv = src ;
src + = bsize ;
dst + = bsize ;
} while ( ( nbytes - = bsize ) > = bsize ) ;
memcpy ( walk - > iv , iv , bsize ) ;
return nbytes ;
2016-11-22 15:08:39 +03:00
}
/*
 * Decrypt one walk step in place (source buffer == destination buffer).
 *
 * Because each plaintext block needs the *previous ciphertext* block, and
 * that block is about to be overwritten, the buffer is processed from the
 * last full block backwards: every block is first decrypted in place, then
 * XORed with the (still intact) ciphertext block before it.  Returns the
 * number of tail bytes (< blocksize) left unprocessed.
 */
static int crypto_cbc_decrypt_inplace(struct skcipher_walk *walk,
				      struct crypto_skcipher *skcipher)
{
	unsigned int bsize = crypto_skcipher_blocksize(skcipher);
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	/* Copy of the final ciphertext block, needed as the chaining value
	 * for the next walk step after it has been decrypted in place. */
	u8 last_iv[MAX_CIPHER_BLOCKSIZE];
	struct crypto_cipher *cipher;
	struct crypto_tfm *tfm;

	cipher = skcipher_cipher_simple(skcipher);
	tfm = crypto_cipher_tfm(cipher);
	fn = crypto_cipher_alg(cipher)->cia_decrypt;

	/* Start of the last block. */
	src += nbytes - (nbytes & (bsize - 1)) - bsize;
	memcpy(last_iv, src, bsize);

	for (;;) {
		fn(tfm, src, src);
		if ((nbytes -= bsize) < bsize)
			break;
		/* XOR with the previous, not-yet-decrypted ciphertext block. */
		crypto_xor(src, src - bsize, bsize);
		src -= bsize;
	}

	/* First block chains against the IV from the previous walk step. */
	crypto_xor(src, walk->iv, bsize);
	memcpy(walk->iv, last_iv, bsize);

	return nbytes;
}
static int crypto_cbc_decrypt ( struct skcipher_request * req )
{
2020-09-01 14:49:11 +03:00
struct crypto_skcipher * skcipher = crypto_skcipher_reqtfm ( req ) ;
2016-11-22 15:08:39 +03:00
struct skcipher_walk walk ;
2006-09-21 05:44:08 +04:00
int err ;
2016-11-22 15:08:39 +03:00
err = skcipher_walk_virt ( & walk , req , false ) ;
2006-09-21 05:44:08 +04:00
2016-11-22 15:08:39 +03:00
while ( walk . nbytes ) {
2020-09-01 14:49:11 +03:00
if ( walk . src . virt . addr = = walk . dst . virt . addr )
err = crypto_cbc_decrypt_inplace ( & walk , skcipher ) ;
else
err = crypto_cbc_decrypt_segment ( & walk , skcipher ) ;
2016-11-22 15:08:39 +03:00
err = skcipher_walk_done ( & walk , err ) ;
2006-09-21 05:44:08 +04:00
}
return err ;
}
2016-11-22 15:08:39 +03:00
static int crypto_cbc_create ( struct crypto_template * tmpl , struct rtattr * * tb )
{
struct skcipher_instance * inst ;
2006-09-21 05:44:08 +04:00
struct crypto_alg * alg ;
2007-01-01 10:37:02 +03:00
int err ;
2019-12-20 08:29:40 +03:00
inst = skcipher_alloc_instance_simple ( tmpl , tb ) ;
2019-01-04 07:16:15 +03:00
if ( IS_ERR ( inst ) )
return PTR_ERR ( inst ) ;
2007-11-20 12:36:00 +03:00
2019-12-20 08:29:40 +03:00
alg = skcipher_ialg_simple ( inst ) ;
2016-11-22 15:08:39 +03:00
err = - EINVAL ;
if ( ! is_power_of_2 ( alg - > cra_blocksize ) )
2019-01-04 07:16:15 +03:00
goto out_free_inst ;
2006-09-21 05:44:08 +04:00
2016-11-22 15:08:39 +03:00
inst - > alg . encrypt = crypto_cbc_encrypt ;
inst - > alg . decrypt = crypto_cbc_decrypt ;
2006-09-21 05:44:08 +04:00
2016-11-22 15:08:39 +03:00
err = skcipher_register_instance ( tmpl , inst ) ;
2019-12-20 08:29:40 +03:00
if ( err ) {
2019-01-04 07:16:15 +03:00
out_free_inst :
2019-12-20 08:29:40 +03:00
inst - > free ( inst ) ;
}
2019-01-04 07:16:15 +03:00
return err ;
2006-09-21 05:44:08 +04:00
}
/* Template so that "cbc(<cipher>)" instances can be created on demand. */
static struct crypto_template crypto_cbc_tmpl = {
	.name = "cbc",
	.create = crypto_cbc_create,
	.module = THIS_MODULE,
};
/* Module init: register the "cbc" template with the crypto API. */
static int __init crypto_cbc_module_init(void)
{
	return crypto_register_template(&crypto_cbc_tmpl);
}
/* Module exit: unregister the "cbc" template. */
static void __exit crypto_cbc_module_exit(void)
{
	crypto_unregister_template(&crypto_cbc_tmpl);
}
/* Register early (subsys initcall) so the template is available to users
 * that initialize during later initcall levels. */
subsys_initcall(crypto_cbc_module_init);
module_exit(crypto_cbc_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CBC block cipher mode of operation");
MODULE_ALIAS_CRYPTO("cbc");