/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <linux/bug.h>
#include <linux/module.h>

#include "internal.h"
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_blkcipher_type)
		return sizeof(struct crypto_blkcipher *);

	BUG_ON(alg->cra_type != &crypto_ablkcipher_type &&
	       alg->cra_type != &crypto_givcipher_type);

	return sizeof(struct crypto_ablkcipher *);
}
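
/*
 * Forward a setkey request to the wrapped blkcipher, propagating the
 * request flags down and the result flags back up to the skcipher.
 */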
static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_blkcipher *blkcipher = *ctx;
	int err;

	crypto_blkcipher_clear_flags(blkcipher, ~0);
	crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
					      CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(blkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
				       CRYPTO_TFM_RES_MASK);

	return err;
}
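
/*
 * Run a synchronous blkcipher operation on behalf of an skcipher
 * request.  The blkcipher_desc lives on the stack and is filled from
 * the request's IV and flags, so this path needs no request context.
 */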
static int skcipher_crypt_blkcipher(struct skcipher_request *req,
				    int (*crypt)(struct blkcipher_desc *,
						 struct scatterlist *,
						 struct scatterlist *,
						 unsigned int))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct blkcipher_desc desc = {
		.tfm = *ctx,
		.info = req->iv,
		.flags = req->base.flags,
	};

	return crypt(&desc, req->dst, req->src, req->cryptlen);
}

static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(*ctx);
}
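
/*
 * Set up an skcipher handle backed by a synchronous blkcipher: a
 * blkcipher tfm is allocated for the same algorithm and the skcipher
 * entry points are wired up to the blkcipher wrappers above.
 */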
static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *blkcipher;
	struct crypto_tfm *btfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(btfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(btfm);
	}

	blkcipher = __crypto_blkcipher_cast(btfm);
	*ctx = blkcipher;
	tfm->exit = crypto_exit_skcipher_ops_blkcipher;

	skcipher->setkey = skcipher_setkey_blkcipher;
	skcipher->encrypt = skcipher_encrypt_blkcipher;
	skcipher->decrypt = skcipher_decrypt_blkcipher;
	skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
	skcipher->keysize = calg->cra_blkcipher.max_keysize;

	return 0;
}
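
/*
 * Forward a setkey request to the wrapped ablkcipher, again keeping
 * request and result flags in sync between the two handles.
 */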
static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
				      const u8 *key, unsigned int keylen)
{
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher = *ctx;
	int err;

	crypto_ablkcipher_clear_flags(ablkcipher, ~0);
	crypto_ablkcipher_set_flags(ablkcipher,
				    crypto_skcipher_get_flags(tfm) &
				    CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm,
				  crypto_ablkcipher_get_flags(ablkcipher) &
				  CRYPTO_TFM_RES_MASK);

	return err;
}
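
/*
 * Run an asynchronous operation by building an ablkcipher subrequest
 * in the skcipher request context (sized for exactly this purpose in
 * the init function below) and mirroring the buffers, length, IV and
 * completion callback into it.
 */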
static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
				     int (*crypt)(struct ablkcipher_request *))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct ablkcipher_request *subreq = skcipher_request_ctx(req);

	ablkcipher_request_set_tfm(subreq, *ctx);
	ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				     req->iv);

	return crypt(subreq);
}

static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(*ctx);
}
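
/*
 * Set up an skcipher handle backed by an ablkcipher.  Besides wiring
 * up the entry points, the request size is extended so each skcipher
 * request can carry the ablkcipher subrequest used above.
 */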
static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher;
	struct crypto_tfm *abtfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	abtfm = __crypto_alloc_tfm(calg, 0, 0);
	if (IS_ERR(abtfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(abtfm);
	}

	ablkcipher = __crypto_ablkcipher_cast(abtfm);
	*ctx = ablkcipher;
	tfm->exit = crypto_exit_skcipher_ops_ablkcipher;

	skcipher->setkey = skcipher_setkey_ablkcipher;
	skcipher->encrypt = skcipher_encrypt_ablkcipher;
	skcipher->decrypt = skcipher_decrypt_ablkcipher;
	skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
			    sizeof(struct ablkcipher_request);
	skcipher->keysize = calg->cra_ablkcipher.max_keysize;

	return 0;
}
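
/*
 * Per-instance initialisation: dispatch on the underlying algorithm
 * type to the matching ops setup.  Anything else is a bug, since the
 * lookup mask below only admits blkcipher-class algorithm types.
 */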
static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
		return crypto_init_skcipher_ops_blkcipher(tfm);

	BUG_ON(tfm->__crt_alg->cra_type != &crypto_ablkcipher_type &&
	       tfm->__crt_alg->cra_type != &crypto_givcipher_type);

	return crypto_init_skcipher_ops_ablkcipher(tfm);
}
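
/*
 * The crypto_type glue that presents legacy blkcipher, ablkcipher and
 * givcipher algorithms through the skcipher interface.
 */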
static const struct crypto_type crypto_skcipher_type2 = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_BLKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};
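
/**
 * crypto_alloc_skcipher() - allocate a symmetric key cipher handle
 * @alg_name: name of the algorithm, e.g. "cbc(aes)"
 * @type: algorithm type
 * @mask: algorithm type mask
 *
 * Return: an skcipher handle on success; an ERR_PTR() encoded error
 * otherwise.
 */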
struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
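
/*
 * Typical caller flow (a minimal sketch, not part of this file; the
 * algorithm name, key and scatterlists are illustrative, and error
 * handling is abbreviated).  With an asynchronous backend the encrypt
 * call may return -EINPROGRESS and complete via the request callback.
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_skcipher_setkey(tfm, key, keylen);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_crypt(req, src, dst, nbytes, iv);
 *	err = crypto_skcipher_encrypt(req);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */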

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");