/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
# include <crypto/algapi.h>
2008-07-10 12:01:22 +04:00
# include <crypto/internal/hash.h>
2010-09-20 12:05:12 +04:00
# include <crypto/internal/aead.h>
2009-01-18 08:19:46 +03:00
# include <crypto/cryptd.h>
2009-02-19 09:42:19 +03:00
# include <crypto/crypto_wq.h>
2007-04-16 14:49:20 +04:00
# include <linux/err.h>
# include <linux/init.h>
# include <linux/kernel.h>
# include <linux/list.h>
# include <linux/module.h>
# include <linux/scatterlist.h>
# include <linux/sched.h>
# include <linux/slab.h>
2009-02-19 09:42:19 +03:00
/* Maximum number of requests queued per CPU before enqueue backlogs. */
#define CRYPTD_MAX_CPU_QLEN 100

/* Per-CPU request queue plus the work item that drains it. */
struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

/* Top-level cryptd queue: one cryptd_cpu_queue per possible CPU. */
struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

/* Instance context for plain (blkcipher) cryptd instances. */
struct cryptd_instance_ctx {
	struct crypto_spawn spawn;	/* underlying synchronous algorithm */
	struct cryptd_queue *queue;	/* queue requests are dispatched to */
};

/* Instance context for hash instances wrapping a shash algorithm. */
struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

/* Instance context for AEAD instances. */
struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

/* Per-tfm context: the synchronous child transform doing the real work. */
struct cryptd_blkcipher_ctx {
	struct crypto_blkcipher *child;
};

/* Per-request context: saves the caller's completion callback. */
struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;	/* caller's original completion */
	struct shash_desc desc;		/* shash state; sized in init_tfm */
};

struct cryptd_aead_ctx {
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};
2009-02-19 09:42:19 +03:00
static void cryptd_queue_worker(struct work_struct *work);

/*
 * Allocate the per-CPU queues and initialise each with the given maximum
 * backlog length and the shared worker function.  Returns 0 or -ENOMEM.
 */
static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	return 0;
}

/*
 * Tear down the per-CPU queues.  All queues must already be drained;
 * a non-empty queue here is a bug (requests would be leaked).
 */
static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

/*
 * Queue a request on the current CPU's queue and kick that CPU's worker.
 * get_cpu()/put_cpu() pin us to one CPU so the queue we enqueue on is the
 * same one we schedule work on.  Returns the crypto_enqueue_request()
 * result (e.g. -EINPROGRESS or -EBUSY for backlogged requests).
 */
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}
/* Called in workqueue context, do one real cryption work (via
 * req->complete) and reschedule itself if there are more work to
 * do. */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging crypto workqueue.
	 * preempt_disable/enable is used to prevent being preempted by
	 * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
	 * cryptd_enqueue_request() being accessed from software interrupts.
	 */
	local_bh_disable();
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();
	local_bh_enable();

	if (!req)
		return;

	/* Notify the owner of a backlogged request that it is now queued. */
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	/* req->complete was rewired at enqueue time to the cryptd_* handler
	 * that performs the actual synchronous operation. */
	req->complete(req, 0);

	/* More pending work: reschedule ourselves rather than looping. */
	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}
static inline struct cryptd_queue * cryptd_get_queue ( struct crypto_tfm * tfm )
2007-04-16 14:49:20 +04:00
{
struct crypto_instance * inst = crypto_tfm_alg_instance ( tfm ) ;
struct cryptd_instance_ctx * ictx = crypto_instance_ctx ( inst ) ;
2009-02-19 09:42:19 +03:00
return ictx - > queue ;
2007-04-16 14:49:20 +04:00
}
/*
 * Set the key on the synchronous child cipher, forwarding the parent's
 * request flags down and propagating the child's result flags back up
 * so key-error reporting reaches the caller.
 */
static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}
/*
 * Perform the real synchronous en/decryption in worker context via the
 * supplied @crypt operation, then invoke the caller's saved completion.
 * @err == -EINPROGRESS means we are only relaying a backlog notification,
 * so skip the crypto work and just forward the status.
 */
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct blkcipher_desc desc;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	/* Restore the caller's completion before reporting back. */
	req->base.complete = rctx->complete;

out:
	/* Completions are normally called from softirq context; disable
	 * bottom halves so the callback sees a consistent environment. */
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

/* Worker-side completion stub: run the child cipher's encrypt. */
static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

/* Worker-side completion stub: run the child cipher's decrypt. */
static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}
/*
 * Queue an ablkcipher request for asynchronous processing.  The caller's
 * completion is stashed in the request context and replaced with the
 * worker-side handler (@complete) that does the real work.
 */
static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t complete)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

/* Async entry point: queue an encrypt request. */
static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

/* Async entry point: queue a decrypt request. */
static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}
/*
 * Instantiate the synchronous child cipher from the instance's spawn and
 * reserve per-request space for the saved completion callback.
 */
static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

/* Release the child cipher acquired in init_tfm. */
static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}
2009-07-14 15:11:32 +04:00
/*
 * Allocate a crypto instance with @head bytes before it (e.g. for an
 * ahash_instance header) and @tail bytes of context after it, and fill
 * in the generic algorithm fields derived from @alg:
 * "cryptd(<driver>)" naming, priority boosted by 50 so cryptd wins
 * algorithm selection, and blocksize/alignmask copied through.
 * Returns the start of the whole allocation (not the instance pointer)
 * or an ERR_PTR.
 */
static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}
2009-07-14 14:45:45 +04:00
static int cryptd_create_blkcipher ( struct crypto_template * tmpl ,
struct rtattr * * tb ,
struct cryptd_queue * queue )
2007-04-16 14:49:20 +04:00
{
2009-07-12 17:38:59 +04:00
struct cryptd_instance_ctx * ctx ;
2007-04-16 14:49:20 +04:00
struct crypto_instance * inst ;
struct crypto_alg * alg ;
2009-07-12 17:38:59 +04:00
int err ;
2007-04-16 14:49:20 +04:00
alg = crypto_get_attr_alg ( tb , CRYPTO_ALG_TYPE_BLKCIPHER ,
2007-11-15 17:36:07 +03:00
CRYPTO_ALG_TYPE_MASK ) ;
2007-04-16 14:49:20 +04:00
if ( IS_ERR ( alg ) )
2009-07-14 14:45:45 +04:00
return PTR_ERR ( alg ) ;
2007-04-16 14:49:20 +04:00
2009-07-14 15:11:32 +04:00
inst = cryptd_alloc_instance ( alg , 0 , sizeof ( * ctx ) ) ;
2009-07-15 12:51:04 +04:00
err = PTR_ERR ( inst ) ;
2007-04-16 14:49:20 +04:00
if ( IS_ERR ( inst ) )
goto out_put_alg ;
2009-07-12 17:38:59 +04:00
ctx = crypto_instance_ctx ( inst ) ;
ctx - > queue = queue ;
err = crypto_init_spawn ( & ctx - > spawn , alg , inst ,
CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC ) ;
if ( err )
goto out_free_inst ;
2007-11-15 17:36:07 +03:00
inst - > alg . cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC ;
2007-04-16 14:49:20 +04:00
inst - > alg . cra_type = & crypto_ablkcipher_type ;
inst - > alg . cra_ablkcipher . ivsize = alg - > cra_blkcipher . ivsize ;
inst - > alg . cra_ablkcipher . min_keysize = alg - > cra_blkcipher . min_keysize ;
inst - > alg . cra_ablkcipher . max_keysize = alg - > cra_blkcipher . max_keysize ;
2007-11-27 16:15:31 +03:00
inst - > alg . cra_ablkcipher . geniv = alg - > cra_blkcipher . geniv ;
2007-04-16 14:49:20 +04:00
inst - > alg . cra_ctxsize = sizeof ( struct cryptd_blkcipher_ctx ) ;
inst - > alg . cra_init = cryptd_blkcipher_init_tfm ;
inst - > alg . cra_exit = cryptd_blkcipher_exit_tfm ;
inst - > alg . cra_ablkcipher . setkey = cryptd_blkcipher_setkey ;
inst - > alg . cra_ablkcipher . encrypt = cryptd_blkcipher_encrypt_enqueue ;
inst - > alg . cra_ablkcipher . decrypt = cryptd_blkcipher_decrypt_enqueue ;
2009-07-14 14:45:45 +04:00
err = crypto_register_instance ( tmpl , inst ) ;
if ( err ) {
crypto_drop_spawn ( & ctx - > spawn ) ;
out_free_inst :
kfree ( inst ) ;
}
2007-04-16 14:49:20 +04:00
out_put_alg :
crypto_mod_put ( alg ) ;
2009-07-14 14:45:45 +04:00
return err ;
2007-04-16 14:49:20 +04:00
}
2008-05-14 17:23:00 +04:00
static int cryptd_hash_init_tfm ( struct crypto_tfm * tfm )
{
struct crypto_instance * inst = crypto_tfm_alg_instance ( tfm ) ;
2009-07-12 17:38:59 +04:00
struct hashd_instance_ctx * ictx = crypto_instance_ctx ( inst ) ;
struct crypto_shash_spawn * spawn = & ictx - > spawn ;
2008-05-14 17:23:00 +04:00
struct cryptd_hash_ctx * ctx = crypto_tfm_ctx ( tfm ) ;
2009-07-12 17:38:59 +04:00
struct crypto_shash * hash ;
2008-05-14 17:23:00 +04:00
2009-07-12 17:38:59 +04:00
hash = crypto_spawn_shash ( spawn ) ;
if ( IS_ERR ( hash ) )
return PTR_ERR ( hash ) ;
2008-05-14 17:23:00 +04:00
2009-07-12 17:38:59 +04:00
ctx - > child = hash ;
2009-07-12 19:06:33 +04:00
crypto_ahash_set_reqsize ( __crypto_ahash_cast ( tfm ) ,
sizeof ( struct cryptd_hash_request_ctx ) +
crypto_shash_descsize ( hash ) ) ;
2008-05-14 17:23:00 +04:00
return 0 ;
}
static void cryptd_hash_exit_tfm ( struct crypto_tfm * tfm )
{
struct cryptd_hash_ctx * ctx = crypto_tfm_ctx ( tfm ) ;
2009-07-12 17:38:59 +04:00
crypto_free_shash ( ctx - > child ) ;
2008-05-14 17:23:00 +04:00
}
/*
 * Set the key on the child shash, forwarding request flags down and
 * result flags back up, mirroring cryptd_blkcipher_setkey.
 */
static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

/*
 * Queue an ahash request, saving the caller's completion and installing
 * the worker-side handler (@complete).
 */
static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t complete)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}
/*
 * Worker-side handler for ahash init: set up the shash descriptor in the
 * request context and run crypto_shash_init().  -EINPROGRESS means we
 * are only relaying a backlog notification.
 */
static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	/* Restore the caller's completion before reporting back. */
	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

/* Async entry point: queue an init request. */
static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}
/* Worker-side handler for ahash update via shash_ahash_update(). */
static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

/* Worker-side handler for ahash final via crypto_shash_final(). */
static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

/* Worker-side handler for ahash finup via shash_ahash_finup(). */
static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}
2008-05-14 17:23:00 +04:00
static void cryptd_hash_digest ( struct crypto_async_request * req_async , int err )
{
2009-07-12 17:38:59 +04:00
struct cryptd_hash_ctx * ctx = crypto_tfm_ctx ( req_async - > tfm ) ;
struct crypto_shash * child = ctx - > child ;
struct ahash_request * req = ahash_request_cast ( req_async ) ;
struct cryptd_hash_request_ctx * rctx = ahash_request_ctx ( req ) ;
struct shash_desc * desc = & rctx - > desc ;
2008-05-14 17:23:00 +04:00
if ( unlikely ( err = = - EINPROGRESS ) )
goto out ;
2009-07-12 17:38:59 +04:00
desc - > tfm = child ;
desc - > flags = CRYPTO_TFM_REQ_MAY_SLEEP ;
2008-05-14 17:23:00 +04:00
2009-07-12 17:38:59 +04:00
err = shash_ahash_digest ( req , desc ) ;
2008-05-14 17:23:00 +04:00
req - > base . complete = rctx - > complete ;
out :
local_bh_disable ( ) ;
rctx - > complete ( & req - > base , err ) ;
local_bh_enable ( ) ;
}
static int cryptd_hash_digest_enqueue ( struct ahash_request * req )
{
return cryptd_hash_enqueue ( req , cryptd_hash_digest ) ;
}
2009-07-22 07:10:22 +04:00
static int cryptd_hash_export ( struct ahash_request * req , void * out )
{
struct cryptd_hash_request_ctx * rctx = ahash_request_ctx ( req ) ;
return crypto_shash_export ( & rctx - > desc , out ) ;
}
static int cryptd_hash_import ( struct ahash_request * req , const void * in )
{
struct cryptd_hash_request_ctx * rctx = ahash_request_ctx ( req ) ;
return crypto_shash_import ( & rctx - > desc , in ) ;
}
2009-07-14 14:45:45 +04:00
static int cryptd_create_hash ( struct crypto_template * tmpl , struct rtattr * * tb ,
struct cryptd_queue * queue )
2008-05-14 17:23:00 +04:00
{
2009-07-12 17:38:59 +04:00
struct hashd_instance_ctx * ctx ;
2009-07-14 15:11:32 +04:00
struct ahash_instance * inst ;
2009-07-12 17:38:59 +04:00
struct shash_alg * salg ;
2008-05-14 17:23:00 +04:00
struct crypto_alg * alg ;
2009-07-12 17:38:59 +04:00
int err ;
2008-05-14 17:23:00 +04:00
2009-07-12 17:38:59 +04:00
salg = shash_attr_alg ( tb [ 1 ] , 0 , 0 ) ;
if ( IS_ERR ( salg ) )
2009-07-14 14:45:45 +04:00
return PTR_ERR ( salg ) ;
2008-05-14 17:23:00 +04:00
2009-07-12 17:38:59 +04:00
alg = & salg - > base ;
2009-07-14 15:11:32 +04:00
inst = cryptd_alloc_instance ( alg , ahash_instance_headroom ( ) ,
sizeof ( * ctx ) ) ;
2009-07-15 12:51:04 +04:00
err = PTR_ERR ( inst ) ;
2008-05-14 17:23:00 +04:00
if ( IS_ERR ( inst ) )
goto out_put_alg ;
2009-07-14 15:11:32 +04:00
ctx = ahash_instance_ctx ( inst ) ;
2009-07-12 17:38:59 +04:00
ctx - > queue = queue ;
2009-07-14 15:11:32 +04:00
err = crypto_init_shash_spawn ( & ctx - > spawn , salg ,
ahash_crypto_instance ( inst ) ) ;
2009-07-12 17:38:59 +04:00
if ( err )
goto out_free_inst ;
2009-07-14 15:11:32 +04:00
inst - > alg . halg . base . cra_flags = CRYPTO_ALG_ASYNC ;
2008-05-14 17:23:00 +04:00
2009-07-14 15:11:32 +04:00
inst - > alg . halg . digestsize = salg - > digestsize ;
inst - > alg . halg . base . cra_ctxsize = sizeof ( struct cryptd_hash_ctx ) ;
2008-05-14 17:23:00 +04:00
2009-07-14 15:11:32 +04:00
inst - > alg . halg . base . cra_init = cryptd_hash_init_tfm ;
inst - > alg . halg . base . cra_exit = cryptd_hash_exit_tfm ;
2008-05-14 17:23:00 +04:00
2009-07-14 15:11:32 +04:00
inst - > alg . init = cryptd_hash_init_enqueue ;
inst - > alg . update = cryptd_hash_update_enqueue ;
inst - > alg . final = cryptd_hash_final_enqueue ;
2009-07-22 07:10:22 +04:00
inst - > alg . finup = cryptd_hash_finup_enqueue ;
inst - > alg . export = cryptd_hash_export ;
inst - > alg . import = cryptd_hash_import ;
2009-07-14 15:11:32 +04:00
inst - > alg . setkey = cryptd_hash_setkey ;
inst - > alg . digest = cryptd_hash_digest_enqueue ;
2008-05-14 17:23:00 +04:00
2009-07-14 15:11:32 +04:00
err = ahash_register_instance ( tmpl , inst ) ;
2009-07-14 14:45:45 +04:00
if ( err ) {
crypto_drop_shash ( & ctx - > spawn ) ;
out_free_inst :
kfree ( inst ) ;
}
2008-05-14 17:23:00 +04:00
out_put_alg :
crypto_mod_put ( alg ) ;
2009-07-14 14:45:45 +04:00
return err ;
2008-05-14 17:23:00 +04:00
}
2010-09-20 12:05:12 +04:00
static void cryptd_aead_crypt ( struct aead_request * req ,
struct crypto_aead * child ,
int err ,
int ( * crypt ) ( struct aead_request * req ) )
{
struct cryptd_aead_request_ctx * rctx ;
rctx = aead_request_ctx ( req ) ;
if ( unlikely ( err = = - EINPROGRESS ) )
goto out ;
aead_request_set_tfm ( req , child ) ;
err = crypt ( req ) ;
req - > base . complete = rctx - > complete ;
out :
local_bh_disable ( ) ;
rctx - > complete ( & req - > base , err ) ;
local_bh_enable ( ) ;
}
static void cryptd_aead_encrypt ( struct crypto_async_request * areq , int err )
{
struct cryptd_aead_ctx * ctx = crypto_tfm_ctx ( areq - > tfm ) ;
struct crypto_aead * child = ctx - > child ;
struct aead_request * req ;
req = container_of ( areq , struct aead_request , base ) ;
cryptd_aead_crypt ( req , child , err , crypto_aead_crt ( child ) - > encrypt ) ;
}
static void cryptd_aead_decrypt ( struct crypto_async_request * areq , int err )
{
struct cryptd_aead_ctx * ctx = crypto_tfm_ctx ( areq - > tfm ) ;
struct crypto_aead * child = ctx - > child ;
struct aead_request * req ;
req = container_of ( areq , struct aead_request , base ) ;
cryptd_aead_crypt ( req , child , err , crypto_aead_crt ( child ) - > decrypt ) ;
}
static int cryptd_aead_enqueue ( struct aead_request * req ,
crypto_completion_t complete )
{
struct cryptd_aead_request_ctx * rctx = aead_request_ctx ( req ) ;
struct crypto_aead * tfm = crypto_aead_reqtfm ( req ) ;
struct cryptd_queue * queue = cryptd_get_queue ( crypto_aead_tfm ( tfm ) ) ;
rctx - > complete = req - > base . complete ;
req - > base . complete = complete ;
return cryptd_enqueue_request ( queue , & req - > base ) ;
}
static int cryptd_aead_encrypt_enqueue ( struct aead_request * req )
{
return cryptd_aead_enqueue ( req , cryptd_aead_encrypt ) ;
}
static int cryptd_aead_decrypt_enqueue ( struct aead_request * req )
{
return cryptd_aead_enqueue ( req , cryptd_aead_decrypt ) ;
}
static int cryptd_aead_init_tfm ( struct crypto_tfm * tfm )
{
struct crypto_instance * inst = crypto_tfm_alg_instance ( tfm ) ;
struct aead_instance_ctx * ictx = crypto_instance_ctx ( inst ) ;
struct crypto_aead_spawn * spawn = & ictx - > aead_spawn ;
struct cryptd_aead_ctx * ctx = crypto_tfm_ctx ( tfm ) ;
struct crypto_aead * cipher ;
cipher = crypto_spawn_aead ( spawn ) ;
if ( IS_ERR ( cipher ) )
return PTR_ERR ( cipher ) ;
crypto_aead_set_flags ( cipher , CRYPTO_TFM_REQ_MAY_SLEEP ) ;
ctx - > child = cipher ;
tfm - > crt_aead . reqsize = sizeof ( struct cryptd_aead_request_ctx ) ;
return 0 ;
}
static void cryptd_aead_exit_tfm ( struct crypto_tfm * tfm )
{
struct cryptd_aead_ctx * ctx = crypto_tfm_ctx ( tfm ) ;
crypto_free_aead ( ctx - > child ) ;
}
/*
 * Template create hook for "cryptd(<aead>)": wrap a synchronous AEAD.
 * Only encrypt/decrypt are deferred to the queue; setkey, setauthsize
 * and giv* ops are forwarded to the wrapped algorithm directly.
 */
static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_AEAD,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->aead_spawn.base, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = alg->cra_type;
	inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
	inst->alg.cra_init = cryptd_aead_init_tfm;
	inst->alg.cra_exit = cryptd_aead_exit_tfm;
	inst->alg.cra_aead.setkey      = alg->cra_aead.setkey;
	inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
	inst->alg.cra_aead.geniv       = alg->cra_aead.geniv;
	inst->alg.cra_aead.ivsize      = alg->cra_aead.ivsize;
	inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
	inst->alg.cra_aead.encrypt     = cryptd_aead_encrypt_enqueue;
	inst->alg.cra_aead.decrypt     = cryptd_aead_decrypt_enqueue;
	inst->alg.cra_aead.givencrypt  = alg->cra_aead.givencrypt;
	inst->alg.cra_aead.givdecrypt  = alg->cra_aead.givdecrypt;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->aead_spawn.base);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}
/* The single module-wide request queue shared by all cryptd instances. */
static struct cryptd_queue queue;

/*
 * Template dispatch: inspect the requested algorithm type and hand off
 * to the matching per-type create routine.  Unsupported types yield
 * -EINVAL.
 */
static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_create_blkcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}
/*
 * Template free hook: drop the spawn and free the instance.  The three
 * ctx pointers alias the same instance context; which layout is valid
 * depends on the instance's algorithm type, hence the switch.  For
 * ahash the allocation actually starts at the ahash_instance header.
 */
static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_spawn(&aead_ctx->aead_spawn.base);
		kfree(inst);
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};
2009-01-18 08:19:46 +03:00
struct cryptd_ablkcipher * cryptd_alloc_ablkcipher ( const char * alg_name ,
u32 type , u32 mask )
{
char cryptd_alg_name [ CRYPTO_MAX_ALG_NAME ] ;
2009-03-29 11:33:53 +04:00
struct crypto_tfm * tfm ;
2009-01-18 08:19:46 +03:00
if ( snprintf ( cryptd_alg_name , CRYPTO_MAX_ALG_NAME ,
" cryptd(%s) " , alg_name ) > = CRYPTO_MAX_ALG_NAME )
return ERR_PTR ( - EINVAL ) ;
2009-03-29 11:33:53 +04:00
type & = ~ ( CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV ) ;
type | = CRYPTO_ALG_TYPE_BLKCIPHER ;
mask & = ~ CRYPTO_ALG_TYPE_MASK ;
mask | = ( CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK ) ;
tfm = crypto_alloc_base ( cryptd_alg_name , type , mask ) ;
2009-01-18 08:19:46 +03:00
if ( IS_ERR ( tfm ) )
return ERR_CAST ( tfm ) ;
2009-03-29 11:33:53 +04:00
if ( tfm - > __crt_alg - > cra_module ! = THIS_MODULE ) {
crypto_free_tfm ( tfm ) ;
2009-01-18 08:19:46 +03:00
return ERR_PTR ( - EINVAL ) ;
}
2009-03-29 11:33:53 +04:00
return __cryptd_ablkcipher_cast ( __crypto_ablkcipher_cast ( tfm ) ) ;
2009-01-18 08:19:46 +03:00
}
EXPORT_SYMBOL_GPL ( cryptd_alloc_ablkcipher ) ;
struct crypto_blkcipher * cryptd_ablkcipher_child ( struct cryptd_ablkcipher * tfm )
{
struct cryptd_blkcipher_ctx * ctx = crypto_ablkcipher_ctx ( & tfm - > base ) ;
return ctx - > child ;
}
EXPORT_SYMBOL_GPL ( cryptd_ablkcipher_child ) ;
void cryptd_free_ablkcipher ( struct cryptd_ablkcipher * tfm )
{
crypto_free_ablkcipher ( & tfm - > base ) ;
}
EXPORT_SYMBOL_GPL ( cryptd_free_ablkcipher ) ;
/*
 * Public helper: allocate a "cryptd(alg_name)" ahash handle, verifying
 * the returned tfm really comes from this module.
 */
struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

/* Return the synchronous child shash wrapped by @tfm. */
struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

/* Expose the shash descriptor stored in @req's context. */
struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

/* Release a handle obtained from cryptd_alloc_ahash(). */
void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);
/*
 * Public helper: allocate a "cryptd(alg_name)" AEAD handle, verifying
 * the returned tfm really comes from this module.
 */
struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}
	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

/* Return the synchronous child AEAD wrapped by @tfm. */
struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;
	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

/* Release a handle obtained from cryptd_alloc_aead(). */
void cryptd_free_aead(struct cryptd_aead *tfm)
{
	crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);
/*
 * Module init: set up the shared per-CPU queue, then register the
 * "cryptd" template.  The queue is torn down again if registration
 * fails.
 */
static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

/* Module exit: reverse of cryptd_init(). */
static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

/* subsys_initcall: cryptd must be available before other crypto users. */
subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");