// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <linux/refcount.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
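
/*
 * Usage sketch (an illustration, not part of the original source): the
 * parameter permissions are 0, so there is no sysfs entry and the depth can
 * only be chosen at load time, e.g.
 *
 *	modprobe cryptd cryptd_max_cpu_qlen=2000
 *
 * or, with cryptd built in, via the kernel command line:
 *
 *	cryptd.cryptd_max_cpu_qlen=2000
 */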

static struct workqueue_struct *cryptd_wq;

struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_skcipher_ctx {
	refcount_t refcnt;
	struct crypto_sync_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	refcount_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	refcount_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;
	refcount_t *refcnt;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);

	if (err == -ENOSPC)
		goto out_put_cpu;

	queue_work_on(cpu, cryptd_wq, &cpu_queue->work);

	if (!refcount_read(refcnt))
		goto out_put_cpu;

	refcount_inc(refcnt);

out_put_cpu:
	put_cpu();

	return err;
}

/*
 * Called in workqueue context: do one real encryption/decryption (via
 * req->complete) and reschedule the worker if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging crypto workqueue.
	 * preempt_disable/enable is used to prevent being preempted by
	 * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
	 * cryptd_enqueue_request() being accessed from software interrupts.
	 */
	local_bh_disable();
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(cryptd_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

static void cryptd_type_and_mask(struct crypto_attr_type *algt,
				 u32 *type, u32 *mask)
{
	/*
	 * cryptd is allowed to wrap internal algorithms, but in that case the
	 * resulting cryptd instance will be marked as internal as well.
	 */
	*type = algt->type & CRYPTO_ALG_INTERNAL;
	*mask = algt->mask & CRYPTO_ALG_INTERNAL;

	/* No point in cryptd wrapping an algorithm that's already async. */
	*mask |= CRYPTO_ALG_ASYNC;

	*mask |= crypto_algt_inherited_mask(algt);
}
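
/*
 * Worked example (illustrative, derived only from the code above): a caller
 * that asks for an internal wrapper passes CRYPTO_ALG_INTERNAL in both type
 * and mask, so *type keeps the INTERNAL bit while *mask ends up containing
 * CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC plus the inherited flags.  Having
 * ASYNC set in the mask but clear in the type restricts the lookup to a
 * synchronous child algorithm, which cryptd then exposes asynchronously.
 */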

static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}
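
/*
 * Illustrative example (algorithm name assumed, not taken from this file):
 * wrapping "sha256-generic" (cra_priority 100) yields an instance with
 * cra_name "sha256", cra_driver_name "cryptd(sha256-generic)" and
 * cra_priority 150, so the asynchronous wrapper outranks the synchronous
 * implementation it wraps.
 */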

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_sync_skcipher *child = ctx->child;

	crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(child,
				       crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	return crypto_sync_skcipher_setkey(child, key, keylen);
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	int refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
}

static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *child = ctx->child;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_sync_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *child = ctx->child;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_sync_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
				   crypto_completion_t compl)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = (struct crypto_sync_skcipher *)cipher;
	crypto_skcipher_set_reqsize(
		tfm, sizeof(struct cryptd_skcipher_request_ctx));
	return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_sync_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->spawn);
	kfree(inst);
}

static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
				  struct crypto_attr_type *algt,
				  struct cryptd_queue *queue)
{
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
				   crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

	inst->alg.init = cryptd_skcipher_init_tfm;
	inst->alg.exit = cryptd_skcipher_exit_tfm;

	inst->alg.setkey = cryptd_skcipher_setkey;
	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

	inst->free = cryptd_skcipher_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_skcipher_free(inst);
	}
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	return crypto_shash_setkey(child, key, keylen);
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_complete(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	int refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;

	return crypto_shash_import(desc, in);
}

static void cryptd_hash_free(struct ahash_instance *inst)
{
	struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst);

	crypto_drop_shash(&ctx->spawn);
	kfree(inst);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst),
				crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;
	alg = crypto_spawn_shash_alg(&ctx->spawn);

	err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & (CRYPTO_ALG_INTERNAL |
					CRYPTO_ALG_OPTIONAL_KEY));
	inst->alg.halg.digestsize = alg->digestsize;
	inst->alg.halg.statesize = alg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init   = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final  = cryptd_hash_final_enqueue;
	inst->alg.finup  = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	if (crypto_shash_alg_has_setkey(alg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	inst->free = cryptd_hash_free;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_hash_free(inst);
	}
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	struct cryptd_aead_ctx *ctx;
	crypto_completion_t compl;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static void cryptd_aead_free(struct aead_instance *inst)
{
	struct aead_instance_ctx *ctx = aead_instance_ctx(inst);

	crypto_drop_aead(&ctx->aead_spawn);
	kfree(inst);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
			       crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	inst->free = cryptd_aead_free;

	err = aead_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_aead_free(inst);
	}
	return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return cryptd_create_skcipher(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_HASH:
		return cryptd_create_hash(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, algt, &queue);
	}

	return -EINVAL;
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.module = THIS_MODULE,
};

struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return &ctx->child->base;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);
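
/*
 * Usage sketch for the helpers above (a hedged illustration, not code from
 * this file; the algorithm name is an assumed placeholder for an internal
 * implementation registered elsewhere):
 *
 *	struct cryptd_skcipher *ctfm;
 *
 *	ctfm = cryptd_alloc_skcipher("__aes-example", CRYPTO_ALG_INTERNAL,
 *				     CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *
 * Requests are then issued against &ctfm->base; cryptd_skcipher_child()
 * returns the synchronous child for callers that can run it directly, and
 * cryptd_free_skcipher() drops the caller's reference when done.
 */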

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;

	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
	int err;

	cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				    1);
	if (!cryptd_wq)
		return -ENOMEM;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		goto err_destroy_wq;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		goto err_fini_queue;

	return 0;

err_fini_queue:
	cryptd_fini_queue(&queue);
err_destroy_wq:
	destroy_workqueue(cryptd_wq);
	return err;
}

static void __exit cryptd_exit(void)
{
	destroy_workqueue(cryptd_wq);
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");