/*
 * Software multibuffer async crypto daemon.
 *
 * Copyright (c) 2014 Tim Chen <tim.c.chen@linux.intel.com>
 *
 * Adapted from crypto daemon.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/sched/stat.h>
#include <linux/slab.h>

#define MCRYPTD_MAX_CPU_QLEN 100
#define MCRYPTD_BATCH 9

static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				    unsigned int tail);

struct mcryptd_flush_list {
	struct list_head list;
	struct mutex lock;
};

static struct mcryptd_flush_list __percpu *mcryptd_flist;

struct hashd_instance_ctx {
	struct crypto_ahash_spawn spawn;
	struct mcryptd_queue *queue;
};

static void mcryptd_queue_worker(struct work_struct *work);

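/*
 * Put @cstate on this CPU's flush list, if it is not already there, and
 * schedule delayed work so that partially completed jobs get flushed
 * after @delay jiffies.
 */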
void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay)
{
	struct mcryptd_flush_list *flist;

	if (!cstate->flusher_engaged) {
		/* put the flusher on the flush list */
		flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
		mutex_lock(&flist->lock);
		list_add_tail(&cstate->flush_list, &flist->list);
		cstate->flusher_engaged = true;
		cstate->next_flush = jiffies + delay;
		queue_delayed_work_on(smp_processor_id(), kcrypto_wq,
				      &cstate->flush, delay);
		mutex_unlock(&flist->lock);
	}
}
EXPORT_SYMBOL(mcryptd_arm_flusher);

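/* Allocate and initialize a request queue and worker for each possible CPU. */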
static int mcryptd_init_queue(struct mcryptd_queue *queue,
			      unsigned int max_cpu_qlen)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue);
	pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
		spin_lock_init(&cpu_queue->q_lock);
	}
	return 0;
}

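/* Tear down the per-CPU queues; every queue must already be drained. */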
static void mcryptd_fini_queue(struct mcryptd_queue *queue)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

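/*
 * Enqueue a request on the current CPU's queue under q_lock, record the
 * submitting CPU in the request context tag, and kick that CPU's worker.
 */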
static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
				   struct crypto_async_request *request,
				   struct mcryptd_hash_request_ctx *rctx)
{
	int cpu, err;
	struct mcryptd_cpu_queue *cpu_queue;

	cpu_queue = raw_cpu_ptr(queue->cpu_queue);
	spin_lock(&cpu_queue->q_lock);
	cpu = smp_processor_id();
	rctx->tag.cpu = smp_processor_id();

	err = crypto_enqueue_request(&cpu_queue->queue, request);
	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
		 cpu, cpu_queue, request);
	spin_unlock(&cpu_queue->q_lock);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);

	return err;
}

/*
 * Try to opportunistically flush the partially completed jobs if
 * the crypto daemon is the only task running.
 */
static void mcryptd_opportunistic_flush(void)
{
	struct mcryptd_flush_list *flist;
	struct mcryptd_alg_cstate *cstate;

	flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
	while (single_task_running()) {
		mutex_lock(&flist->lock);
		cstate = list_first_entry_or_null(&flist->list,
				struct mcryptd_alg_cstate, flush_list);
		if (!cstate || !cstate->flusher_engaged) {
			mutex_unlock(&flist->lock);
			return;
		}
		list_del(&cstate->flush_list);
		cstate->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		cstate->alg_state->flusher(cstate);
	}
}

/*
 * Called in workqueue context: do one unit of real crypto work (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void mcryptd_queue_worker(struct work_struct *work)
{
	struct mcryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;
	int i;

	/*
	 * Need to loop through more than once for multi-buffer
	 * to be effective.
	 */
	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
	i = 0;
	while (i < MCRYPTD_BATCH || single_task_running()) {

		spin_lock_bh(&cpu_queue->q_lock);
		backlog = crypto_get_backlog(&cpu_queue->queue);
		req = crypto_dequeue_request(&cpu_queue->queue);
		spin_unlock_bh(&cpu_queue->q_lock);

		if (!req) {
			mcryptd_opportunistic_flush();
			return;
		}

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);
		req->complete(req, 0);
		if (!cpu_queue->queue.qlen)
			return;
		++i;
	}
	if (cpu_queue->queue.qlen)
		queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
}

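/*
 * Delayed-work handler: take this CPU's cstate off the flush list and
 * invoke the algorithm's flusher on whatever jobs are still pending.
 */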
void mcryptd_flusher(struct work_struct *__work)
{
	struct mcryptd_alg_cstate *alg_cpu_state;
	struct mcryptd_alg_state *alg_state;
	struct mcryptd_flush_list *flist;
	int cpu;

	cpu = smp_processor_id();
	alg_cpu_state = container_of(to_delayed_work(__work),
				     struct mcryptd_alg_cstate, flush);
	alg_state = alg_cpu_state->alg_state;
	if (alg_cpu_state->cpu != cpu)
		pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n",
			 cpu, alg_cpu_state->cpu);

	if (alg_cpu_state->flusher_engaged) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		mutex_lock(&flist->lock);
		list_del(&alg_cpu_state->flush_list);
		alg_cpu_state->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		alg_state->flusher(alg_cpu_state);
	}
}
EXPORT_SYMBOL_GPL(mcryptd_flusher);

static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

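/*
 * Allocate a crypto instance with @head bytes reserved before it and
 * @tail bytes after it, naming it "mcryptd(...)" after the underlying
 * algorithm and bumping its priority above the original.
 */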
static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				    unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

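/*
 * mcryptd may only wrap algorithms marked CRYPTO_ALG_INTERNAL: carry
 * the internal bit over into @type and @mask and report whether it is
 * set in both.
 */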
static inline bool mcryptd_check_internal(struct rtattr **tb, u32 *type,
					  u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return false;

	*type |= algt->type & CRYPTO_ALG_INTERNAL;
	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;

	if (*type & *mask & CRYPTO_ALG_INTERNAL)
		return true;
	else
		return false;
}

static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_ahash_spawn *spawn = &ictx->spawn;
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ahash *hash;

	hash = crypto_spawn_ahash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mcryptd_hash_request_ctx) +
				 crypto_ahash_reqsize(hash));
	return 0;
}

static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(ctx->child);
}

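/* Forward a setkey to the child ahash, mirroring the request and result flags. */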
static int mcryptd_hash_setkey(struct crypto_ahash *parent,
			       const u8 *key, unsigned int keylen)
{
	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_ahash *child = ctx->child;
	int err;

	crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_ahash_set_flags(child, crypto_ahash_get_flags(parent) &
			       CRYPTO_TFM_REQ_MASK);
	err = crypto_ahash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_ahash_get_flags(child) &
			       CRYPTO_TFM_RES_MASK);
	return err;
}

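/*
 * Save the caller's completion callback, substitute the state-specific
 * worker callback in @complete, and queue the request for asynchronous
 * processing.
 */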
static int mcryptd_hash_enqueue(struct ahash_request *req,
				crypto_completion_t complete)
{
	int ret;

	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct mcryptd_queue *queue =
		mcryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	ret = mcryptd_enqueue_request(queue, &req->base, rctx);

	return ret;
}

static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_ahash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct ahash_request *desc = &rctx->areq;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	ahash_request_set_tfm(desc, child);
	ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
				   rctx->complete, req_async);

	rctx->out = req->result;
	err = crypto_ahash_init(desc);

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_init_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_init);
}

static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	rctx->out = req->result;
	err = crypto_ahash_update(&rctx->areq);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_update_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_update);
}

static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	rctx->out = req->result;
	err = crypto_ahash_final(&rctx->areq);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_final_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_final);
}

static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	rctx->out = req->result;
	err = crypto_ahash_finup(&rctx->areq);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_finup);
}

static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_ahash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct ahash_request *desc = &rctx->areq;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	ahash_request_set_tfm(desc, child);
	ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
				   rctx->complete, req_async);

	rctx->out = req->result;
	err = crypto_ahash_init(desc) ?: crypto_ahash_finup(desc);

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_digest);
}

static int mcryptd_hash_export(struct ahash_request *req, void *out)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_ahash_export(&rctx->areq, out);
}

static int mcryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_ahash_import(&rctx->areq, in);
}

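/*
 * Instantiate "mcryptd(<alg>)" around an internal ahash: allocate the
 * instance, take a spawn on the child algorithm and wire up the
 * asynchronous entry points.
 */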
static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			       struct mcryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct hash_alg_common *halg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	if (!mcryptd_check_internal(tb, &type, &mask))
		return -EINVAL;

	halg = ahash_attr_alg(tb[1], type, mask);
	if (IS_ERR(halg))
		return PTR_ERR(halg);

	alg = &halg->base;
	pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
	inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
				      sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_ahash_spawn(&ctx->spawn, halg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
		(alg->cra_flags & (CRYPTO_ALG_INTERNAL |
				   CRYPTO_ALG_OPTIONAL_KEY));

	inst->alg.halg.digestsize = halg->digestsize;
	inst->alg.halg.statesize = halg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);

	inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm;

	inst->alg.init   = mcryptd_hash_init_enqueue;
	inst->alg.update = mcryptd_hash_update_enqueue;
	inst->alg.final  = mcryptd_hash_final_enqueue;
	inst->alg.finup  = mcryptd_hash_finup_enqueue;
	inst->alg.export = mcryptd_hash_export;
	inst->alg.import = mcryptd_hash_import;
	if (crypto_hash_alg_has_setkey(halg))
		inst->alg.setkey = mcryptd_hash_setkey;
	inst->alg.digest = mcryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_ahash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static struct mcryptd_queue mqueue;

static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_DIGEST:
		return mcryptd_create_hash(tmpl, tb, &mqueue);
	}

	return -EINVAL;
}

static void mcryptd_free(struct crypto_instance *inst)
{
	struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_ahash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template mcryptd_tmpl = {
	.name = "mcryptd",
	.create = mcryptd_create,
	.free = mcryptd_free,
	.module = THIS_MODULE,
};

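/*
 * Allocate an "mcryptd(alg_name)" ahash handle for callers, rejecting
 * names that would overflow CRYPTO_MAX_ALG_NAME and transforms that are
 * not backed by this module.
 */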
struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
					  u32 type, u32 mask)
{
	char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ahash *tfm;

	if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __mcryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);

struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_child);

struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->areq;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_desc);

void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
{
	crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(mcryptd_free_ahash);

static int __init mcryptd_init(void)
{
	int err, cpu;
	struct mcryptd_flush_list *flist;

	mcryptd_flist = alloc_percpu(struct mcryptd_flush_list);
	if (!mcryptd_flist)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		INIT_LIST_HEAD(&flist->list);
		mutex_init(&flist->lock);
	}

	err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN);
	if (err) {
		free_percpu(mcryptd_flist);
		return err;
	}

	err = crypto_register_template(&mcryptd_tmpl);
	if (err) {
		mcryptd_fini_queue(&mqueue);
		free_percpu(mcryptd_flist);
	}

	return err;
}

static void __exit mcryptd_exit(void)
{
	mcryptd_fini_queue(&mqueue);
	crypto_unregister_template(&mcryptd_tmpl);
	free_percpu(mcryptd_flist);
}

subsys_initcall(mcryptd_init);
module_exit(mcryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
MODULE_ALIAS_CRYPTO("mcryptd");