/*
 * pcrypt - Parallel crypto wrapper.
 *
 * Copyright (C) 2009 secunet Security Networks AG
 * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/cpu.h>
#include <crypto/pcrypt.h>

struct padata_pcrypt {
	struct padata_instance *pinst;
	struct workqueue_struct *wq;

	/*
	 * Cpumask for callback CPUs. It should be
	 * equal to the serial cpumask of the corresponding padata instance,
	 * so it is updated when padata notifies us about a serial
	 * cpumask change.
	 *
	 * cb_cpumask is protected by RCU. This fact prevents us from
	 * using cpumask_var_t directly because the actual type of
	 * cpumask_var_t depends on the kernel configuration (particularly on
	 * the CONFIG_CPUMASK_OFFSTACK macro). Depending on the configuration,
	 * cpumask_var_t may be either a pointer to a struct cpumask
	 * or a variable allocated on the stack. Thus we cannot safely use
	 * cpumask_var_t with RCU operations such as rcu_assign_pointer or
	 * rcu_dereference. So cpumask_var_t is wrapped in struct
	 * pcrypt_cpumask, which makes it possible to use it with RCU.
	 */
	struct pcrypt_cpumask {
		cpumask_var_t mask;
	} *cb_cpumask;
	struct notifier_block nblock;
};
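
/* One parallel instance serves the encryption path, one the decryption path. */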
static struct padata_pcrypt pencrypt;
static struct padata_pcrypt pdecrypt;

static struct kset *pcrypt_kset;

struct pcrypt_instance_ctx {
	struct crypto_aead_spawn spawn;
	atomic_t tfm_count;
};

struct pcrypt_aead_ctx {
	struct crypto_aead *child;
	unsigned int cb_cpu;
};
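
/*
 * Choose the callback CPU for this request and hand the request to padata
 * for parallel processing. If *cb_cpu is not in the current callback
 * cpumask (and that mask is non-empty), a CPU from the mask is picked
 * instead, indexed by the original CPU number modulo the mask weight, and
 * written back to *cb_cpu.
 */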
static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
			      struct padata_pcrypt *pcrypt)
{
	unsigned int cpu_index, cpu, i;
	struct pcrypt_cpumask *cpumask;

	cpu = *cb_cpu;

	rcu_read_lock_bh();
	cpumask = rcu_dereference_bh(pcrypt->cb_cpumask);
	if (cpumask_test_cpu(cpu, cpumask->mask))
		goto out;

	if (!cpumask_weight(cpumask->mask))
		goto out;

	cpu_index = cpu % cpumask_weight(cpumask->mask);

	cpu = cpumask_first(cpumask->mask);
	for (i = 0; i < cpu_index; i++)
		cpu = cpumask_next(cpu, cpumask->mask);

	*cb_cpu = cpu;

out:
	rcu_read_unlock_bh();
	return padata_do_parallel(pcrypt->pinst, padata, cpu);
}

static int pcrypt_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

	return crypto_aead_setkey(ctx->child, key, keylen);
}

static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

	return crypto_aead_setauthsize(ctx->child, authsize);
}

static void pcrypt_aead_serial(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	aead_request_complete(req->base.data, padata->info);
}
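
/*
 * Completion callback of the underlying AEAD request: record the result and
 * feed the request back to padata so that completions are serialized in the
 * original submission order.
 */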
static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
{
	struct aead_request *req = areq->data;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct padata_priv *padata = pcrypt_request_padata(preq);

	padata->info = err;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	padata_do_serial(padata);
}
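
/* Parallel worker: run the child encryption on the CPU chosen by padata. */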
static void pcrypt_aead_enc(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	padata->info = crypto_aead_encrypt(req);

	if (padata->info == -EINPROGRESS)
		return;

	padata_do_serial(padata);
}
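
/*
 * Set up the request for the underlying AEAD algorithm and dispatch it to
 * the pencrypt padata instance. On success this returns -EINPROGRESS;
 * otherwise the padata error is returned.
 */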
static int pcrypt_aead_encrypt(struct aead_request *req)
{
	int err;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct aead_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	u32 flags = aead_request_flags(req);

	memset(padata, 0, sizeof(struct padata_priv));

	padata->parallel = pcrypt_aead_enc;
	padata->serial = pcrypt_aead_serial;

	aead_request_set_tfm(creq, ctx->child);
	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				  pcrypt_aead_done, req);
	aead_request_set_crypt(creq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(creq, req->assoclen);

	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
	if (!err)
		return -EINPROGRESS;

	return err;
}

static void pcrypt_aead_dec(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	padata->info = crypto_aead_decrypt(req);

	if (padata->info == -EINPROGRESS)
		return;

	padata_do_serial(padata);
}
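
/* Decrypt entry point; mirrors pcrypt_aead_encrypt but uses pdecrypt. */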
static int pcrypt_aead_decrypt(struct aead_request *req)
{
	int err;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct aead_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	u32 flags = aead_request_flags(req);

	memset(padata, 0, sizeof(struct padata_priv));

	padata->parallel = pcrypt_aead_dec;
	padata->serial = pcrypt_aead_serial;

	aead_request_set_tfm(creq, ctx->child);
	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				  pcrypt_aead_done, req);
	aead_request_set_crypt(creq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(creq, req->assoclen);

	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt);
	if (!err)
		return -EINPROGRESS;

	return err;
}
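
/*
 * Allocate the child transform and pick a callback CPU for this tfm,
 * distributing tfms round-robin over the online CPUs via tfm_count.
 */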
static int pcrypt_aead_init_tfm(struct crypto_aead *tfm)
{
	int cpu, cpu_index;
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct pcrypt_instance_ctx *ictx = aead_instance_ctx(inst);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cpu_index = (unsigned int)atomic_inc_return(&ictx->tfm_count) %
		    cpumask_weight(cpu_online_mask);

	ctx->cb_cpu = cpumask_first(cpu_online_mask);
	for (cpu = 0; cpu < cpu_index; cpu++)
		ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);

	cipher = crypto_spawn_aead(&ictx->spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(tfm, sizeof(struct pcrypt_request) +
				     sizeof(struct aead_request) +
				     crypto_aead_reqsize(cipher));

	return 0;
}

static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static void pcrypt_free(struct aead_instance *inst)
{
	struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);

	crypto_drop_aead(&ctx->spawn);
	kfree(inst);
}
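
/*
 * Copy the basic algorithm properties from the wrapped algorithm and raise
 * the priority by 100 so that the pcrypt instance is preferred over the
 * plain algorithm it wraps.
 */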
static int pcrypt_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 100;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}
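
/*
 * Build an AEAD instance that wraps the algorithm named by the template
 * parameter and routes its encrypt/decrypt operations through padata.
 */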
static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
			      u32 type, u32 mask)
{
	struct pcrypt_instance_ctx *ctx;
	struct crypto_attr_type *algt;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	crypto_set_aead_spawn(&ctx->spawn, aead_crypto_instance(inst));

	err = crypto_grab_aead(&ctx->spawn, name, 0, 0);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->spawn);
	err = pcrypt_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC;

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);

	inst->alg.init = pcrypt_aead_init_tfm;
	inst->alg.exit = pcrypt_aead_exit_tfm;

	inst->alg.setkey = pcrypt_aead_setkey;
	inst->alg.setauthsize = pcrypt_aead_setauthsize;
	inst->alg.encrypt = pcrypt_aead_encrypt;
	inst->alg.decrypt = pcrypt_aead_decrypt;

	inst->free = pcrypt_free;

	err = aead_register_instance(tmpl, inst);
	if (err)
		goto out_drop_aead;

out:
	return err;

out_drop_aead:
	crypto_drop_aead(&ctx->spawn);
out_free_inst:
	kfree(inst);
	goto out;
}
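
/* Template ->create callback: only AEAD algorithms can be wrapped. */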
static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		return pcrypt_create_aead(tmpl, tb, algt->type, algt->mask);
	}

	return -EINVAL;
}
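
/*
 * padata notifier: the serial cpumask of the padata instance changed, so
 * publish a fresh copy of it as the new callback cpumask. The old mask is
 * freed after an RCU grace period.
 */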
static int pcrypt_cpumask_change_notify(struct notifier_block *self,
					unsigned long val, void *data)
{
	struct padata_pcrypt *pcrypt;
	struct pcrypt_cpumask *new_mask, *old_mask;
	struct padata_cpumask *cpumask = (struct padata_cpumask *)data;

	if (!(val & PADATA_CPU_SERIAL))
		return 0;

	pcrypt = container_of(self, struct padata_pcrypt, nblock);
	new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL);
	if (!new_mask)
		return -ENOMEM;
	if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) {
		kfree(new_mask);
		return -ENOMEM;
	}

	old_mask = pcrypt->cb_cpumask;

	cpumask_copy(new_mask->mask, cpumask->cbcpu);
	rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
	synchronize_rcu();

	free_cpumask_var(old_mask->mask);
	kfree(old_mask);
	return 0;
}

static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
{
	int ret;

	pinst->kobj.kset = pcrypt_kset;
	ret = kobject_add(&pinst->kobj, NULL, "%s", name);
	if (!ret)
		kobject_uevent(&pinst->kobj, KOBJ_ADD);

	return ret;
}
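
/*
 * Set up one padata_pcrypt instance: allocate its workqueue and padata
 * instance, seed the callback cpumask with the online CPUs, and register
 * the cpumask notifier and the sysfs entry.
 */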
static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
			      const char *name)
{
	int ret = -ENOMEM;
	struct pcrypt_cpumask *mask;

	get_online_cpus();

	pcrypt->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				     1, name);
	if (!pcrypt->wq)
		goto err;

	pcrypt->pinst = padata_alloc_possible(pcrypt->wq);
	if (!pcrypt->pinst)
		goto err_destroy_workqueue;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (!mask)
		goto err_free_padata;
	if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
		kfree(mask);
		goto err_free_padata;
	}

	cpumask_and(mask->mask, cpu_possible_mask, cpu_online_mask);
	rcu_assign_pointer(pcrypt->cb_cpumask, mask);

	pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
	ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
	if (ret)
		goto err_free_cpumask;

	ret = pcrypt_sysfs_add(pcrypt->pinst, name);
	if (ret)
		goto err_unregister_notifier;

	put_online_cpus();

	return ret;

err_unregister_notifier:
	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
err_free_cpumask:
	free_cpumask_var(mask->mask);
	kfree(mask);
err_free_padata:
	padata_free(pcrypt->pinst);
err_destroy_workqueue:
	destroy_workqueue(pcrypt->wq);
err:
	put_online_cpus();

	return ret;
}

static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
{
	free_cpumask_var(pcrypt->cb_cpumask->mask);
	kfree(pcrypt->cb_cpumask);

	padata_stop(pcrypt->pinst);
	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
	destroy_workqueue(pcrypt->wq);
	padata_free(pcrypt->pinst);
}
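
/*
 * The "pcrypt" template wraps an existing AEAD algorithm; instances are
 * requested by name, e.g. "pcrypt(rfc4106(gcm(aes)))" (example only; any
 * AEAD implementation can be wrapped).
 */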
static struct crypto_template pcrypt_tmpl = {
	.name = "pcrypt",
	.create = pcrypt_create,
	.module = THIS_MODULE,
};

static int __init pcrypt_init(void)
{
	int err = -ENOMEM;

	pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj);
	if (!pcrypt_kset)
		goto err;

	err = pcrypt_init_padata(&pencrypt, "pencrypt");
	if (err)
		goto err_unreg_kset;

	err = pcrypt_init_padata(&pdecrypt, "pdecrypt");
	if (err)
		goto err_deinit_pencrypt;

	padata_start(pencrypt.pinst);
	padata_start(pdecrypt.pinst);

	return crypto_register_template(&pcrypt_tmpl);

err_deinit_pencrypt:
	pcrypt_fini_padata(&pencrypt);
err_unreg_kset:
	kset_unregister(pcrypt_kset);
err:
	return err;
}

static void __exit pcrypt_exit(void)
{
	pcrypt_fini_padata(&pencrypt);
	pcrypt_fini_padata(&pdecrypt);

	kset_unregister(pcrypt_kset);
	crypto_unregister_template(&pcrypt_tmpl);
}

subsys_initcall(pcrypt_init);
module_exit(pcrypt_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("Parallel crypto wrapper");
MODULE_ALIAS_CRYPTO("pcrypt");