/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
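
/*
 * Caller-side usage sketch (illustrative only; my_complete, my_ctx and the
 * scatterlists are placeholders, not part of this file). A transform is
 * allocated and keyed once, then requests are submitted; -EINPROGRESS (or
 * -EBUSY when CRYPTO_TFM_REQ_MAY_BACKLOG is set) means the request will
 * complete asynchronously through the callback.
 *
 *	static void my_complete(struct crypto_async_request *req, int err)
 *	{
 *		... invoked with the final status once the request finishes
 *	}
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, keylen);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					my_complete, my_ctx);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	err = crypto_ablkcipher_encrypt(req);
 */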

#include <crypto/internal/skcipher.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

static const char *skcipher_default_geniv __read_mostly;

struct ablkcipher_buffer {
	struct list_head	entry;
	struct scatter_walk	dst;
	unsigned int		len;
	void			*data;
};

enum {
	ABLKCIPHER_WALK_SLOW = 1 << 0,
};

static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	struct ablkcipher_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		ablkcipher_buffer_write(p);
		list_del(&p->entry);
		kfree(p);
	}
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
					  struct ablkcipher_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
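/* For example, with 4 KiB pages, start == 0xffd and len == 8 gives
 * end_page == 0x1000: the last byte would land on the next page, so the
 * spot is bumped up to that page boundary. When start + len - 1 stays on
 * start's page, end_page <= start and start is returned unchanged.
 */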
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
						unsigned int bsize)
{
	unsigned int n = bsize;

	for (;;) {
		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

		if (len_this_page > n)
			len_this_page = n;
		scatterwalk_advance(&walk->out, n);
		if (n == len_this_page)
			break;
		n -= len_this_page;
		scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg));
	}

	return bsize;
}

static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
						unsigned int n)
{
	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk);

/* On success, err >= 0 is the number of bytes of walk->nbytes that the
 * caller left unprocessed; a negative err propagates the failure. */
int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
			n = ablkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = ablkcipher_done_slow(walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(req->base.flags);
		return ablkcipher_walk_next(req, walk);
	}

	if (walk->iv != req->info)
		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
	kfree(walk->iv_buffer);

	return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk,
				       unsigned int bsize,
				       unsigned int alignmask,
				       void **src_p, void **dst_p)
{
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
	struct ablkcipher_buffer *p;
	void *src, *dst, *base;
	unsigned int n;

	/* Room for the buffer header plus an aligned block that may be
	 * bumped past a page boundary by ablkcipher_get_spot(). */
	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
	n += (aligned_bsize * 3 - (alignmask + 1) +
	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

	p = kmalloc(n, GFP_ATOMIC);
	if (!p)
		return ablkcipher_walk_done(req, walk, -ENOMEM);

	base = p + 1;

	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
	src = dst = ablkcipher_get_spot(dst, bsize);

	p->len = bsize;
	p->data = dst;

	scatterwalk_copychunks(src, &walk->in, bsize, 0);

	ablkcipher_queue_write(walk, p);

	walk->nbytes = bsize;
	walk->flags |= ABLKCIPHER_WALK_SLOW;

	*src_p = src;
	*dst_p = dst;

	return 0;
}

static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
				     struct crypto_tfm *tfm,
				     unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->iv_buffer)
		return -ENOMEM;

	/* Place the IV copy at an aligned spot that does not straddle a
	 * page, then work on the copy instead of the caller's IV. */
	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}

static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk)
{
	walk->src.page = scatterwalk_page(&walk->in);
	walk->src.offset = offset_in_page(walk->in.offset);
	walk->dst.page = scatterwalk_page(&walk->out);
	walk->dst.offset = offset_in_page(walk->out.offset);

	return 0;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask, bsize, n;
	void *src, *dst;
	int err;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	n = walk->total;
	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return ablkcipher_walk_done(req, walk, -EINVAL);
	}

	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
	src = dst = NULL;

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (n < bsize ||
	    !scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
					   &src, &dst);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
	if (err >= 0) {
		walk->src.page = virt_to_page(src);
		walk->dst.page = virt_to_page(dst);
		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
	}

	return err;
}

static int ablkcipher_walk_first(struct ablkcipher_request *req,
				 struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->iv_buffer = NULL;
	walk->iv = req->info;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = ablkcipher_copy_iv(walk, tfm, alignmask);

		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);

	return ablkcipher_walk_next(req, walk);
}

int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk)
{
	walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
	return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
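
/*
 * Driver-side sketch of the physical walk (illustrative; "process" stands
 * in for whatever the hardware does with each chunk, and error handling is
 * elided). Passing 0 to ablkcipher_walk_done() reports that the whole
 * chunk was consumed; a positive value reports bytes left unprocessed.
 *
 *	struct ablkcipher_walk walk;
 *	int err;
 *
 *	ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
 *	err = ablkcipher_walk_phys(req, &walk);
 *	while (walk.nbytes) {
 *		process(walk.src.page, walk.src.offset,
 *			walk.dst.page, walk.dst.offset, walk.nbytes);
 *		err = ablkcipher_walk_done(req, &walk, 0);
 *	}
 */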

static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					      u32 mask)
{
	return alg->cra_ctxsize;
}

int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req)
{
	return crypto_ablkcipher_encrypt(&req->creq);
}

int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req)
{
	return crypto_ablkcipher_decrypt(&req->creq);
}

static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
				      u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	if (!alg->ivsize) {
		crt->givencrypt = skcipher_null_givencrypt;
		crt->givdecrypt = skcipher_null_givdecrypt;
	}
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : ablkcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
}

const struct crypto_type crypto_ablkcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_ablkcipher_show,
#endif
	.report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);

static int no_givdecrypt(struct skcipher_givcrypt_request *req)
{
	return -ENOSYS;
}

static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
				     u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
		      alg->setkey : setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->givencrypt = alg->givencrypt;
	crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

#ifdef CONFIG_NET
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : givcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
}

const struct crypto_type crypto_givcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_givcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_givcipher_show,
#endif
	.report = crypto_givcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);

const char *crypto_default_geniv(const struct crypto_alg *alg)
{
	if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	     CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					 alg->cra_ablkcipher.ivsize) !=
	    alg->cra_blocksize)
		return "chainiv";

	return alg->cra_flags & CRYPTO_ALG_ASYNC ?
	       "eseqiv" : skcipher_default_geniv;
}

static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
{
	struct rtattr *tb[3];
	struct {
		struct rtattr attr;
		struct crypto_attr_type data;
	} ptype;
	struct {
		struct rtattr attr;
		struct crypto_attr_alg data;
	} palg;
	struct crypto_template *tmpl;
	struct crypto_instance *inst;
	struct crypto_alg *larval;
	const char *geniv;
	int err;

	larval = crypto_larval_lookup(alg->cra_driver_name,
				      (type & ~CRYPTO_ALG_TYPE_MASK) |
				      CRYPTO_ALG_TYPE_GIVCIPHER,
				      mask | CRYPTO_ALG_TYPE_MASK);
	err = PTR_ERR(larval);
	if (IS_ERR(larval))
		goto out;

	err = -EAGAIN;
	if (!crypto_is_larval(larval))
		goto drop_larval;

	ptype.attr.rta_len = sizeof(ptype);
	ptype.attr.rta_type = CRYPTOA_TYPE;
	ptype.data.type = type | CRYPTO_ALG_GENIV;
	/* GENIV tells the template that we're making a default geniv. */
	ptype.data.mask = mask | CRYPTO_ALG_GENIV;
	tb[0] = &ptype.attr;

	palg.attr.rta_len = sizeof(palg);
	palg.attr.rta_type = CRYPTOA_ALG;
	/* Must use the exact name to locate ourselves. */
	memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
	tb[1] = &palg.attr;

	tb[2] = NULL;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER)
		geniv = alg->cra_blkcipher.geniv;
	else
		geniv = alg->cra_ablkcipher.geniv;

	if (!geniv)
		geniv = crypto_default_geniv(alg);

	tmpl = crypto_lookup_template(geniv);
	err = -ENOENT;
	if (!tmpl)
		goto kill_larval;

	inst = tmpl->alloc(tb);
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto put_tmpl;

	if ((err = crypto_register_instance(tmpl, inst))) {
		tmpl->free(inst);
		goto put_tmpl;
	}

	/* Redo the lookup to use the instance we just registered. */
	err = -EAGAIN;

put_tmpl:
	crypto_tmpl_put(tmpl);
kill_larval:
	crypto_larval_kill(larval);
drop_larval:
	crypto_mod_put(larval);
out:
	crypto_mod_put(alg);
	return err;
}

struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return alg;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_GIVCIPHER)
		return alg;

	if (!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	      CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					  alg->cra_ablkcipher.ivsize))
		return alg;

	crypto_mod_put(alg);
	alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
				    mask & ~CRYPTO_ALG_TESTED);
	if (IS_ERR(alg))
		return alg;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_GIVCIPHER) {
		if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) {
			crypto_mod_put(alg);
			alg = ERR_PTR(-ENOENT);
		}
		return alg;
	}

	BUG_ON(!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
		 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					     alg->cra_ablkcipher.ivsize));

	return ERR_PTR(crypto_givcipher_default(alg, type, mask));
}
EXPORT_SYMBOL_GPL(crypto_lookup_skcipher);

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
			 u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask);

	alg = crypto_lookup_skcipher(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	struct crypto_tfm *tfm;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask);

	for (;;) {
		struct crypto_alg *alg;

		alg = crypto_lookup_skcipher(alg_name, type, mask);
		if (IS_ERR(alg)) {
			err = PTR_ERR(alg);
			goto err;
		}

		tfm = __crypto_alloc_tfm(alg, type, mask);
		if (!IS_ERR(tfm))
			return __crypto_ablkcipher_cast(tfm);

		crypto_mod_put(alg);
		err = PTR_ERR(tfm);

err:
		if (err != -EAGAIN)
			break;
		if (signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);

static int __init skcipher_module_init(void)
{
	/* chainiv serialises all requests, so prefer eseqiv as the default
	 * synchronous geniv whenever more than one CPU may submit requests
	 * in parallel. */
	skcipher_default_geniv = num_possible_cpus() > 1 ?
				 "eseqiv" : "chainiv";
	return 0;
}

static void skcipher_module_exit(void)
{
}

module_init(skcipher_module_init);
module_exit(skcipher_module_exit);