/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"
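
/*
 * Minimal usage sketch (illustrative only, not part of this file): an
 * asynchronous hash is driven through an ahash_request and completion is
 * signalled via the callback installed on the request.  The algorithm name
 * "sha256" and the identifiers my_done, my_ctx, data, len and digest below
 * are placeholders, and error handling is omitted.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *	int err;
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done, my_ctx);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *
 *	err = crypto_ahash_digest(req);
 *
 * A return value of -EINPROGRESS means the digest will be delivered later
 * through my_done(); the request and transform are then released with
 * ahash_request_free() and crypto_free_ahash().
 */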

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}
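
/*
 * Scatterlist walking helpers: hash_walk_next() maps the current page and
 * returns how many bytes can be hashed before the page, the scatterlist
 * entry or an alignment boundary is hit.  Callers consume that data and
 * then call crypto_hash_walk_done() to advance the walk.
 */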

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);
		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->pg = sg_page(sg);
	walk->offset = sg->offset;
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	kunmap_atomic(walk->data);
	crypto_yield(walk->flags);

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = scatterwalk_sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total)
		return 0;

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
				  struct crypto_hash_walk *walk,
				  struct scatterlist *sg, unsigned int len)
{
	walk->total = len;

	if (!walk->total)
		return 0;

	walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
	walk->sg = sg;
	walk->flags = hdesc->flags;

	return hash_walk_new_entry(walk);
}
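
/*
 * Setkey with alignment handling: if the caller's key pointer does not
 * satisfy the algorithm's alignment mask, the key is copied into a
 * temporary aligned buffer before ->setkey() is invoked, and the copy is
 * zeroed again when it is freed.
 */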
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

	return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}
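
/*
 * Unaligned result handling: when the caller's result buffer does not meet
 * the alignment mask, the operation runs against an aligned bounce buffer
 * allocated in ahash_request_priv and the digest is copied back to the
 * original buffer once the request completes.
 */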
static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	kzfree(priv);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;
	struct ahash_request_priv *priv = areq->priv;
	crypto_completion_t complete = priv->complete;
	void *data = priv->data;

	ahash_op_unaligned_finish(areq, err);

	complete(data, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;
	int err;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = ahash_op_unaligned_done;
	req->base.data = req;
	req->priv = priv;

	err = op(req);
	ahash_op_unaligned_finish(req, err);

	return err;
}

static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
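
/*
 * Default finup: algorithms that do not provide ->finup() fall back to
 * ahash_def_finup(), which chains ->update() and ->final() while directing
 * the intermediate result into an aligned private buffer; the caller's
 * completion callback and result buffer are restored when the second step
 * finishes.
 */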
static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	kzfree(priv);
}

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;
	struct ahash_request_priv *priv = areq->priv;
	crypto_completion_t complete = priv->complete;
	void *data = priv->data;

	ahash_def_finup_finish2(areq, err);

	complete(data, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_ahash_reqtfm(req)->final(req);

out:
	ahash_def_finup_finish2(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;
	struct ahash_request_priv *priv = areq->priv;
	crypto_completion_t complete = priv->complete;
	void *data = priv->data;

	err = ahash_def_finup_finish1(areq, err);

	complete(data, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = ahash_def_finup_done1;
	req->base.data = req;
	req->priv = priv;

	return ahash_def_finup_finish1(req, tfm->update(req));
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}
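
/*
 * Transform initialisation: ahash transforms backed by a different cra_type
 * (i.e. a plain shash algorithm) are wired up through the shash async
 * wrappers; native ahash algorithms have their ops copied from struct
 * ahash_alg, with -ENOSYS stubs left in place for optional callbacks the
 * algorithm does not implement.
 */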
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey)
		hash->setkey = alg->setkey;
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_ahash_type)
		return alg->cra_ctxsize;

	return sizeof(struct crypto_shash *);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "ahash");
	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
		    sizeof(struct crypto_report_hash), &rhash))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
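
/*
 * Registration helpers: ahash_prepare_alg() sanity-checks the digest and
 * state sizes and stamps the algorithm with the ahash type before it is
 * handed to the generic crypto_register_alg()/instance code.
 */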
static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");