/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <net/netlink.h>

#include "internal.h"
2009-07-15 08:40:40 +04:00
struct ahash_request_priv {
crypto_completion_t complete ;
void * data ;
u8 * result ;
2017-04-10 12:27:57 +03:00
u32 flags ;
2009-07-15 08:40:40 +04:00
void * ubuf [ ] CRYPTO_MINALIGN_ATTR ;
} ;
2009-07-14 08:28:26 +04:00
static inline struct ahash_alg * crypto_ahash_alg ( struct crypto_ahash * hash )
{
return container_of ( crypto_hash_alg_common ( hash ) , struct ahash_alg ,
halg ) ;
}
2008-07-07 18:19:53 +04:00
static int hash_walk_next ( struct crypto_hash_walk * walk )
{
unsigned int alignmask = walk - > alignmask ;
unsigned int offset = walk - > offset ;
unsigned int nbytes = min ( walk - > entrylen ,
( ( unsigned int ) ( PAGE_SIZE ) ) - offset ) ;
2014-05-21 16:56:12 +04:00
if ( walk - > flags & CRYPTO_ALG_ASYNC )
walk - > data = kmap ( walk - > pg ) ;
else
walk - > data = kmap_atomic ( walk - > pg ) ;
2008-07-07 18:19:53 +04:00
walk - > data + = offset ;
2010-08-06 05:26:38 +04:00
if ( offset & alignmask ) {
unsigned int unaligned = alignmask + 1 - ( offset & alignmask ) ;
2014-12-05 08:44:54 +03:00
2010-08-06 05:26:38 +04:00
if ( nbytes > unaligned )
nbytes = unaligned ;
}
2008-07-07 18:19:53 +04:00
walk - > entrylen - = nbytes ;
return nbytes ;
}
static int hash_walk_new_entry ( struct crypto_hash_walk * walk )
{
struct scatterlist * sg ;
sg = walk - > sg ;
walk - > offset = sg - > offset ;
2016-05-04 12:52:56 +03:00
walk - > pg = sg_page ( walk - > sg ) + ( walk - > offset > > PAGE_SHIFT ) ;
walk - > offset = offset_in_page ( walk - > offset ) ;
2008-07-07 18:19:53 +04:00
walk - > entrylen = sg - > length ;
if ( walk - > entrylen > walk - > total )
walk - > entrylen = walk - > total ;
walk - > total - = walk - > entrylen ;
return hash_walk_next ( walk ) ;
}
int crypto_hash_walk_done ( struct crypto_hash_walk * walk , int err )
{
unsigned int alignmask = walk - > alignmask ;
unsigned int nbytes = walk - > entrylen ;
walk - > data - = walk - > offset ;
if ( nbytes & & walk - > offset & alignmask & & ! err ) {
walk - > offset = ALIGN ( walk - > offset , alignmask + 1 ) ;
walk - > data + = walk - > offset ;
nbytes = min ( nbytes ,
( ( unsigned int ) ( PAGE_SIZE ) ) - walk - > offset ) ;
walk - > entrylen - = nbytes ;
return nbytes ;
}
2014-05-21 16:56:12 +04:00
if ( walk - > flags & CRYPTO_ALG_ASYNC )
kunmap ( walk - > pg ) ;
else {
kunmap_atomic ( walk - > data ) ;
/*
* The may sleep test only makes sense for sync users .
* Async users don ' t need to sleep here anyway .
*/
crypto_yield ( walk - > flags ) ;
}
2008-07-07 18:19:53 +04:00
if ( err )
return err ;
2009-05-31 17:09:22 +04:00
if ( nbytes ) {
walk - > offset = 0 ;
walk - > pg + + ;
2008-07-07 18:19:53 +04:00
return hash_walk_next ( walk ) ;
2009-05-31 17:09:22 +04:00
}
2008-07-07 18:19:53 +04:00
if ( ! walk - > total )
return 0 ;
2015-01-20 11:06:16 +03:00
walk - > sg = sg_next ( walk - > sg ) ;
2008-07-07 18:19:53 +04:00
return hash_walk_new_entry ( walk ) ;
}
EXPORT_SYMBOL_GPL ( crypto_hash_walk_done ) ;
int crypto_hash_walk_first ( struct ahash_request * req ,
struct crypto_hash_walk * walk )
{
walk - > total = req - > nbytes ;
2014-07-11 03:18:08 +04:00
if ( ! walk - > total ) {
walk - > entrylen = 0 ;
2008-07-07 18:19:53 +04:00
return 0 ;
2014-07-11 03:18:08 +04:00
}
2008-07-07 18:19:53 +04:00
walk - > alignmask = crypto_ahash_alignmask ( crypto_ahash_reqtfm ( req ) ) ;
walk - > sg = req - > src ;
2014-05-21 16:56:12 +04:00
walk - > flags = req - > base . flags & CRYPTO_TFM_REQ_MASK ;
2008-07-07 18:19:53 +04:00
return hash_walk_new_entry ( walk ) ;
}
EXPORT_SYMBOL_GPL ( crypto_hash_walk_first ) ;
2014-05-21 16:56:12 +04:00
int crypto_ahash_walk_first ( struct ahash_request * req ,
struct crypto_hash_walk * walk )
{
walk - > total = req - > nbytes ;
2014-07-11 03:18:08 +04:00
if ( ! walk - > total ) {
walk - > entrylen = 0 ;
2014-05-21 16:56:12 +04:00
return 0 ;
2014-07-11 03:18:08 +04:00
}
2014-05-21 16:56:12 +04:00
walk - > alignmask = crypto_ahash_alignmask ( crypto_ahash_reqtfm ( req ) ) ;
walk - > sg = req - > src ;
walk - > flags = req - > base . flags & CRYPTO_TFM_REQ_MASK ;
walk - > flags | = CRYPTO_ALG_ASYNC ;
BUILD_BUG_ON ( CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC ) ;
return hash_walk_new_entry ( walk ) ;
}
EXPORT_SYMBOL_GPL ( crypto_ahash_walk_first ) ;
2008-05-14 16:41:47 +04:00
static int ahash_setkey_unaligned ( struct crypto_ahash * tfm , const u8 * key ,
unsigned int keylen )
{
unsigned long alignmask = crypto_ahash_alignmask ( tfm ) ;
int ret ;
u8 * buffer , * alignbuffer ;
unsigned long absize ;
absize = keylen + alignmask ;
2009-07-14 17:48:35 +04:00
buffer = kmalloc ( absize , GFP_KERNEL ) ;
2008-05-14 16:41:47 +04:00
if ( ! buffer )
return - ENOMEM ;
alignbuffer = ( u8 * ) ALIGN ( ( unsigned long ) buffer , alignmask + 1 ) ;
memcpy ( alignbuffer , key , keylen ) ;
2009-07-15 16:39:05 +04:00
ret = tfm - > setkey ( tfm , alignbuffer , keylen ) ;
2009-07-14 17:35:36 +04:00
kzfree ( buffer ) ;
2008-05-14 16:41:47 +04:00
return ret ;
}
2009-07-15 08:40:40 +04:00
int crypto_ahash_setkey ( struct crypto_ahash * tfm , const u8 * key ,
2008-05-14 16:41:47 +04:00
unsigned int keylen )
{
unsigned long alignmask = crypto_ahash_alignmask ( tfm ) ;
if ( ( unsigned long ) key & alignmask )
return ahash_setkey_unaligned ( tfm , key , keylen ) ;
2009-07-15 16:39:05 +04:00
return tfm - > setkey ( tfm , key , keylen ) ;
2008-05-14 16:41:47 +04:00
}
2009-07-15 08:40:40 +04:00
EXPORT_SYMBOL_GPL ( crypto_ahash_setkey ) ;
2008-05-14 16:41:47 +04:00
2008-11-08 03:56:57 +03:00
static int ahash_nosetkey ( struct crypto_ahash * tfm , const u8 * key ,
unsigned int keylen )
{
return - ENOSYS ;
}
2009-07-15 08:40:40 +04:00
static inline unsigned int ahash_align_buffer_size ( unsigned len ,
unsigned long mask )
{
return len + ( mask & ~ ( crypto_tfm_ctx_alignment ( ) - 1 ) ) ;
}
2014-03-14 05:37:05 +04:00
static int ahash_save_req ( struct ahash_request * req , crypto_completion_t cplt )
2009-07-15 08:40:40 +04:00
{
struct crypto_ahash * tfm = crypto_ahash_reqtfm ( req ) ;
unsigned long alignmask = crypto_ahash_alignmask ( tfm ) ;
unsigned int ds = crypto_ahash_digestsize ( tfm ) ;
struct ahash_request_priv * priv ;
priv = kmalloc ( sizeof ( * priv ) + ahash_align_buffer_size ( ds , alignmask ) ,
( req - > base . flags & CRYPTO_TFM_REQ_MAY_SLEEP ) ?
2009-07-24 09:56:31 +04:00
GFP_KERNEL : GFP_ATOMIC ) ;
2009-07-15 08:40:40 +04:00
if ( ! priv )
return - ENOMEM ;
2014-03-14 05:37:04 +04:00
/*
* WARNING : Voodoo programming below !
*
* The code below is obscure and hard to understand , thus explanation
* is necessary . See include / crypto / hash . h and include / linux / crypto . h
* to understand the layout of structures used here !
*
* The code here will replace portions of the ORIGINAL request with
* pointers to new code and buffers so the hashing operation can store
* the result in aligned buffer . We will call the modified request
* an ADJUSTED request .
*
* The newly mangled request will look as such :
*
* req {
* . result = ADJUSTED [ new aligned buffer ]
* . base . complete = ADJUSTED [ pointer to completion function ]
* . base . data = ADJUSTED [ * req ( pointer to self ) ]
* . priv = ADJUSTED [ new priv ] {
* . result = ORIGINAL ( result )
* . complete = ORIGINAL ( base . complete )
* . data = ORIGINAL ( base . data )
* }
*/
2009-07-15 08:40:40 +04:00
priv - > result = req - > result ;
priv - > complete = req - > base . complete ;
priv - > data = req - > base . data ;
2017-04-10 12:27:57 +03:00
priv - > flags = req - > base . flags ;
2014-03-14 05:37:04 +04:00
/*
* WARNING : We do not backup req - > priv here ! The req - > priv
* is for internal use of the Crypto API and the
* user must _NOT_ _EVER_ depend on it ' s content !
*/
2009-07-15 08:40:40 +04:00
req - > result = PTR_ALIGN ( ( u8 * ) priv - > ubuf , alignmask + 1 ) ;
2014-03-14 05:37:05 +04:00
req - > base . complete = cplt ;
2009-07-15 08:40:40 +04:00
req - > base . data = req ;
req - > priv = priv ;
2014-03-14 05:37:05 +04:00
return 0 ;
}
2017-04-10 12:27:57 +03:00
static void ahash_restore_req ( struct ahash_request * req , int err )
2014-03-14 05:37:05 +04:00
{
struct ahash_request_priv * priv = req - > priv ;
2017-04-10 12:27:57 +03:00
if ( ! err )
memcpy ( priv - > result , req - > result ,
crypto_ahash_digestsize ( crypto_ahash_reqtfm ( req ) ) ) ;
2014-03-14 05:37:05 +04:00
/* Restore the original crypto request. */
req - > result = priv - > result ;
2017-04-10 12:27:57 +03:00
ahash_request_set_callback ( req , priv - > flags ,
priv - > complete , priv - > data ) ;
2014-03-14 05:37:05 +04:00
req - > priv = NULL ;
/* Free the req->priv.priv from the ADJUSTED request. */
kzfree ( priv ) ;
}
2017-04-10 12:27:57 +03:00
static void ahash_notify_einprogress ( struct ahash_request * req )
2014-03-14 05:37:05 +04:00
{
struct ahash_request_priv * priv = req - > priv ;
2017-04-10 12:27:57 +03:00
struct crypto_async_request oreq ;
2014-03-14 05:37:05 +04:00
2017-04-10 12:27:57 +03:00
oreq . data = priv - > data ;
2014-03-14 05:37:05 +04:00
2017-04-10 12:27:57 +03:00
priv - > complete ( & oreq , - EINPROGRESS ) ;
2014-03-14 05:37:05 +04:00
}
static void ahash_op_unaligned_done ( struct crypto_async_request * req , int err )
{
struct ahash_request * areq = req - > data ;
2017-04-10 12:27:57 +03:00
if ( err = = - EINPROGRESS ) {
ahash_notify_einprogress ( areq ) ;
return ;
}
2014-03-14 05:37:05 +04:00
/*
* Restore the original request , see ahash_op_unaligned ( ) for what
* goes where .
*
* The " struct ahash_request *req " here is in fact the " req.base "
* from the ADJUSTED request from ahash_op_unaligned ( ) , thus as it
* is a pointer to self , it is also the ADJUSTED " req " .
*/
/* First copy req->result into req->priv.result */
2017-04-10 12:27:57 +03:00
ahash_restore_req ( areq , err ) ;
2014-03-14 05:37:05 +04:00
/* Complete the ORIGINAL request. */
areq - > base . complete ( & areq - > base , err ) ;
}
static int ahash_op_unaligned ( struct ahash_request * req ,
int ( * op ) ( struct ahash_request * ) )
{
int err ;
err = ahash_save_req ( req , ahash_op_unaligned_done ) ;
if ( err )
return err ;
2009-07-15 08:40:40 +04:00
err = op ( req ) ;
2017-10-18 10:00:36 +03:00
if ( err = = - EINPROGRESS | | err = = - EBUSY )
2017-04-10 12:27:57 +03:00
return err ;
ahash_restore_req ( req , err ) ;
2009-07-15 08:40:40 +04:00
return err ;
}
static int crypto_ahash_op ( struct ahash_request * req ,
int ( * op ) ( struct ahash_request * ) )
{
struct crypto_ahash * tfm = crypto_ahash_reqtfm ( req ) ;
unsigned long alignmask = crypto_ahash_alignmask ( tfm ) ;
if ( ( unsigned long ) req - > result & alignmask )
return ahash_op_unaligned ( req , op ) ;
return op ( req ) ;
}
int crypto_ahash_final ( struct ahash_request * req )
{
return crypto_ahash_op ( req , crypto_ahash_reqtfm ( req ) - > final ) ;
}
EXPORT_SYMBOL_GPL ( crypto_ahash_final ) ;
int crypto_ahash_finup ( struct ahash_request * req )
{
return crypto_ahash_op ( req , crypto_ahash_reqtfm ( req ) - > finup ) ;
}
EXPORT_SYMBOL_GPL ( crypto_ahash_finup ) ;
int crypto_ahash_digest ( struct ahash_request * req )
{
return crypto_ahash_op ( req , crypto_ahash_reqtfm ( req ) - > digest ) ;
}
EXPORT_SYMBOL_GPL ( crypto_ahash_digest ) ;
2017-04-10 12:27:57 +03:00
static void ahash_def_finup_done2 ( struct crypto_async_request * req , int err )
2009-07-15 08:40:40 +04:00
{
2017-04-10 12:27:57 +03:00
struct ahash_request * areq = req - > data ;
2009-07-15 08:40:40 +04:00
if ( err = = - EINPROGRESS )
return ;
2017-04-10 12:27:57 +03:00
ahash_restore_req ( areq , err ) ;
2009-07-15 08:40:40 +04:00
crypto: hash - Simplify the ahash_finup implementation
The ahash_def_finup() can make use of the request save/restore functions,
thus make it so. This simplifies the code a little and unifies the code
paths.
Note that the same remark about free()ing the req->priv applies here, the
req->priv can only be free()'d after the original request was restored.
Finally, squash a bug in the invocation of completion in the ASYNC path.
In both ahash_def_finup_done{1,2}, the function areq->base.complete(X, err);
was called with X=areq->base.data . This is incorrect , as X=&areq->base
is the correct value. By analysis of the data structures, we see the areq is
of type 'struct ahash_request' , areq->base is of type 'struct crypto_async_request'
and areq->base.completion is of type crypto_completion_t, which is defined in
include/linux/crypto.h as:
typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
This is one lead that the X should be &areq->base . Next up, we can inspect
other code which calls the completion callback to give us kind-of statistical
idea of how this callback is used. We can try:
$ git grep base\.complete\( drivers/crypto/
Finally, by inspecting ahash_request_set_callback() implementation defined
in include/crypto/hash.h , we observe that the .data entry of 'struct
crypto_async_request' is intended for arbitrary data, not for completion
argument.
Signed-off-by: Marek Vasut <marex@denx.de>
Cc: David S. Miller <davem@davemloft.net>
Cc: Fabio Estevam <fabio.estevam@freescale.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Shawn Guo <shawn.guo@linaro.org>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2014-03-14 05:37:06 +04:00
areq - > base . complete ( & areq - > base , err ) ;
2009-07-15 08:40:40 +04:00
}
static int ahash_def_finup_finish1 ( struct ahash_request * req , int err )
{
if ( err )
goto out ;
req - > base . complete = ahash_def_finup_done2 ;
2017-04-10 12:27:57 +03:00
2009-07-15 08:40:40 +04:00
err = crypto_ahash_reqtfm ( req ) - > final ( req ) ;
2017-10-18 10:00:36 +03:00
if ( err = = - EINPROGRESS | | err = = - EBUSY )
2017-04-10 12:27:57 +03:00
return err ;
2009-07-15 08:40:40 +04:00
out :
2017-04-10 12:27:57 +03:00
ahash_restore_req ( req , err ) ;
2009-07-15 08:40:40 +04:00
return err ;
}
static void ahash_def_finup_done1 ( struct crypto_async_request * req , int err )
{
struct ahash_request * areq = req - > data ;
2017-04-10 12:27:57 +03:00
if ( err = = - EINPROGRESS ) {
ahash_notify_einprogress ( areq ) ;
return ;
}
areq - > base . flags & = ~ CRYPTO_TFM_REQ_MAY_SLEEP ;
2009-07-15 08:40:40 +04:00
err = ahash_def_finup_finish1 ( areq , err ) ;
2017-04-10 12:27:57 +03:00
if ( areq - > priv )
return ;
2009-07-15 08:40:40 +04:00
crypto: hash - Simplify the ahash_finup implementation
The ahash_def_finup() can make use of the request save/restore functions,
thus make it so. This simplifies the code a little and unifies the code
paths.
Note that the same remark about free()ing the req->priv applies here, the
req->priv can only be free()'d after the original request was restored.
Finally, squash a bug in the invocation of completion in the ASYNC path.
In both ahash_def_finup_done{1,2}, the function areq->base.complete(X, err);
was called with X=areq->base.data . This is incorrect , as X=&areq->base
is the correct value. By analysis of the data structures, we see the areq is
of type 'struct ahash_request' , areq->base is of type 'struct crypto_async_request'
and areq->base.completion is of type crypto_completion_t, which is defined in
include/linux/crypto.h as:
typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
This is one lead that the X should be &areq->base . Next up, we can inspect
other code which calls the completion callback to give us kind-of statistical
idea of how this callback is used. We can try:
$ git grep base\.complete\( drivers/crypto/
Finally, by inspecting ahash_request_set_callback() implementation defined
in include/crypto/hash.h , we observe that the .data entry of 'struct
crypto_async_request' is intended for arbitrary data, not for completion
argument.
Signed-off-by: Marek Vasut <marex@denx.de>
Cc: David S. Miller <davem@davemloft.net>
Cc: Fabio Estevam <fabio.estevam@freescale.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Shawn Guo <shawn.guo@linaro.org>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2014-03-14 05:37:06 +04:00
areq - > base . complete ( & areq - > base , err ) ;
2009-07-15 08:40:40 +04:00
}
static int ahash_def_finup ( struct ahash_request * req )
{
struct crypto_ahash * tfm = crypto_ahash_reqtfm ( req ) ;
crypto: hash - Simplify the ahash_finup implementation
The ahash_def_finup() can make use of the request save/restore functions,
thus make it so. This simplifies the code a little and unifies the code
paths.
Note that the same remark about free()ing the req->priv applies here, the
req->priv can only be free()'d after the original request was restored.
Finally, squash a bug in the invocation of completion in the ASYNC path.
In both ahash_def_finup_done{1,2}, the function areq->base.complete(X, err);
was called with X=areq->base.data . This is incorrect , as X=&areq->base
is the correct value. By analysis of the data structures, we see the areq is
of type 'struct ahash_request' , areq->base is of type 'struct crypto_async_request'
and areq->base.completion is of type crypto_completion_t, which is defined in
include/linux/crypto.h as:
typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
This is one lead that the X should be &areq->base . Next up, we can inspect
other code which calls the completion callback to give us kind-of statistical
idea of how this callback is used. We can try:
$ git grep base\.complete\( drivers/crypto/
Finally, by inspecting ahash_request_set_callback() implementation defined
in include/crypto/hash.h , we observe that the .data entry of 'struct
crypto_async_request' is intended for arbitrary data, not for completion
argument.
Signed-off-by: Marek Vasut <marex@denx.de>
Cc: David S. Miller <davem@davemloft.net>
Cc: Fabio Estevam <fabio.estevam@freescale.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Shawn Guo <shawn.guo@linaro.org>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2014-03-14 05:37:06 +04:00
int err ;
2009-07-15 08:40:40 +04:00
crypto: hash - Simplify the ahash_finup implementation
The ahash_def_finup() can make use of the request save/restore functions,
thus make it so. This simplifies the code a little and unifies the code
paths.
Note that the same remark about free()ing the req->priv applies here, the
req->priv can only be free()'d after the original request was restored.
Finally, squash a bug in the invocation of completion in the ASYNC path.
In both ahash_def_finup_done{1,2}, the function areq->base.complete(X, err);
was called with X=areq->base.data . This is incorrect , as X=&areq->base
is the correct value. By analysis of the data structures, we see the areq is
of type 'struct ahash_request' , areq->base is of type 'struct crypto_async_request'
and areq->base.completion is of type crypto_completion_t, which is defined in
include/linux/crypto.h as:
typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
This is one lead that the X should be &areq->base . Next up, we can inspect
other code which calls the completion callback to give us kind-of statistical
idea of how this callback is used. We can try:
$ git grep base\.complete\( drivers/crypto/
Finally, by inspecting ahash_request_set_callback() implementation defined
in include/crypto/hash.h , we observe that the .data entry of 'struct
crypto_async_request' is intended for arbitrary data, not for completion
argument.
Signed-off-by: Marek Vasut <marex@denx.de>
Cc: David S. Miller <davem@davemloft.net>
Cc: Fabio Estevam <fabio.estevam@freescale.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Shawn Guo <shawn.guo@linaro.org>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2014-03-14 05:37:06 +04:00
err = ahash_save_req ( req , ahash_def_finup_done1 ) ;
if ( err )
return err ;
2009-07-15 08:40:40 +04:00
crypto: hash - Simplify the ahash_finup implementation
The ahash_def_finup() can make use of the request save/restore functions,
thus make it so. This simplifies the code a little and unifies the code
paths.
Note that the same remark about free()ing the req->priv applies here, the
req->priv can only be free()'d after the original request was restored.
Finally, squash a bug in the invocation of completion in the ASYNC path.
In both ahash_def_finup_done{1,2}, the function areq->base.complete(X, err);
was called with X=areq->base.data . This is incorrect , as X=&areq->base
is the correct value. By analysis of the data structures, we see the areq is
of type 'struct ahash_request' , areq->base is of type 'struct crypto_async_request'
and areq->base.completion is of type crypto_completion_t, which is defined in
include/linux/crypto.h as:
typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
This is one lead that the X should be &areq->base . Next up, we can inspect
other code which calls the completion callback to give us kind-of statistical
idea of how this callback is used. We can try:
$ git grep base\.complete\( drivers/crypto/
Finally, by inspecting ahash_request_set_callback() implementation defined
in include/crypto/hash.h , we observe that the .data entry of 'struct
crypto_async_request' is intended for arbitrary data, not for completion
argument.
Signed-off-by: Marek Vasut <marex@denx.de>
Cc: David S. Miller <davem@davemloft.net>
Cc: Fabio Estevam <fabio.estevam@freescale.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Shawn Guo <shawn.guo@linaro.org>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2014-03-14 05:37:06 +04:00
err = tfm - > update ( req ) ;
2017-10-18 10:00:36 +03:00
if ( err = = - EINPROGRESS | | err = = - EBUSY )
2017-04-10 12:27:57 +03:00
return err ;
crypto: hash - Simplify the ahash_finup implementation
The ahash_def_finup() can make use of the request save/restore functions,
thus make it so. This simplifies the code a little and unifies the code
paths.
Note that the same remark about free()ing the req->priv applies here, the
req->priv can only be free()'d after the original request was restored.
Finally, squash a bug in the invocation of completion in the ASYNC path.
In both ahash_def_finup_done{1,2}, the function areq->base.complete(X, err);
was called with X=areq->base.data . This is incorrect , as X=&areq->base
is the correct value. By analysis of the data structures, we see the areq is
of type 'struct ahash_request' , areq->base is of type 'struct crypto_async_request'
and areq->base.completion is of type crypto_completion_t, which is defined in
include/linux/crypto.h as:
typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
This is one lead that the X should be &areq->base . Next up, we can inspect
other code which calls the completion callback to give us kind-of statistical
idea of how this callback is used. We can try:
$ git grep base\.complete\( drivers/crypto/
Finally, by inspecting ahash_request_set_callback() implementation defined
in include/crypto/hash.h , we observe that the .data entry of 'struct
crypto_async_request' is intended for arbitrary data, not for completion
argument.
Signed-off-by: Marek Vasut <marex@denx.de>
Cc: David S. Miller <davem@davemloft.net>
Cc: Fabio Estevam <fabio.estevam@freescale.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Shawn Guo <shawn.guo@linaro.org>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2014-03-14 05:37:06 +04:00
return ahash_def_finup_finish1 ( req , err ) ;
2009-07-15 08:40:40 +04:00
}
static int ahash_no_export ( struct ahash_request * req , void * out )
{
return - ENOSYS ;
}
static int ahash_no_import ( struct ahash_request * req , const void * in )
{
return - ENOSYS ;
}
2009-07-14 08:28:26 +04:00
static int crypto_ahash_init_tfm ( struct crypto_tfm * tfm )
{
struct crypto_ahash * hash = __crypto_ahash_cast ( tfm ) ;
struct ahash_alg * alg = crypto_ahash_alg ( hash ) ;
2009-07-15 08:40:40 +04:00
hash - > setkey = ahash_nosetkey ;
2016-01-08 16:28:26 +03:00
hash - > has_setkey = false ;
2009-07-15 08:40:40 +04:00
hash - > export = ahash_no_export ;
hash - > import = ahash_no_import ;
2009-07-14 08:28:26 +04:00
if ( tfm - > __crt_alg - > cra_type ! = & crypto_ahash_type )
return crypto_init_shash_ops_async ( tfm ) ;
hash - > init = alg - > init ;
hash - > update = alg - > update ;
2009-07-15 08:40:40 +04:00
hash - > final = alg - > final ;
hash - > finup = alg - > finup ? : ahash_def_finup ;
2009-07-14 08:28:26 +04:00
hash - > digest = alg - > digest ;
2009-07-15 08:40:40 +04:00
2016-01-08 16:28:26 +03:00
if ( alg - > setkey ) {
2009-07-15 08:40:40 +04:00
hash - > setkey = alg - > setkey ;
2016-01-08 16:28:26 +03:00
hash - > has_setkey = true ;
}
2009-07-15 08:40:40 +04:00
if ( alg - > export )
hash - > export = alg - > export ;
if ( alg - > import )
hash - > import = alg - > import ;
2009-07-14 08:28:26 +04:00
return 0 ;
}
static unsigned int crypto_ahash_extsize ( struct crypto_alg * alg )
{
2016-06-29 13:03:47 +03:00
if ( alg - > cra_type ! = & crypto_ahash_type )
return sizeof ( struct crypto_shash * ) ;
2009-07-14 08:28:26 +04:00
2016-06-29 13:03:47 +03:00
return crypto_alg_extsize ( alg ) ;
2009-07-14 08:28:26 +04:00
}
2011-11-03 16:46:07 +04:00
#ifdef CONFIG_NET
/* Report algorithm parameters to userspace via netlink (crypto_user). */
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	/* strncpy zero-pads the fixed-size type field. */
	strncpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
		    sizeof(struct crypto_report_hash), &rhash))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif
2011-09-27 09:41:07 +04:00
2008-05-14 16:41:47 +04:00
static void crypto_ahash_show ( struct seq_file * m , struct crypto_alg * alg )
2016-12-31 18:56:23 +03:00
__maybe_unused ;
2008-05-14 16:41:47 +04:00
static void crypto_ahash_show ( struct seq_file * m , struct crypto_alg * alg )
{
seq_printf ( m , " type : ahash \n " ) ;
seq_printf ( m , " async : %s \n " , alg - > cra_flags & CRYPTO_ALG_ASYNC ?
" yes " : " no " ) ;
seq_printf ( m , " blocksize : %u \n " , alg - > cra_blocksize ) ;
2009-07-14 08:28:26 +04:00
seq_printf ( m , " digestsize : %u \n " ,
__crypto_hash_alg_common ( alg ) - > digestsize ) ;
2008-05-14 16:41:47 +04:00
}
const struct crypto_type crypto_ahash_type = {
2009-07-14 08:28:26 +04:00
. extsize = crypto_ahash_extsize ,
. init_tfm = crypto_ahash_init_tfm ,
2008-05-14 16:41:47 +04:00
# ifdef CONFIG_PROC_FS
. show = crypto_ahash_show ,
# endif
2011-09-27 09:41:07 +04:00
. report = crypto_ahash_report ,
2009-07-14 08:28:26 +04:00
. maskclear = ~ CRYPTO_ALG_TYPE_MASK ,
. maskset = CRYPTO_ALG_TYPE_AHASH_MASK ,
. type = CRYPTO_ALG_TYPE_AHASH ,
. tfmsize = offsetof ( struct crypto_ahash , base ) ,
2008-05-14 16:41:47 +04:00
} ;
EXPORT_SYMBOL_GPL ( crypto_ahash_type ) ;
2009-07-14 08:28:26 +04:00
struct crypto_ahash * crypto_alloc_ahash ( const char * alg_name , u32 type ,
u32 mask )
{
return crypto_alloc_tfm ( alg_name , & crypto_ahash_type , type , mask ) ;
}
EXPORT_SYMBOL_GPL ( crypto_alloc_ahash ) ;
2016-01-23 08:52:40 +03:00
int crypto_has_ahash ( const char * alg_name , u32 type , u32 mask )
{
return crypto_type_has_alg ( alg_name , & crypto_ahash_type , type , mask ) ;
}
EXPORT_SYMBOL_GPL ( crypto_has_ahash ) ;
2009-07-14 10:06:06 +04:00
static int ahash_prepare_alg ( struct ahash_alg * alg )
{
struct crypto_alg * base = & alg - > halg . base ;
if ( alg - > halg . digestsize > PAGE_SIZE / 8 | |
2015-10-09 22:43:33 +03:00
alg - > halg . statesize > PAGE_SIZE / 8 | |
alg - > halg . statesize = = 0 )
2009-07-14 10:06:06 +04:00
return - EINVAL ;
base - > cra_type = & crypto_ahash_type ;
base - > cra_flags & = ~ CRYPTO_ALG_TYPE_MASK ;
base - > cra_flags | = CRYPTO_ALG_TYPE_AHASH ;
return 0 ;
}
int crypto_register_ahash ( struct ahash_alg * alg )
{
struct crypto_alg * base = & alg - > halg . base ;
int err ;
err = ahash_prepare_alg ( alg ) ;
if ( err )
return err ;
return crypto_register_alg ( base ) ;
}
EXPORT_SYMBOL_GPL ( crypto_register_ahash ) ;
int crypto_unregister_ahash ( struct ahash_alg * alg )
{
return crypto_unregister_alg ( & alg - > halg . base ) ;
}
EXPORT_SYMBOL_GPL ( crypto_unregister_ahash ) ;
2017-08-10 15:53:52 +03:00
int crypto_register_ahashes ( struct ahash_alg * algs , int count )
{
int i , ret ;
for ( i = 0 ; i < count ; i + + ) {
ret = crypto_register_ahash ( & algs [ i ] ) ;
if ( ret )
goto err ;
}
return 0 ;
err :
for ( - - i ; i > = 0 ; - - i )
crypto_unregister_ahash ( & algs [ i ] ) ;
return ret ;
}
EXPORT_SYMBOL_GPL ( crypto_register_ahashes ) ;
void crypto_unregister_ahashes ( struct ahash_alg * algs , int count )
{
int i ;
for ( i = count - 1 ; i > = 0 ; - - i )
crypto_unregister_ahash ( & algs [ i ] ) ;
}
EXPORT_SYMBOL_GPL ( crypto_unregister_ahashes ) ;
2009-07-14 10:06:06 +04:00
int ahash_register_instance ( struct crypto_template * tmpl ,
struct ahash_instance * inst )
{
int err ;
err = ahash_prepare_alg ( & inst - > alg ) ;
if ( err )
return err ;
return crypto_register_instance ( tmpl , ahash_crypto_instance ( inst ) ) ;
}
EXPORT_SYMBOL_GPL ( ahash_register_instance ) ;
void ahash_free_instance ( struct crypto_instance * inst )
{
crypto_drop_spawn ( crypto_instance_ctx ( inst ) ) ;
kfree ( ahash_instance ( inst ) ) ;
}
EXPORT_SYMBOL_GPL ( ahash_free_instance ) ;
int crypto_init_ahash_spawn ( struct crypto_ahash_spawn * spawn ,
struct hash_alg_common * alg ,
struct crypto_instance * inst )
{
return crypto_init_spawn2 ( & spawn - > base , & alg - > base , inst ,
& crypto_ahash_type ) ;
}
EXPORT_SYMBOL_GPL ( crypto_init_ahash_spawn ) ;
struct hash_alg_common * ahash_attr_alg ( struct rtattr * rta , u32 type , u32 mask )
{
struct crypto_alg * alg ;
alg = crypto_attr_alg2 ( rta , & crypto_ahash_type , type , mask ) ;
return IS_ERR ( alg ) ? ERR_CAST ( alg ) : __crypto_hash_alg_common ( alg ) ;
}
EXPORT_SYMBOL_GPL ( ahash_attr_alg ) ;
2008-05-14 16:41:47 +04:00
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");