// SPDX-License-Identifier: GPL-2.0-only
/*
 * SHA-256 routines supporting the Power7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
#include <linux/module.h>
#include <asm/vio.h>
#include <asm/byteorder.h>

#include "nx_csbcpb.h"
#include "nx.h"
struct sha256_state_be {
        __be32 state[SHA256_DIGEST_SIZE / 4];
        u64 count;
        u8 buf[SHA256_BLOCK_SIZE];
};

static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
        int err;

        err = nx_crypto_ctx_sha_init(tfm);
        if (err)
                return err;

        nx_ctx_init(nx_ctx, HCOP_FC_SHA);

        nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
        NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);

        return 0;
}

static int nx_sha256_init(struct shash_desc *desc)
{
        struct sha256_state_be *sctx = shash_desc_ctx(desc);

        memset(sctx, 0, sizeof *sctx);

        sctx->state[0] = __cpu_to_be32(SHA256_H0);
        sctx->state[1] = __cpu_to_be32(SHA256_H1);
        sctx->state[2] = __cpu_to_be32(SHA256_H2);
        sctx->state[3] = __cpu_to_be32(SHA256_H3);
        sctx->state[4] = __cpu_to_be32(SHA256_H4);
        sctx->state[5] = __cpu_to_be32(SHA256_H5);
        sctx->state[6] = __cpu_to_be32(SHA256_H6);
        sctx->state[7] = __cpu_to_be32(SHA256_H7);
        sctx->count = 0;

        return 0;
}

static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
                            unsigned int len)
{
        struct sha256_state_be *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
        struct nx_sg *out_sg;
        u64 to_process = 0, leftover, total;
        unsigned long irq_flags;
        int rc = 0;
        int data_len;
        u32 max_sg_len;
        u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        /* 2 cases for total data len:
         *  1: < SHA256_BLOCK_SIZE: copy into state, return 0
         *  2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
         */
        total = (sctx->count % SHA256_BLOCK_SIZE) + len;
        if (total < SHA256_BLOCK_SIZE) {
                memcpy(sctx->buf + buf_len, data, len);
                sctx->count += len;
                goto out;
        }
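
        /*
         * At least one full block is available: load the current hash state
         * into the CPB and flag the request as an intermediate continuation
         * before building the scatter/gather lists below.
         */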
        memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE);
        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
        NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

        max_sg_len = min_t(u64, nx_ctx->ap->sglen,
                           nx_driver.of.max_sg_len / sizeof(struct nx_sg));
        max_sg_len = min_t(u64, max_sg_len,
                           nx_ctx->ap->databytelen / NX_PAGE_SIZE);

        data_len = SHA256_DIGEST_SIZE;
        out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
                                  &data_len, max_sg_len);
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

        if (data_len != SHA256_DIGEST_SIZE) {
                rc = -EINVAL;
                goto out;
        }
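
        /*
         * Walk the input in block-aligned chunks: each pass rebuilds the
         * input scatter/gather list (any previously buffered bytes first,
         * then the new data) and submits one operation to the coprocessor.
         */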
        do {
                int used_sgs = 0;
                struct nx_sg *in_sg = nx_ctx->in_sg;

                if (buf_len) {
                        data_len = buf_len;
                        in_sg = nx_build_sg_list(in_sg,
                                                 (u8 *) sctx->buf,
                                                 &data_len,
                                                 max_sg_len);

                        if (data_len != buf_len) {
                                rc = -EINVAL;
                                goto out;
                        }
                        used_sgs = in_sg - nx_ctx->in_sg;
                }

                /* to_process: SHA256_BLOCK_SIZE aligned chunk to be
                 * processed in this iteration. This value is restricted
                 * by sg list limits and number of sgs we already used
                 * for leftover data. (see above)
                 * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
                 * but because data may not be aligned, we need to account
                 * for that too. */
                to_process = min_t(u64, total,
                                   (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
                to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);

                data_len = to_process - buf_len;
                in_sg = nx_build_sg_list(in_sg, (u8 *) data,
                                         &data_len, max_sg_len);

                nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);

                to_process = data_len + buf_len;
                leftover = total - to_process;

                /*
                 * we've hit the nx chip previously and we're updating
                 * again, so copy over the partial digest.
                 */
                memcpy(csbcpb->cpb.sha256.input_partial_digest,
                       csbcpb->cpb.sha256.message_digest,
                       SHA256_DIGEST_SIZE);

                if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
                        rc = -EINVAL;
                        goto out;
                }

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
                if (rc)
                        goto out;

                atomic_inc(&(nx_ctx->stats->sha256_ops));

                total -= to_process;
                data += to_process - buf_len;
                buf_len = 0;

        } while (leftover >= SHA256_BLOCK_SIZE);

        /* copy the leftover back into the state struct */
        if (leftover)
                memcpy(sctx->buf, data, leftover);

        sctx->count += len;
        memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}

static int nx_sha256_final(struct shash_desc *desc, u8 *out)
{
        struct sha256_state_be *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
        struct nx_sg *in_sg, *out_sg;
        unsigned long irq_flags;
        u32 max_sg_len;
        int rc = 0;
        int len;

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        max_sg_len = min_t(u64, nx_ctx->ap->sglen,
                           nx_driver.of.max_sg_len / sizeof(struct nx_sg));
        max_sg_len = min_t(u64, max_sg_len,
                           nx_ctx->ap->databytelen / NX_PAGE_SIZE);

        /* final is represented by continuing the operation and indicating that
         * this is not an intermediate operation */
        if (sctx->count >= SHA256_BLOCK_SIZE) {
                /* we've hit the nx chip previously, now we're finalizing,
                 * so copy over the partial digest */
                memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
        } else {
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
        }
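
        /* report the total message length in bits to the coprocessor */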
        csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);

        len = sctx->count & (SHA256_BLOCK_SIZE - 1);
        in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
                                 &len, max_sg_len);

        if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
                rc = -EINVAL;
                goto out;
        }

        len = SHA256_DIGEST_SIZE;
        out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len);

        if (len != SHA256_DIGEST_SIZE) {
                rc = -EINVAL;
                goto out;
        }

        nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
        if (!nx_ctx->op.outlen) {
                rc = -EINVAL;
                goto out;
        }

        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
        if (rc)
                goto out;

        atomic_inc(&(nx_ctx->stats->sha256_ops));
        atomic64_add(sctx->count, &(nx_ctx->stats->sha256_bytes));

        memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}
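
/*
 * export/import copy the raw sha256_state_be, so a partially hashed
 * request can be saved and resumed later.
 */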
static int nx_sha256_export(struct shash_desc *desc, void *out)
{
        struct sha256_state_be *sctx = shash_desc_ctx(desc);

        memcpy(out, sctx, sizeof(*sctx));

        return 0;
}

static int nx_sha256_import(struct shash_desc *desc, const void *in)
{
        struct sha256_state_be *sctx = shash_desc_ctx(desc);

        memcpy(sctx, in, sizeof(*sctx));

        return 0;
}
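
/*
 * descsize and statesize are both sizeof(struct sha256_state_be), so the
 * export/import helpers above can copy the descriptor state verbatim.
 */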
struct shash_alg nx_shash_sha256_alg = {
        .digestsize = SHA256_DIGEST_SIZE,
        .init       = nx_sha256_init,
        .update     = nx_sha256_update,
        .final      = nx_sha256_final,
        .export     = nx_sha256_export,
        .import     = nx_sha256_import,
        .descsize   = sizeof(struct sha256_state_be),
        .statesize  = sizeof(struct sha256_state_be),
        .base       = {
                .cra_name        = "sha256",
                .cra_driver_name = "sha256-nx",
                .cra_priority    = 300,
                .cra_blocksize   = SHA256_BLOCK_SIZE,
                .cra_module      = THIS_MODULE,
                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
                .cra_init        = nx_crypto_ctx_sha256_init,
                .cra_exit        = nx_crypto_ctx_exit,
        }
};