// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SHA-256, as specified in
 * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf
 *
 * SHA-256 code by Jean-Luc Cooke <jlcooke@certainkey.com>.
 *
 * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2014 Red Hat Inc.
 */

#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/string.h>
#include <crypto/sha2.h>
#include <asm/unaligned.h>

/*
 * Round constants: the first 32 bits of the fractional parts of the
 * cube roots of the first 64 primes (FIPS 180-4, section 4.2.2).
 */
static const u32 SHA256_K[] = {
	0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
	0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
	0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
	0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
	0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
	0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
	0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
	0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
	0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
	0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
	0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
	0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
	0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
	0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
	0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
	0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
};

/*
 * Ch and Maj are the "choose" and "majority" functions from FIPS 180-4,
 * rewritten here to minimize the number of bitwise operations.
 */
static inline u32 Ch(u32 x, u32 y, u32 z)
{
	return z ^ (x & (y ^ z));
}

static inline u32 Maj(u32 x, u32 y, u32 z)
{
	return (x & y) | (z & (x | y));
}

/* e0/e1 are the big-sigma round functions, s0/s1 the small-sigma
 * message-schedule functions from FIPS 180-4. */
#define e0(x)       (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22))
#define e1(x)       (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25))
#define s0(x)       (ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3))
#define s1(x)       (ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10))

/* Load big-endian message word I, tolerating unaligned input. */
static inline void LOAD_OP(int I, u32 *W, const u8 *input)
{
	W[I] = get_unaligned_be32((__u32 *)input + I);
}

/* Expand one word of the message schedule (FIPS 180-4, section 6.2.2). */
static inline void BLEND_OP(int I, u32 *W)
{
	W[I] = s1(W[I - 2]) + W[I - 7] + s0(W[I - 15]) + W[I - 16];
}

#define SHA256_ROUND(i, a, b, c, d, e, f, g, h) do {		\
	u32 t1, t2;						\
	t1 = h + e1(e) + Ch(e, f, g) + SHA256_K[i] + W[i];	\
	t2 = e0(a) + Maj(a, b, c);				\
	d += t1;						\
	h = t1 + t2;						\
} while (0)
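
/*
 * For reference, one compression round in FIPS 180-4 notation:
 *
 *	T1 = h + Sigma1(e) + Ch(e, f, g) + K[t] + W[t]
 *	T2 = Sigma0(a) + Maj(a, b, c)
 *	h = g; g = f; f = e; e = d + T1;
 *	d = c; c = b; b = a; a = T1 + T2;
 *
 * SHA256_ROUND() avoids the eight-way variable shuffle by rotating the
 * argument order at each call site instead; see the unrolled loop in
 * sha256_transform() below, where the arguments realign every 8 rounds.
 */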

static void sha256_transform(u32 *state, const u8 *input, u32 *W)
{
	u32 a, b, c, d, e, f, g, h;
	int i;

	/* load the input */
	for (i = 0; i < 16; i += 8) {
		LOAD_OP(i + 0, W, input);
		LOAD_OP(i + 1, W, input);
		LOAD_OP(i + 2, W, input);
		LOAD_OP(i + 3, W, input);
		LOAD_OP(i + 4, W, input);
		LOAD_OP(i + 5, W, input);
		LOAD_OP(i + 6, W, input);
		LOAD_OP(i + 7, W, input);
	}

	/* now blend */
	for (i = 16; i < 64; i += 8) {
		BLEND_OP(i + 0, W);
		BLEND_OP(i + 1, W);
		BLEND_OP(i + 2, W);
		BLEND_OP(i + 3, W);
		BLEND_OP(i + 4, W);
		BLEND_OP(i + 5, W);
		BLEND_OP(i + 6, W);
		BLEND_OP(i + 7, W);
	}

	/* load the state into our registers */
	a = state[0]; b = state[1]; c = state[2]; d = state[3];
	e = state[4]; f = state[5]; g = state[6]; h = state[7];

	/* now iterate */
	for (i = 0; i < 64; i += 8) {
		SHA256_ROUND(i + 0, a, b, c, d, e, f, g, h);
		SHA256_ROUND(i + 1, h, a, b, c, d, e, f, g);
		SHA256_ROUND(i + 2, g, h, a, b, c, d, e, f);
		SHA256_ROUND(i + 3, f, g, h, a, b, c, d, e);
		SHA256_ROUND(i + 4, e, f, g, h, a, b, c, d);
		SHA256_ROUND(i + 5, d, e, f, g, h, a, b, c);
		SHA256_ROUND(i + 6, c, d, e, f, g, h, a, b);
		SHA256_ROUND(i + 7, b, c, d, e, f, g, h, a);
	}

	state[0] += a; state[1] += b; state[2] += c; state[3] += d;
	state[4] += e; state[5] += f; state[6] += g; state[7] += h;
}
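
/*
 * Note on the W parameter: the 64-word message schedule lives in the
 * caller's stack frame rather than in sha256_transform() itself, so a
 * caller processing many blocks can reuse a single buffer and wipe it
 * once with memzero_explicit() after the last block, as sha256_update()
 * does below.
 */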

void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
{
	unsigned int partial, done;
	const u8 *src;
	u32 W[64];

	partial = sctx->count & 0x3f;
	sctx->count += len;
	done = 0;
	src = data;

	if ((partial + len) > 63) {
		if (partial) {
			/* done = -partial, so done + 64 is the number of
			 * bytes needed to fill the block buffer */
			done = -partial;
			memcpy(sctx->buf + partial, data, done + 64);
			src = sctx->buf;
		}

		do {
			sha256_transform(sctx->state, src, W);
			done += 64;
			src = data + done;
		} while (done + 63 < len);

		memzero_explicit(W, sizeof(W));

		partial = 0;
	}
	memcpy(sctx->buf + partial, src, len - done);
}
EXPORT_SYMBOL(sha256_update);

void sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
{
	sha256_update(sctx, data, len);
}
EXPORT_SYMBOL(sha224_update);

static void __sha256_final(struct sha256_state *sctx, u8 *out, int digest_words)
{
	__be32 *dst = (__be32 *)out;
	__be64 bits;
	unsigned int index, pad_len;
	int i;
	static const u8 padding[64] = { 0x80, };

	/* Save number of bits */
	bits = cpu_to_be64(sctx->count << 3);

	/* Pad out to 56 mod 64. */
	index = sctx->count & 0x3f;
	pad_len = (index < 56) ? (56 - index) : ((64 + 56) - index);
	sha256_update(sctx, padding, pad_len);

	/* Append length (before padding) */
	sha256_update(sctx, (const u8 *)&bits, sizeof(bits));

	/* Store state in digest */
	for (i = 0; i < digest_words; i++)
		put_unaligned_be32(sctx->state[i], &dst[i]);

	/* Zeroize sensitive information. */
	memzero_explicit(sctx, sizeof(*sctx));
}

void sha256_final(struct sha256_state *sctx, u8 *out)
{
	__sha256_final(sctx, out, 8);
}
EXPORT_SYMBOL(sha256_final);

void sha224_final(struct sha256_state *sctx, u8 *out)
{
	__sha256_final(sctx, out, 7);
}
EXPORT_SYMBOL(sha224_final);

void sha256(const u8 *data, unsigned int len, u8 *out)
{
	struct sha256_state sctx;

	sha256_init(&sctx);
	sha256_update(&sctx, data, len);
	sha256_final(&sctx, out);
}
EXPORT_SYMBOL(sha256);
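
/*
 * Usage sketch (illustrative only; buf and buf_len are hypothetical
 * caller-owned names): the streaming interface and the one-shot helper
 * above produce the same digest for the same input.
 *
 *	struct sha256_state sctx;
 *	u8 digest[SHA256_DIGEST_SIZE];
 *
 *	sha256_init(&sctx);
 *	sha256_update(&sctx, buf, buf_len);
 *	sha256_final(&sctx, digest);
 *
 *	// or, equivalently:
 *	sha256(buf, buf_len, digest);
 */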

MODULE_LICENSE("GPL");