2014-05-31 17:44:17 +04:00
/*
* DRBG : Deterministic Random Bits Generator
* Based on NIST Recommended DRBG from NIST SP800 - 90 A with the following
* properties :
* * CTR DRBG with DF with AES - 128 , AES - 192 , AES - 256 cores
* * Hash DRBG with DF with SHA - 1 , SHA - 256 , SHA - 384 , SHA - 512 cores
* * HMAC DRBG with DF with SHA - 1 , SHA - 256 , SHA - 384 , SHA - 512 cores
* * with and without prediction resistance
*
* Copyright Stephan Mueller < smueller @ chronox . de > , 2014
*
* Redistribution and use in source and binary forms , with or without
* modification , are permitted provided that the following conditions
* are met :
* 1. Redistributions of source code must retain the above copyright
* notice , and the entire permission notice in its entirety ,
* including the disclaimer of warranties .
* 2. Redistributions in binary form must reproduce the above copyright
* notice , this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution .
* 3. The name of the author may not be used to endorse or promote
* products derived from this software without specific prior
* written permission .
*
* ALTERNATIVELY , this product may be distributed under the terms of
* the GNU General Public License , in which case the provisions of the GPL are
* required INSTEAD OF the above restrictions . ( This clause is
* necessary due to a potential bad interaction between the GPL and
* the restrictions contained in a BSD - style copyright . )
*
* THIS SOFTWARE IS PROVIDED ` ` AS IS ' ' AND ANY EXPRESS OR IMPLIED
* WARRANTIES , INCLUDING , BUT NOT LIMITED TO , THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE , ALL OF
* WHICH ARE HEREBY DISCLAIMED . IN NO EVENT SHALL THE AUTHOR BE
* LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , SPECIAL , EXEMPLARY , OR
* CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT LIMITED TO , PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , DATA , OR PROFITS ; OR
* BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT
* ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE , EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE .
*
* DRBG Usage
* = = = = = = = = = =
* The SP 800 - 90 A DRBG allows the user to specify a personalization string
* for initialization as well as an additional information string for each
* random number request . The following code fragments show how a caller
* uses the kernel crypto API to use the full functionality of the DRBG .
*
* Usage without any additional data
* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
* struct crypto_rng * drng ;
* int err ;
* char data [ DATALEN ] ;
*
* drng = crypto_alloc_rng ( drng_name , 0 , 0 ) ;
* err = crypto_rng_get_bytes ( drng , & data , DATALEN ) ;
* crypto_free_rng ( drng ) ;
*
*
* Usage with personalization string during initialization
* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
* struct crypto_rng * drng ;
* int err ;
* char data [ DATALEN ] ;
* struct drbg_string pers ;
* char personalization [ 11 ] = " some-string " ;
*
* drbg_string_fill ( & pers , personalization , strlen ( personalization ) ) ;
* drng = crypto_alloc_rng ( drng_name , 0 , 0 ) ;
* // The reset completely re-initializes the DRBG with the provided
* // personalization string
* err = crypto_rng_reset ( drng , & personalization , strlen ( personalization ) ) ;
* err = crypto_rng_get_bytes ( drng , & data , DATALEN ) ;
* crypto_free_rng ( drng ) ;
*
*
* Usage with additional information string during random number request
* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
* struct crypto_rng * drng ;
* int err ;
* char data [ DATALEN ] ;
* char addtl_string [ 11 ] = " some-string " ;
 * struct drbg_string addtl;
*
* drbg_string_fill ( & addtl , addtl_string , strlen ( addtl_string ) ) ;
* drng = crypto_alloc_rng ( drng_name , 0 , 0 ) ;
* // The following call is a wrapper to crypto_rng_get_bytes() and returns
* // the same error codes.
* err = crypto_drbg_get_bytes_addtl ( drng , & data , DATALEN , & addtl ) ;
* crypto_free_rng ( drng ) ;
*
*
* Usage with personalization and additional information strings
* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
* Just mix both scenarios above .
*/
# include <crypto/drbg.h>
2020-12-11 15:27:15 +03:00
# include <crypto/internal/cipher.h>
2015-06-09 16:55:38 +03:00
# include <linux/kernel.h>
crypto: drbg - reseed 'nopr' drbgs periodically from get_random_bytes()
In contrast to the fully prediction resistant 'pr' DRBGs, the 'nopr'
variants get seeded once at boot and reseeded only rarely thereafter,
namely only after 2^20 requests have been served each. AFAICT, this
reseeding based on the number of requests served is primarily motivated
by information theoretic considerations, c.f. NIST SP800-90Ar1,
sec. 8.6.8 ("Reseeding").
However, given the relatively large seed lifetime of 2^20 requests, the
'nopr' DRBGs can hardly be considered to provide any prediction resistance
whatsoever, i.e. to protect against threats like side channel leaks of the
internal DRBG state (think e.g. leaked VM snapshots). This is expected and
completely in line with the 'nopr' naming, but as e.g. the
"drbg_nopr_hmac_sha512" implementation is potentially being used for
providing the "stdrng" and thus, the crypto_default_rng serving the
in-kernel crypto, it would certainly be desirable to achieve at least the
same level of prediction resistance as get_random_bytes() does.
Note that the chacha20 rngs underlying get_random_bytes() get reseeded
every CRNG_RESEED_INTERVAL == 5min: the secondary, per-NUMA node rngs from
the primary one and the primary rng in turn from the entropy pool, provided
sufficient entropy is available.
The 'nopr' DRBGs do draw randomness from get_random_bytes() for their
initial seed already, so making them to reseed themselves periodically from
get_random_bytes() in order to let them benefit from the latter's
prediction resistance is not such a big change conceptually.
In principle, it would have been also possible to make the 'nopr' DRBGs to
periodically invoke a full reseeding operation, i.e. to also consider the
jitterentropy source (if enabled) in addition to get_random_bytes() for the
seed value. However, get_random_bytes() is relatively lightweight as
compared to the jitterentropy generation process and thus, even though the
'nopr' reseeding is supposed to get invoked infrequently, it's IMO still
worthwhile to avoid occasional latency spikes for drbg_generate() and
stick to get_random_bytes() only. As an additional remark, note that
drawing randomness from the non-SP800-90B-conforming get_random_bytes()
only won't adversely affect SP800-90A conformance either: the very same is
being done during boot via drbg_seed_from_random() already once
rng_is_initialized() flips to true and it follows that if the DRBG
implementation does conform to SP800-90A now, it will continue to do so.
Make the 'nopr' DRBGs to reseed themselves periodically from
get_random_bytes() every CRNG_RESEED_INTERVAL == 5min.
More specifically, introduce a new member ->last_seed_time to struct
drbg_state for recording in units of jiffies when the last seeding
operation had taken place. Make __drbg_seed() maintain it and let
drbg_generate() invoke a reseed from get_random_bytes() via
drbg_seed_from_random() if more than 5min have passed by since the last
seeding operation. Be careful to not to reseed if in testing mode though,
or otherwise the drbg related tests in crypto/testmgr.c would fail to
reproduce the expected output.
In order to keep the formatting clean in drbg_generate() wrap the logic
for deciding whether or not a reseed is due in a new helper,
drbg_nopr_reseed_interval_elapsed().
Signed-off-by: Nicolai Stange <nstange@suse.de>
Reviewed-by: Stephan Müller <smueller@chronox.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2021-11-15 17:18:09 +03:00
# include <linux/jiffies.h>
2014-05-31 17:44:17 +04:00
/***************************************************************
* Backend cipher definitions available to DRBG
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* The order of the DRBG definitions here matter : every DRBG is registered
* as stdrng . Each DRBG receives an increasing cra_priority values the later
* they are defined in this array ( see drbg_fill_array ) .
*
* HMAC DRBGs are favored over Hash DRBGs over CTR DRBGs , and
* the SHA256 / AES 256 over other ciphers . Thus , the favored
* DRBGs are the latest entries in this array .
*/
/*
 * Table of all DRBG flavors this module can instantiate.
 *
 * The ordering is significant: each entry is registered as "stdrng" with a
 * cra_priority that grows with its index (see drbg_fill_array), so the
 * preferred DRBGs -- HMAC over Hash over CTR, and the SHA-256/AES-256 based
 * cores over the others -- must appear last within their sections.
 */
static const struct drbg_core drbg_cores[] = {
#ifdef CONFIG_CRYPTO_DRBG_CTR
	{
		.flags = DRBG_CTR | DRBG_STRENGTH128,
		.statelen = 32, /* 256 bits as defined in 10.2.1 */
		.blocklen_bytes = 16,
		.cra_name = "ctr_aes128",
		.backend_cra_name = "aes",
	}, {
		.flags = DRBG_CTR | DRBG_STRENGTH192,
		.statelen = 40, /* 320 bits as defined in 10.2.1 */
		.blocklen_bytes = 16,
		.cra_name = "ctr_aes192",
		.backend_cra_name = "aes",
	}, {
		.flags = DRBG_CTR | DRBG_STRENGTH256,
		.statelen = 48, /* 384 bits as defined in 10.2.1 */
		.blocklen_bytes = 16,
		.cra_name = "ctr_aes256",
		.backend_cra_name = "aes",
	},
#endif /* CONFIG_CRYPTO_DRBG_CTR */
#ifdef CONFIG_CRYPTO_DRBG_HASH
	{
		.flags = DRBG_HASH | DRBG_STRENGTH128,
		.statelen = 55, /* 440 bits */
		.blocklen_bytes = 20,
		.cra_name = "sha1",
		.backend_cra_name = "sha1",
	}, {
		.flags = DRBG_HASH | DRBG_STRENGTH256,
		.statelen = 111, /* 888 bits */
		.blocklen_bytes = 48,
		.cra_name = "sha384",
		.backend_cra_name = "sha384",
	}, {
		.flags = DRBG_HASH | DRBG_STRENGTH256,
		.statelen = 111, /* 888 bits */
		.blocklen_bytes = 64,
		.cra_name = "sha512",
		.backend_cra_name = "sha512",
	}, {
		.flags = DRBG_HASH | DRBG_STRENGTH256,
		.statelen = 55, /* 440 bits */
		.blocklen_bytes = 32,
		.cra_name = "sha256",
		.backend_cra_name = "sha256",
	},
#endif /* CONFIG_CRYPTO_DRBG_HASH */
#ifdef CONFIG_CRYPTO_DRBG_HMAC
	{
		.flags = DRBG_HMAC | DRBG_STRENGTH128,
		.statelen = 20, /* block length of cipher */
		.blocklen_bytes = 20,
		.cra_name = "hmac_sha1",
		.backend_cra_name = "hmac(sha1)",
	}, {
		.flags = DRBG_HMAC | DRBG_STRENGTH256,
		.statelen = 48, /* block length of cipher */
		.blocklen_bytes = 48,
		.cra_name = "hmac_sha384",
		.backend_cra_name = "hmac(sha384)",
	}, {
		.flags = DRBG_HMAC | DRBG_STRENGTH256,
		.statelen = 32, /* block length of cipher */
		.blocklen_bytes = 32,
		.cra_name = "hmac_sha256",
		.backend_cra_name = "hmac(sha256)",
	}, {
		.flags = DRBG_HMAC | DRBG_STRENGTH256,
		.statelen = 64, /* block length of cipher */
		.blocklen_bytes = 64,
		.cra_name = "hmac_sha512",
		.backend_cra_name = "hmac(sha512)",
	},
#endif /* CONFIG_CRYPTO_DRBG_HMAC */
};
2015-06-09 16:55:38 +03:00
static int drbg_uninstantiate ( struct drbg_state * drbg ) ;
2014-05-31 17:44:17 +04:00
/******************************************************************
* Generic helper functions
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Return strength of DRBG according to SP800-90A section 8.4
 *
 * @flags DRBG flags reference
 *
 * Return: normalized strength in *bytes* (not bits); unknown strength
 *	   flags fall back to 32 bytes to counter programming errors
 */
static inline unsigned short drbg_sec_strength(drbg_flag_t flags)
{
	switch (flags & DRBG_STRENGTH_MASK) {
	case DRBG_STRENGTH128:
		return 16;
	case DRBG_STRENGTH192:
		return 24;
	case DRBG_STRENGTH256:
	default:
		/* 256 bit strength and the defensive default coincide */
		return 32;
	}
}
2019-05-08 17:19:24 +03:00
/*
* FIPS 140 - 2 continuous self test for the noise source
* The test is performed on the noise source input data . Thus , the function
* implicitly knows the size of the buffer to be equal to the security
* strength .
*
* Note , this function disregards the nonce trailing the entropy data during
* initial seeding .
*
* drbg - > drbg_mutex must have been taken .
*
* @ drbg DRBG handle
* @ entropy buffer of seed data to be checked
*
* return :
* 0 on success
* - EAGAIN on when the CTRNG is not yet primed
* < 0 on error
*/
static int drbg_fips_continuous_test ( struct drbg_state * drbg ,
const unsigned char * entropy )
{
unsigned short entropylen = drbg_sec_strength ( drbg - > core - > flags ) ;
int ret = 0 ;
if ( ! IS_ENABLED ( CONFIG_CRYPTO_FIPS ) )
return 0 ;
/* skip test if we test the overall system */
if ( list_empty ( & drbg - > test_data . list ) )
return 0 ;
/* only perform test in FIPS mode */
if ( ! fips_enabled )
return 0 ;
if ( ! drbg - > fips_primed ) {
/* Priming of FIPS test */
memcpy ( drbg - > prev , entropy , entropylen ) ;
drbg - > fips_primed = true ;
/* priming: another round is needed */
return - EAGAIN ;
}
ret = memcmp ( drbg - > prev , entropy , entropylen ) ;
if ( ! ret )
panic ( " DRBG continuous self test failed \n " ) ;
memcpy ( drbg - > prev , entropy , entropylen ) ;
/* the test shall pass when the two values are not equal */
return 0 ;
}
2014-05-31 17:44:17 +04:00
/*
 * Convert an integer into a byte representation of this integer.
 * The byte representation is big-endian.
 *
 * @val value to be converted
 * @buf buffer holding the converted integer -- caller must ensure that
 *	buffer size is at least 32 bit
 *
 * NOTE(review): the struct overlay presumably relies on @buf being
 * sufficiently aligned for a 32-bit store on all supported arches --
 * confirm for callers passing offsets into byte arrays.
 */
#if (defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR))
static inline void drbg_cpu_to_be32(__u32 val, unsigned char *buf)
{
	/* overlay a big-endian word on top of the raw byte buffer */
	struct s {
		__be32 conv;
	};
	struct s *be = (struct s *)buf;

	be->conv = cpu_to_be32(val);
}
#endif /* defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR) */
/******************************************************************
* CTR DRBG callback functions
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifdef CONFIG_CRYPTO_DRBG_CTR
#define CRYPTO_DRBG_CTR_STRING "CTR "
MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes256");
MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes256");
MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes192");
MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes192");
MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes128");
MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes128");

/* forward declarations of the symmetric cipher backend helpers */
static void drbg_kcapi_symsetkey(struct drbg_state *drbg,
				 const unsigned char *key);
static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval,
			  const struct drbg_string *in);
static int drbg_init_sym_kernel(struct drbg_state *drbg);
static int drbg_fini_sym_kernel(struct drbg_state *drbg);
static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
			      u8 *inbuf, u32 inbuflen,
			      u8 *outbuf, u32 outlen);
#define DRBG_OUTSCRATCHLEN 256
2014-05-31 17:44:17 +04:00
/* BCC function for CTR DRBG as defined in 10.4.3 */
static int drbg_ctr_bcc ( struct drbg_state * drbg ,
unsigned char * out , const unsigned char * key ,
2014-06-28 23:58:24 +04:00
struct list_head * in )
2014-05-31 17:44:17 +04:00
{
2014-06-28 23:58:24 +04:00
int ret = 0 ;
struct drbg_string * curr = NULL ;
2014-05-31 17:44:17 +04:00
struct drbg_string data ;
2014-06-28 23:58:24 +04:00
short cnt = 0 ;
2014-05-31 17:44:17 +04:00
drbg_string_fill ( & data , out , drbg_blocklen ( drbg ) ) ;
/* 10.4.3 step 2 / 4 */
2016-05-31 14:11:57 +03:00
drbg_kcapi_symsetkey ( drbg , key ) ;
2014-06-28 23:58:24 +04:00
list_for_each_entry ( curr , in , list ) {
const unsigned char * pos = curr - > buf ;
size_t len = curr - > len ;
2014-05-31 17:44:17 +04:00
/* 10.4.3 step 4.1 */
2014-06-28 23:58:24 +04:00
while ( len ) {
/* 10.4.3 step 4.2 */
if ( drbg_blocklen ( drbg ) = = cnt ) {
cnt = 0 ;
2016-05-31 14:11:57 +03:00
ret = drbg_kcapi_sym ( drbg , out , & data ) ;
2014-06-28 23:58:24 +04:00
if ( ret )
return ret ;
2014-05-31 17:44:17 +04:00
}
2014-06-28 23:58:24 +04:00
out [ cnt ] ^ = * pos ;
pos + + ;
cnt + + ;
len - - ;
2014-05-31 17:44:17 +04:00
}
}
2014-06-28 23:58:24 +04:00
/* 10.4.3 step 4.2 for last block */
if ( cnt )
2016-05-31 14:11:57 +03:00
ret = drbg_kcapi_sym ( drbg , out , & data ) ;
2014-06-28 23:58:24 +04:00
return ret ;
2014-05-31 17:44:17 +04:00
}
/*
 * scratchpad usage: drbg_ctr_update is interlinked with drbg_ctr_df
 * (and drbg_ctr_bcc, but this function does not need any temporary buffers),
 * the scratchpad is used as follows:
 * drbg_ctr_update:
 *	temp
 *		start: drbg->scratchpad
 *		length: drbg_statelen(drbg) + drbg_blocklen(drbg)
 *		note: the cipher writing into this variable works
 *		blocklen-wise. Now, when the statelen is not a multiple
 *		of blocklen, the generation loop below "spills over"
 *		by at most blocklen. Thus, we need to give sufficient
 *		memory.
 *	df_data
 *		start: drbg->scratchpad +
 *				drbg_statelen(drbg) + drbg_blocklen(drbg)
 *		length: drbg_statelen(drbg)
 *
 * drbg_ctr_df:
 *	pad
 *		start: df_data + drbg_statelen(drbg)
 *		length: drbg_blocklen(drbg)
 *	iv
 *		start: pad + drbg_blocklen(drbg)
 *		length: drbg_blocklen(drbg)
 *	temp
 *		start: iv + drbg_blocklen(drbg)
 *		length: drbg_statelen(drbg) + drbg_blocklen(drbg)
 *		note: temp is the buffer that the BCC function operates
 *		on. BCC operates blockwise. drbg_statelen(drbg)
 *		is sufficient when the DRBG state length is a multiple
 *		of the block size. For AES192 (and maybe other ciphers)
 *		this is not correct and the length for temp is
 *		insufficient (yes, that also means for such ciphers,
 *		the final output of all BCC rounds are truncated).
 *		Therefore, add drbg_blocklen(drbg) to cover all
 *		possibilities.
 */
/* Derivation Function for CTR DRBG as defined in 10.4.2 */
static int drbg_ctr_df(struct drbg_state *drbg,
		       unsigned char *df_data, size_t bytes_to_return,
		       struct list_head *seedlist)
{
	int ret = -EFAULT;
	unsigned char L_N[8];
	/* S3 is input */
	struct drbg_string S1, S2, S4, cipherin;
	LIST_HEAD(bcc_list);
	unsigned char *pad = df_data + drbg_statelen(drbg);
	unsigned char *iv = pad + drbg_blocklen(drbg);
	unsigned char *temp = iv + drbg_blocklen(drbg);
	size_t padlen = 0;
	unsigned int templen = 0;
	/* 10.4.2 step 7 */
	unsigned int i = 0;
	/* 10.4.2 step 8 -- fixed derivation key 0x00..0x1f */
	const unsigned char *K = (unsigned char *)
			 "\x00\x01\x02\x03\x04\x05\x06\x07"
			 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
			 "\x10\x11\x12\x13\x14\x15\x16\x17"
			 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f";
	unsigned char *X;
	size_t generated_len = 0;
	size_t inputlen = 0;
	struct drbg_string *seed = NULL;

	memset(pad, 0, drbg_blocklen(drbg));
	memset(iv, 0, drbg_blocklen(drbg));

	/* 10.4.2 step 1 is implicit as we work byte-wise */

	/* 10.4.2 step 2 */
	if ((512/8) < bytes_to_return)
		return -EINVAL;

	/* 10.4.2 step 2 -- calculate the entire length of all input data */
	list_for_each_entry(seed, seedlist, list)
		inputlen += seed->len;
	drbg_cpu_to_be32(inputlen, &L_N[0]);

	/* 10.4.2 step 3 */
	drbg_cpu_to_be32(bytes_to_return, &L_N[4]);

	/* 10.4.2 step 5: length is L_N, input_string, one byte, padding */
	padlen = (inputlen + sizeof(L_N) + 1) % (drbg_blocklen(drbg));
	/* wrap the padlen appropriately */
	if (padlen)
		padlen = drbg_blocklen(drbg) - padlen;
	/*
	 * pad / padlen contains the 0x80 byte and the following zero bytes.
	 * As the calculated padlen value only covers the number of zero
	 * bytes, this value has to be incremented by one for the 0x80 byte.
	 */
	padlen++;
	pad[0] = 0x80;

	/* 10.4.2 step 4 -- first fill the linked list and then order it */
	drbg_string_fill(&S1, iv, drbg_blocklen(drbg));
	list_add_tail(&S1.list, &bcc_list);
	drbg_string_fill(&S2, L_N, sizeof(L_N));
	list_add_tail(&S2.list, &bcc_list);
	list_splice_tail(seedlist, &bcc_list);
	drbg_string_fill(&S4, pad, padlen);
	list_add_tail(&S4.list, &bcc_list);

	/* 10.4.2 step 9 -- fill temp with keylen + blocklen bytes of BCC */
	while (templen < (drbg_keylen(drbg) + (drbg_blocklen(drbg)))) {
		/*
		 * 10.4.2 step 9.1 - the padding is implicit as the buffer
		 * holds zeros after allocation -- even the increment of i
		 * is irrelevant as the increment remains within length of i
		 */
		drbg_cpu_to_be32(i, iv);
		/* 10.4.2 step 9.2 -- BCC and concatenation with temp */
		ret = drbg_ctr_bcc(drbg, temp + templen, K, &bcc_list);
		if (ret)
			goto out;
		/* 10.4.2 step 9.3 */
		i++;
		templen += drbg_blocklen(drbg);
	}

	/* 10.4.2 step 11 -- X is the blocklen bytes following the new key */
	X = temp + (drbg_keylen(drbg));
	drbg_string_fill(&cipherin, X, drbg_blocklen(drbg));

	/* 10.4.2 step 12: overwriting of outval is implemented in next step */

	/* 10.4.2 step 13 */
	drbg_kcapi_symsetkey(drbg, temp);
	while (generated_len < bytes_to_return) {
		short blocklen = 0;
		/*
		 * 10.4.2 step 13.1: the truncation of the key length is
		 * implicit as the key is only drbg_blocklen in size based on
		 * the implementation of the cipher function callback
		 */
		ret = drbg_kcapi_sym(drbg, X, &cipherin);
		if (ret)
			goto out;
		blocklen = (drbg_blocklen(drbg) <
				(bytes_to_return - generated_len)) ?
			   drbg_blocklen(drbg) :
			   (bytes_to_return - generated_len);
		/* 10.4.2 step 13.2 and 14 */
		memcpy(df_data + generated_len, X, blocklen);
		generated_len += blocklen;
	}

	ret = 0;

out:
	/* wipe all intermediate buffers holding key-dependent data */
	memset(iv, 0, drbg_blocklen(drbg));
	memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
	memset(pad, 0, drbg_blocklen(drbg));
	return ret;
}
2014-07-06 04:24:35 +04:00
/*
* update function of CTR DRBG as defined in 10.2 .1 .2
*
* The reseed variable has an enhanced meaning compared to the update
* functions of the other DRBGs as follows :
* 0 = > initial seed from initialization
* 1 = > reseed via drbg_seed
* 2 = > first invocation from drbg_ctr_update when addtl is present . In
* this case , the df_data scratchpad is not deleted so that it is
* available for another calls to prevent calling the DF function
* again .
* 3 = > second invocation from drbg_ctr_update . When the update function
* was called with addtl , the df_data memory already contains the
* DFed addtl information and we do not need to call DF again .
*/
2014-06-28 23:58:24 +04:00
static int drbg_ctr_update ( struct drbg_state * drbg , struct list_head * seed ,
int reseed )
2014-05-31 17:44:17 +04:00
{
int ret = - EFAULT ;
/* 10.2.1.2 step 1 */
unsigned char * temp = drbg - > scratchpad ;
unsigned char * df_data = drbg - > scratchpad + drbg_statelen ( drbg ) +
drbg_blocklen ( drbg ) ;
2014-07-06 04:24:35 +04:00
if ( 3 > reseed )
memset ( df_data , 0 , drbg_statelen ( drbg ) ) ;
2014-05-31 17:44:17 +04:00
2016-06-14 08:34:13 +03:00
if ( ! reseed ) {
/*
* The DRBG uses the CTR mode of the underlying AES cipher . The
* CTR mode increments the counter value after the AES operation
* but SP800 - 90 A requires that the counter is incremented before
* the AES operation . Hence , we increment it at the time we set
* it by one .
*/
crypto_inc ( drbg - > V , drbg_blocklen ( drbg ) ) ;
ret = crypto_skcipher_setkey ( drbg - > ctr_handle , drbg - > C ,
drbg_keylen ( drbg ) ) ;
2014-05-31 17:44:17 +04:00
if ( ret )
goto out ;
}
2016-06-14 08:34:13 +03:00
/* 10.2.1.3.2 step 2 and 10.2.1.4.2 step 2 */
if ( seed ) {
ret = drbg_ctr_df ( drbg , df_data , drbg_statelen ( drbg ) , seed ) ;
2014-05-31 17:44:17 +04:00
if ( ret )
goto out ;
}
2016-06-14 08:35:37 +03:00
ret = drbg_kcapi_sym_ctr ( drbg , df_data , drbg_statelen ( drbg ) ,
temp , drbg_statelen ( drbg ) ) ;
2016-06-14 08:34:13 +03:00
if ( ret )
return ret ;
2014-05-31 17:44:17 +04:00
/* 10.2.1.2 step 5 */
2016-06-14 08:36:06 +03:00
ret = crypto_skcipher_setkey ( drbg - > ctr_handle , temp ,
2016-06-14 08:34:13 +03:00
drbg_keylen ( drbg ) ) ;
if ( ret )
goto out ;
2014-05-31 17:44:17 +04:00
/* 10.2.1.2 step 6 */
memcpy ( drbg - > V , temp + drbg_keylen ( drbg ) , drbg_blocklen ( drbg ) ) ;
2016-06-14 08:34:13 +03:00
/* See above: increment counter by one to compensate timing of CTR op */
crypto_inc ( drbg - > V , drbg_blocklen ( drbg ) ) ;
2014-05-31 17:44:17 +04:00
ret = 0 ;
out :
2015-01-05 02:44:09 +03:00
memset ( temp , 0 , drbg_statelen ( drbg ) + drbg_blocklen ( drbg ) ) ;
2014-07-06 04:24:35 +04:00
if ( 2 ! = reseed )
2015-01-05 02:44:09 +03:00
memset ( df_data , 0 , drbg_statelen ( drbg ) ) ;
2014-05-31 17:44:17 +04:00
return ret ;
}
/*
* scratchpad use : drbg_ctr_update is called independently from
* drbg_ctr_extract_bytes . Therefore , the scratchpad is reused
*/
/* Generate function of CTR DRBG as defined in 10.2.1.5.2 */
static int drbg_ctr_generate ( struct drbg_state * drbg ,
unsigned char * buf , unsigned int buflen ,
2014-07-06 04:25:36 +04:00
struct list_head * addtl )
2014-05-31 17:44:17 +04:00
{
2016-06-14 08:34:13 +03:00
int ret ;
int len = min_t ( int , buflen , INT_MAX ) ;
2014-05-31 17:44:17 +04:00
/* 10.2.1.5.2 step 2 */
2014-07-06 04:25:36 +04:00
if ( addtl & & ! list_empty ( addtl ) ) {
ret = drbg_ctr_update ( drbg , addtl , 2 ) ;
2014-05-31 17:44:17 +04:00
if ( ret )
return 0 ;
}
/* 10.2.1.5.2 step 4.1 */
2018-07-20 20:42:01 +03:00
ret = drbg_kcapi_sym_ctr ( drbg , NULL , 0 , buf , len ) ;
2016-06-14 08:34:13 +03:00
if ( ret )
return ret ;
2014-05-31 17:44:17 +04:00
2014-07-06 04:24:35 +04:00
/* 10.2.1.5.2 step 6 */
ret = drbg_ctr_update ( drbg , NULL , 3 ) ;
2014-05-31 17:44:17 +04:00
if ( ret )
len = ret ;
return len ;
}
2015-12-07 23:36:57 +03:00
static const struct drbg_state_ops drbg_ctr_ops = {
2014-05-31 17:44:17 +04:00
. update = drbg_ctr_update ,
. generate = drbg_ctr_generate ,
. crypto_init = drbg_init_sym_kernel ,
. crypto_fini = drbg_fini_sym_kernel ,
} ;
# endif /* CONFIG_CRYPTO_DRBG_CTR */
/******************************************************************
* HMAC DRBG callback functions
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#if defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_HMAC)
/* forward declarations of the hash backend helpers */
static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval,
			   const struct list_head *in);
static void drbg_kcapi_hmacsetkey(struct drbg_state *drbg,
				  const unsigned char *key);
static int drbg_init_hash_kernel(struct drbg_state *drbg);
static int drbg_fini_hash_kernel(struct drbg_state *drbg);
#endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */

#ifdef CONFIG_CRYPTO_DRBG_HMAC
#define CRYPTO_DRBG_HMAC_STRING "HMAC "
MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha512");
MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha512");
MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha384");
MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha384");
MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha256");
MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha256");
MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha1");
MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha1");
2014-11-04 05:08:09 +03:00
2014-05-31 17:44:17 +04:00
/* update function of HMAC DRBG as defined in 10.1.2.2 */
2014-06-28 23:58:24 +04:00
static int drbg_hmac_update ( struct drbg_state * drbg , struct list_head * seed ,
int reseed )
2014-05-31 17:44:17 +04:00
{
int ret = - EFAULT ;
int i = 0 ;
2014-06-28 23:58:24 +04:00
struct drbg_string seed1 , seed2 , vdata ;
LIST_HEAD ( seedlist ) ;
LIST_HEAD ( vdatalist ) ;
2014-05-31 17:44:17 +04:00
2016-03-28 17:47:55 +03:00
if ( ! reseed ) {
2014-08-17 19:38:58 +04:00
/* 10.1.2.3 step 2 -- memset(0) of C is implicit with kzalloc */
2014-05-31 17:44:17 +04:00
memset ( drbg - > V , 1 , drbg_statelen ( drbg ) ) ;
2016-03-28 17:47:55 +03:00
drbg_kcapi_hmacsetkey ( drbg , drbg - > C ) ;
}
2014-05-31 17:44:17 +04:00
drbg_string_fill ( & seed1 , drbg - > V , drbg_statelen ( drbg ) ) ;
2014-06-28 23:58:24 +04:00
list_add_tail ( & seed1 . list , & seedlist ) ;
2014-05-31 17:44:17 +04:00
/* buffer of seed2 will be filled in for loop below with one byte */
drbg_string_fill ( & seed2 , NULL , 1 ) ;
2014-06-28 23:58:24 +04:00
list_add_tail ( & seed2 . list , & seedlist ) ;
2014-05-31 17:44:17 +04:00
/* input data of seed is allowed to be NULL at this point */
2014-06-28 23:58:24 +04:00
if ( seed )
list_splice_tail ( seed , & seedlist ) ;
2014-05-31 17:44:17 +04:00
2014-06-28 23:58:24 +04:00
drbg_string_fill ( & vdata , drbg - > V , drbg_statelen ( drbg ) ) ;
list_add_tail ( & vdata . list , & vdatalist ) ;
2014-05-31 17:44:17 +04:00
for ( i = 2 ; 0 < i ; i - - ) {
/* first round uses 0x0, second 0x1 */
unsigned char prefix = DRBG_PREFIX0 ;
if ( 1 = = i )
prefix = DRBG_PREFIX1 ;
/* 10.1.2.2 step 1 and 4 -- concatenation and HMAC for key */
seed2 . buf = & prefix ;
2016-03-28 17:47:55 +03:00
ret = drbg_kcapi_hash ( drbg , drbg - > C , & seedlist ) ;
2014-05-31 17:44:17 +04:00
if ( ret )
return ret ;
2016-03-28 17:47:55 +03:00
drbg_kcapi_hmacsetkey ( drbg , drbg - > C ) ;
2014-05-31 17:44:17 +04:00
/* 10.1.2.2 step 2 and 5 -- HMAC for V */
2016-03-28 17:47:55 +03:00
ret = drbg_kcapi_hash ( drbg , drbg - > V , & vdatalist ) ;
2014-05-31 17:44:17 +04:00
if ( ret )
return ret ;
/* 10.1.2.2 step 3 */
2014-06-28 23:58:24 +04:00
if ( ! seed )
2014-05-31 17:44:17 +04:00
return ret ;
}
return 0 ;
}
/* generate function of HMAC DRBG as defined in 10.1.2.5 */
static int drbg_hmac_generate ( struct drbg_state * drbg ,
unsigned char * buf ,
unsigned int buflen ,
2014-07-06 04:25:36 +04:00
struct list_head * addtl )
2014-05-31 17:44:17 +04:00
{
int len = 0 ;
int ret = 0 ;
struct drbg_string data ;
2014-06-28 23:58:24 +04:00
LIST_HEAD ( datalist ) ;
2014-05-31 17:44:17 +04:00
/* 10.1.2.5 step 2 */
2014-07-06 04:25:36 +04:00
if ( addtl & & ! list_empty ( addtl ) ) {
ret = drbg_hmac_update ( drbg , addtl , 1 ) ;
2014-05-31 17:44:17 +04:00
if ( ret )
return ret ;
}
drbg_string_fill ( & data , drbg - > V , drbg_statelen ( drbg ) ) ;
2014-06-28 23:58:24 +04:00
list_add_tail ( & data . list , & datalist ) ;
2014-05-31 17:44:17 +04:00
while ( len < buflen ) {
unsigned int outlen = 0 ;
/* 10.1.2.5 step 4.1 */
2016-03-28 17:47:55 +03:00
ret = drbg_kcapi_hash ( drbg , drbg - > V , & datalist ) ;
2014-05-31 17:44:17 +04:00
if ( ret )
return ret ;
outlen = ( drbg_blocklen ( drbg ) < ( buflen - len ) ) ?
drbg_blocklen ( drbg ) : ( buflen - len ) ;
/* 10.1.2.5 step 4.2 */
memcpy ( buf + len , drbg - > V , outlen ) ;
len + = outlen ;
}
/* 10.1.2.5 step 6 */
2014-07-06 04:25:36 +04:00
if ( addtl & & ! list_empty ( addtl ) )
ret = drbg_hmac_update ( drbg , addtl , 1 ) ;
else
2014-06-28 23:58:24 +04:00
ret = drbg_hmac_update ( drbg , NULL , 1 ) ;
2014-05-31 17:44:17 +04:00
if ( ret )
return ret ;
return len ;
}
2015-12-07 23:36:57 +03:00
/* Callback vector wiring the HMAC DRBG flavor into the generic DRBG core */
static const struct drbg_state_ops drbg_hmac_ops = {
	.update		= drbg_hmac_update,
	.generate	= drbg_hmac_generate,
	.crypto_init	= drbg_init_hash_kernel,
	.crypto_fini	= drbg_fini_hash_kernel,
};
# endif /* CONFIG_CRYPTO_DRBG_HMAC */
/******************************************************************
* Hash DRBG callback functions
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
# ifdef CONFIG_CRYPTO_DRBG_HASH
2014-07-06 04:23:03 +04:00
# define CRYPTO_DRBG_HASH_STRING "HASH "
2014-11-25 11:28:43 +03:00
MODULE_ALIAS_CRYPTO ( " drbg_pr_sha512 " ) ;
MODULE_ALIAS_CRYPTO ( " drbg_nopr_sha512 " ) ;
MODULE_ALIAS_CRYPTO ( " drbg_pr_sha384 " ) ;
MODULE_ALIAS_CRYPTO ( " drbg_nopr_sha384 " ) ;
MODULE_ALIAS_CRYPTO ( " drbg_pr_sha256 " ) ;
MODULE_ALIAS_CRYPTO ( " drbg_nopr_sha256 " ) ;
MODULE_ALIAS_CRYPTO ( " drbg_pr_sha1 " ) ;
MODULE_ALIAS_CRYPTO ( " drbg_nopr_sha1 " ) ;
2014-11-04 05:08:09 +03:00
2014-10-14 23:50:13 +04:00
/*
 * Increment buffer
 *
 * Adds the big-endian integer in @add to the big-endian integer in @dst,
 * modulo 2^(@dstlen * 8); any carry out of the top byte is discarded.
 *
 * @dst buffer to increment (big-endian, @dstlen bytes)
 * @dstlen length of @dst in bytes
 * @add value to add (big-endian, @addlen bytes)
 * @addlen length of @add in bytes; callers guarantee @addlen <= @dstlen
 */
static inline void drbg_add_buf(unsigned char *dst, size_t dstlen,
				const unsigned char *add, size_t addlen)
{
	/* implied: dstlen > addlen */
	unsigned char *dstptr;
	const unsigned char *addptr;
	unsigned int remainder = 0;
	size_t len = addlen;

	/*
	 * An empty addend is a no-op; bail out early so that the
	 * (addlen - 1) pointer arithmetic below never forms an
	 * out-of-bounds pointer (undefined behavior for addlen == 0).
	 */
	if (!addlen)
		return;

	dstptr = dst + (dstlen - 1);
	addptr = add + (addlen - 1);
	/* byte-wise addition from the least significant end */
	while (len) {
		remainder += *dstptr + *addptr;
		*dstptr = remainder & 0xff;
		remainder >>= 8;
		len--; dstptr--; addptr--;
	}
	/* propagate a pending carry through the remaining bytes of dst */
	len = dstlen - addlen;
	while (len && remainder > 0) {
		remainder = *dstptr + 1;
		*dstptr = remainder & 0xff;
		remainder >>= 8;
		len--; dstptr--;
	}
}
2014-05-31 17:44:17 +04:00
/*
* scratchpad usage : as drbg_hash_update and drbg_hash_df are used
* interlinked , the scratchpad is used as follows :
* drbg_hash_update
* start : drbg - > scratchpad
* length : drbg_statelen ( drbg )
* drbg_hash_df :
* start : drbg - > scratchpad + drbg_statelen ( drbg )
* length : drbg_blocklen ( drbg )
*
* drbg_hash_process_addtl uses the scratchpad , but fully completes
* before either of the functions mentioned before are invoked . Therefore ,
* drbg_hash_process_addtl does not need to be specifically considered .
*/
/* Derivation Function for Hash DRBG as defined in 10.4.1 */
static int drbg_hash_df(struct drbg_state *drbg,
			unsigned char *outval, size_t outlen,
			struct list_head *entropylist)
{
	int ret = 0;
	size_t len = 0;
	/* counter byte followed by the requested bit count as BE32 */
	unsigned char input[5];
	/* per the scratchpad usage note above, hash_df owns the
	 * drbg_blocklen() bytes behind drbg_hash_update()'s region */
	unsigned char *tmp = drbg->scratchpad + drbg_statelen(drbg);
	struct drbg_string data;

	/* 10.4.1 step 3 */
	input[0] = 1;
	drbg_cpu_to_be32((outlen * 8), &input[1]);

	/* 10.4.1 step 4.1 -- concatenation of data for input into hash;
	 * the counter/length header is prepended to the caller's list */
	drbg_string_fill(&data, input, 5);
	list_add(&data.list, entropylist);

	/* 10.4.1 step 4: hash repeatedly until outlen bytes are produced */
	while (len < outlen) {
		short blocklen = 0;
		/* 10.4.1 step 4.1 */
		ret = drbg_kcapi_hash(drbg, tmp, entropylist);
		if (ret)
			goto out;
		/* 10.4.1 step 4.2: bump the counter byte for the next round */
		input[0]++;
		blocklen = (drbg_blocklen(drbg) < (outlen - len)) ?
			    drbg_blocklen(drbg) : (outlen - len);
		memcpy(outval + len, tmp, blocklen);
		len += blocklen;
	}
out:
	/* wipe intermediate hash output, also on the error path */
	memset(tmp, 0, drbg_blocklen(drbg));
	return ret;
}
/* update function for Hash DRBG as defined in 10.1.1.2 / 10.1.1.3 */
2014-06-28 23:58:24 +04:00
static int drbg_hash_update ( struct drbg_state * drbg , struct list_head * seed ,
2014-05-31 17:44:17 +04:00
int reseed )
{
int ret = 0 ;
struct drbg_string data1 , data2 ;
2014-06-28 23:58:24 +04:00
LIST_HEAD ( datalist ) ;
LIST_HEAD ( datalist2 ) ;
2014-05-31 17:44:17 +04:00
unsigned char * V = drbg - > scratchpad ;
unsigned char prefix = DRBG_PREFIX1 ;
if ( ! seed )
return - EINVAL ;
if ( reseed ) {
/* 10.1.1.3 step 1 */
memcpy ( V , drbg - > V , drbg_statelen ( drbg ) ) ;
drbg_string_fill ( & data1 , & prefix , 1 ) ;
2014-06-28 23:58:24 +04:00
list_add_tail ( & data1 . list , & datalist ) ;
2014-05-31 17:44:17 +04:00
drbg_string_fill ( & data2 , V , drbg_statelen ( drbg ) ) ;
2014-06-28 23:58:24 +04:00
list_add_tail ( & data2 . list , & datalist ) ;
2014-05-31 17:44:17 +04:00
}
2014-06-28 23:58:24 +04:00
list_splice_tail ( seed , & datalist ) ;
2014-05-31 17:44:17 +04:00
/* 10.1.1.2 / 10.1.1.3 step 2 and 3 */
2014-06-28 23:58:24 +04:00
ret = drbg_hash_df ( drbg , drbg - > V , drbg_statelen ( drbg ) , & datalist ) ;
2014-05-31 17:44:17 +04:00
if ( ret )
goto out ;
/* 10.1.1.2 / 10.1.1.3 step 4 */
prefix = DRBG_PREFIX0 ;
drbg_string_fill ( & data1 , & prefix , 1 ) ;
2014-06-28 23:58:24 +04:00
list_add_tail ( & data1 . list , & datalist2 ) ;
2014-05-31 17:44:17 +04:00
drbg_string_fill ( & data2 , drbg - > V , drbg_statelen ( drbg ) ) ;
2014-06-28 23:58:24 +04:00
list_add_tail ( & data2 . list , & datalist2 ) ;
2014-05-31 17:44:17 +04:00
/* 10.1.1.2 / 10.1.1.3 step 4 */
2014-06-28 23:58:24 +04:00
ret = drbg_hash_df ( drbg , drbg - > C , drbg_statelen ( drbg ) , & datalist2 ) ;
2014-05-31 17:44:17 +04:00
out :
2015-01-05 02:44:09 +03:00
memset ( drbg - > scratchpad , 0 , drbg_statelen ( drbg ) ) ;
2014-05-31 17:44:17 +04:00
return ret ;
}
/* processing of additional information string for Hash DRBG */
static int drbg_hash_process_addtl ( struct drbg_state * drbg ,
2014-07-06 04:25:36 +04:00
struct list_head * addtl )
2014-05-31 17:44:17 +04:00
{
int ret = 0 ;
struct drbg_string data1 , data2 ;
2014-06-28 23:58:24 +04:00
LIST_HEAD ( datalist ) ;
2014-05-31 17:44:17 +04:00
unsigned char prefix = DRBG_PREFIX2 ;
/* 10.1.1.4 step 2 */
2014-07-06 04:25:36 +04:00
if ( ! addtl | | list_empty ( addtl ) )
2014-05-31 17:44:17 +04:00
return 0 ;
/* 10.1.1.4 step 2a */
drbg_string_fill ( & data1 , & prefix , 1 ) ;
drbg_string_fill ( & data2 , drbg - > V , drbg_statelen ( drbg ) ) ;
2014-06-28 23:58:24 +04:00
list_add_tail ( & data1 . list , & datalist ) ;
list_add_tail ( & data2 . list , & datalist ) ;
2014-07-06 04:25:36 +04:00
list_splice_tail ( addtl , & datalist ) ;
2016-03-28 17:47:55 +03:00
ret = drbg_kcapi_hash ( drbg , drbg - > scratchpad , & datalist ) ;
2014-05-31 17:44:17 +04:00
if ( ret )
goto out ;
/* 10.1.1.4 step 2b */
drbg_add_buf ( drbg - > V , drbg_statelen ( drbg ) ,
drbg - > scratchpad , drbg_blocklen ( drbg ) ) ;
out :
2015-01-05 02:44:09 +03:00
memset ( drbg - > scratchpad , 0 , drbg_blocklen ( drbg ) ) ;
2014-05-31 17:44:17 +04:00
return ret ;
}
/* Hashgen defined in 10.1.1.4 */
static int drbg_hash_hashgen(struct drbg_state *drbg,
			     unsigned char *buf,
			     unsigned int buflen)
{
	int len = 0;
	int ret = 0;
	/* working copy of V in the first statelen bytes of the scratchpad,
	 * hash output in the blocklen bytes behind it */
	unsigned char *src = drbg->scratchpad;
	unsigned char *dst = drbg->scratchpad + drbg_statelen(drbg);
	struct drbg_string data;
	LIST_HEAD(datalist);

	/* 10.1.1.4 step hashgen 2 */
	memcpy(src, drbg->V, drbg_statelen(drbg));
	drbg_string_fill(&data, src, drbg_statelen(drbg));
	list_add_tail(&data.list, &datalist);
	while (len < buflen) {
		unsigned int outlen = 0;
		/* 10.1.1.4 step hashgen 4.1 */
		ret = drbg_kcapi_hash(drbg, dst, &datalist);
		if (ret) {
			/* propagate the error code as the (negative) length */
			len = ret;
			goto out;
		}
		outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
			  drbg_blocklen(drbg) : (buflen - len);
		/* 10.1.1.4 step hashgen 4.2 */
		memcpy(buf + len, dst, outlen);
		len += outlen;
		/* 10.1.1.4 hashgen step 4.3: increment the hashed value,
		 * only when another iteration will consume it */
		if (len < buflen)
			crypto_inc(src, drbg_statelen(drbg));
	}
out:
	/* wipe both scratchpad regions used above */
	memset(drbg->scratchpad, 0,
	       (drbg_statelen(drbg) + drbg_blocklen(drbg)));
	return len;
}
/* generate function for Hash DRBG as defined in 10.1.1.4 */
static int drbg_hash_generate(struct drbg_state *drbg,
			      unsigned char *buf, unsigned int buflen,
			      struct list_head *addtl)
{
	int len = 0;
	int ret = 0;
	/* overlay for serializing the reseed counter in big-endian form */
	union {
		unsigned char req[8];
		__be64 req_int;
	} u;
	unsigned char prefix = DRBG_PREFIX3;
	struct drbg_string data1, data2;
	LIST_HEAD(datalist);

	/* 10.1.1.4 step 2 */
	ret = drbg_hash_process_addtl(drbg, addtl);
	if (ret)
		return ret;
	/* 10.1.1.4 step 3: len is the byte count or a negative error */
	len = drbg_hash_hashgen(drbg, buf, buflen);

	/* this is the value H as documented in 10.1.1.4 */
	/* 10.1.1.4 step 4: H = Hash(0x03 || V) */
	drbg_string_fill(&data1, &prefix, 1);
	list_add_tail(&data1.list, &datalist);
	drbg_string_fill(&data2, drbg->V, drbg_statelen(drbg));
	list_add_tail(&data2.list, &datalist);
	ret = drbg_kcapi_hash(drbg, drbg->scratchpad, &datalist);
	if (ret) {
		/* report the hash failure instead of the generated length */
		len = ret;
		goto out;
	}

	/* 10.1.1.4 step 5: V = (V + H + C + reseed_ctr) mod 2^(statelen*8) */
	drbg_add_buf(drbg->V, drbg_statelen(drbg),
		     drbg->scratchpad, drbg_blocklen(drbg));
	drbg_add_buf(drbg->V, drbg_statelen(drbg),
		     drbg->C, drbg_statelen(drbg));
	u.req_int = cpu_to_be64(drbg->reseed_ctr);
	drbg_add_buf(drbg->V, drbg_statelen(drbg), u.req, 8);

out:
	/* wipe H from the scratchpad */
	memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
	return len;
}
/*
* scratchpad usage : as update and generate are used isolated , both
* can use the scratchpad
*/
2015-12-07 23:36:57 +03:00
/* Callback vector wiring the Hash DRBG flavor into the generic DRBG core */
static const struct drbg_state_ops drbg_hash_ops = {
	.update		= drbg_hash_update,
	.generate	= drbg_hash_generate,
	.crypto_init	= drbg_init_hash_kernel,
	.crypto_fini	= drbg_fini_hash_kernel,
};
# endif /* CONFIG_CRYPTO_DRBG_HASH */
/******************************************************************
* Functions common for DRBG implementations
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2015-05-25 16:09:14 +03:00
static inline int __drbg_seed ( struct drbg_state * drbg , struct list_head * seed ,
crypto: drbg - track whether DRBG was seeded with !rng_is_initialized()
Currently, the DRBG implementation schedules asynchronous works from
random_ready_callbacks for reseeding the DRBG instances with output from
get_random_bytes() once the latter has sufficient entropy available.
However, as the get_random_bytes() initialization state can get queried by
means of rng_is_initialized() now, there is no real need for this
asynchronous reseeding logic anymore and it's better to keep things simple
by doing it synchronously when needed instead, i.e. from drbg_generate()
once rng_is_initialized() has flipped to true.
Of course, for this to work, drbg_generate() would need some means by which
it can tell whether or not rng_is_initialized() has flipped to true since
the last seeding from get_random_bytes(). Or equivalently, whether or not
the last seed from get_random_bytes() has happened when
rng_is_initialized() was still evaluating to false.
As it currently stands, enum drbg_seed_state allows for the representation
of two different DRBG seeding states: DRBG_SEED_STATE_UNSEEDED and
DRBG_SEED_STATE_FULL. The former makes drbg_generate() to invoke a full
reseeding operation involving both, the rather expensive jitterentropy as
well as the get_random_bytes() randomness sources. The DRBG_SEED_STATE_FULL
state on the other hand implies that no reseeding at all is required for a
!->pr DRBG variant.
Introduce the new DRBG_SEED_STATE_PARTIAL state to enum drbg_seed_state for
representing the condition that a DRBG was being seeded when
rng_is_initialized() had still been false. In particular, this new state
implies that
- the given DRBG instance has been fully seeded from the jitterentropy
source (if enabled)
- and drbg_generate() is supposed to reseed from get_random_bytes()
*only* once rng_is_initialized() turns to true.
Up to now, the __drbg_seed() helper used to set the given DRBG instance's
->seeded state to constant DRBG_SEED_STATE_FULL. Introduce a new argument
allowing for the specification of the to be written ->seeded value instead.
Make the first of its two callers, drbg_seed(), determine the appropriate
value based on rng_is_initialized(). The remaining caller,
drbg_async_seed(), is known to get invoked only once rng_is_initialized()
is true, hence let it pass constant DRBG_SEED_STATE_FULL for the new
argument to __drbg_seed().
There is no change in behaviour, except for that the pr_devel() in
drbg_generate() would now report "unseeded" for ->pr DRBG instances which
had last been seeded when rng_is_initialized() was still evaluating to
false.
Signed-off-by: Nicolai Stange <nstange@suse.de>
Reviewed-by: Stephan Müller <smueller@chronox.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2021-11-15 17:18:05 +03:00
int reseed , enum drbg_seed_state new_seed_state )
2015-05-25 16:09:14 +03:00
{
int ret = drbg - > d_ops - > update ( drbg , seed , reseed ) ;
if ( ret )
return ret ;
crypto: drbg - track whether DRBG was seeded with !rng_is_initialized()
Currently, the DRBG implementation schedules asynchronous works from
random_ready_callbacks for reseeding the DRBG instances with output from
get_random_bytes() once the latter has sufficient entropy available.
However, as the get_random_bytes() initialization state can get queried by
means of rng_is_initialized() now, there is no real need for this
asynchronous reseeding logic anymore and it's better to keep things simple
by doing it synchronously when needed instead, i.e. from drbg_generate()
once rng_is_initialized() has flipped to true.
Of course, for this to work, drbg_generate() would need some means by which
it can tell whether or not rng_is_initialized() has flipped to true since
the last seeding from get_random_bytes(). Or equivalently, whether or not
the last seed from get_random_bytes() has happened when
rng_is_initialized() was still evaluating to false.
As it currently stands, enum drbg_seed_state allows for the representation
of two different DRBG seeding states: DRBG_SEED_STATE_UNSEEDED and
DRBG_SEED_STATE_FULL. The former makes drbg_generate() to invoke a full
reseeding operation involving both, the rather expensive jitterentropy as
well as the get_random_bytes() randomness sources. The DRBG_SEED_STATE_FULL
state on the other hand implies that no reseeding at all is required for a
!->pr DRBG variant.
Introduce the new DRBG_SEED_STATE_PARTIAL state to enum drbg_seed_state for
representing the condition that a DRBG was being seeded when
rng_is_initialized() had still been false. In particular, this new state
implies that
- the given DRBG instance has been fully seeded from the jitterentropy
source (if enabled)
- and drbg_generate() is supposed to reseed from get_random_bytes()
*only* once rng_is_initialized() turns to true.
Up to now, the __drbg_seed() helper used to set the given DRBG instance's
->seeded state to constant DRBG_SEED_STATE_FULL. Introduce a new argument
allowing for the specification of the to be written ->seeded value instead.
Make the first of its two callers, drbg_seed(), determine the appropriate
value based on rng_is_initialized(). The remaining caller,
drbg_async_seed(), is known to get invoked only once rng_is_initialized()
is true, hence let it pass constant DRBG_SEED_STATE_FULL for the new
argument to __drbg_seed().
There is no change in behaviour, except for that the pr_devel() in
drbg_generate() would now report "unseeded" for ->pr DRBG instances which
had last been seeded when rng_is_initialized() was still evaluating to
false.
Signed-off-by: Nicolai Stange <nstange@suse.de>
Reviewed-by: Stephan Müller <smueller@chronox.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2021-11-15 17:18:05 +03:00
drbg - > seeded = new_seed_state ;
crypto: drbg - reseed 'nopr' drbgs periodically from get_random_bytes()
In contrast to the fully prediction resistant 'pr' DRBGs, the 'nopr'
variants get seeded once at boot and reseeded only rarely thereafter,
namely only after 2^20 requests have been served each. AFAICT, this
reseeding based on the number of requests served is primarily motivated
by information theoretic considerations, c.f. NIST SP800-90Ar1,
sec. 8.6.8 ("Reseeding").
However, given the relatively large seed lifetime of 2^20 requests, the
'nopr' DRBGs can hardly be considered to provide any prediction resistance
whatsoever, i.e. to protect against threats like side channel leaks of the
internal DRBG state (think e.g. leaked VM snapshots). This is expected and
completely in line with the 'nopr' naming, but as e.g. the
"drbg_nopr_hmac_sha512" implementation is potentially being used for
providing the "stdrng" and thus, the crypto_default_rng serving the
in-kernel crypto, it would certainly be desirable to achieve at least the
same level of prediction resistance as get_random_bytes() does.
Note that the chacha20 rngs underlying get_random_bytes() get reseeded
every CRNG_RESEED_INTERVAL == 5min: the secondary, per-NUMA node rngs from
the primary one and the primary rng in turn from the entropy pool, provided
sufficient entropy is available.
The 'nopr' DRBGs do draw randomness from get_random_bytes() for their
initial seed already, so making them to reseed themselves periodically from
get_random_bytes() in order to let them benefit from the latter's
prediction resistance is not such a big change conceptually.
In principle, it would have been also possible to make the 'nopr' DRBGs to
periodically invoke a full reseeding operation, i.e. to also consider the
jitterentropy source (if enabled) in addition to get_random_bytes() for the
seed value. However, get_random_bytes() is relatively lightweight as
compared to the jitterentropy generation process and thus, even though the
'nopr' reseeding is supposed to get invoked infrequently, it's IMO still
worthwhile to avoid occasional latency spikes for drbg_generate() and
stick to get_random_bytes() only. As an additional remark, note that
drawing randomness from the non-SP800-90B-conforming get_random_bytes()
only won't adversely affect SP800-90A conformance either: the very same is
being done during boot via drbg_seed_from_random() already once
rng_is_initialized() flips to true and it follows that if the DRBG
implementation does conform to SP800-90A now, it will continue to do so.
Make the 'nopr' DRBGs to reseed themselves periodically from
get_random_bytes() every CRNG_RESEED_INTERVAL == 5min.
More specifically, introduce a new member ->last_seed_time to struct
drbg_state for recording in units of jiffies when the last seeding
operation had taken place. Make __drbg_seed() maintain it and let
drbg_generate() invoke a reseed from get_random_bytes() via
drbg_seed_from_random() if more than 5min have passed by since the last
seeding operation. Be careful to not to reseed if in testing mode though,
or otherwise the drbg related tests in crypto/testmgr.c would fail to
reproduce the expected output.
In order to keep the formatting clean in drbg_generate() wrap the logic
for deciding whether or not a reseed is due in a new helper,
drbg_nopr_reseed_interval_elapsed().
Signed-off-by: Nicolai Stange <nstange@suse.de>
Reviewed-by: Stephan Müller <smueller@chronox.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2021-11-15 17:18:09 +03:00
drbg - > last_seed_time = jiffies ;
2015-05-25 16:09:14 +03:00
/* 10.1.1.2 / 10.1.1.3 step 5 */
drbg - > reseed_ctr = 1 ;
crypto: drbg - move dynamic ->reseed_threshold adjustments to __drbg_seed()
Since commit 42ea507fae1a ("crypto: drbg - reseed often if seedsource is
degraded"), the maximum seed lifetime represented by ->reseed_threshold
gets temporarily lowered if the get_random_bytes() source cannot provide
sufficient entropy yet, as is common during boot, and restored back to
the original value again once that has changed.
More specifically, if the add_random_ready_callback() invoked from
drbg_prepare_hrng() in the course of DRBG instantiation does not return
-EALREADY, that is, if get_random_bytes() has not been fully initialized
at this point yet, drbg_prepare_hrng() will lower ->reseed_threshold
to a value of 50. The drbg_async_seed() scheduled from said
random_ready_callback will eventually restore the original value.
A future patch will replace the random_ready_callback based notification
mechanism and thus, there will be no add_random_ready_callback() return
value anymore which could get compared to -EALREADY.
However, there's __drbg_seed() which gets invoked in the course of both,
the DRBG instantiation as well as the eventual reseeding from
get_random_bytes() in aforementioned drbg_async_seed(), if any. Moreover,
it knows about the get_random_bytes() initialization state by the time the
seed data had been obtained from it: the new_seed_state argument introduced
with the previous patch would get set to DRBG_SEED_STATE_PARTIAL in case
get_random_bytes() had not been fully initialized yet and to
DRBG_SEED_STATE_FULL otherwise. Thus, __drbg_seed() provides a convenient
alternative for managing that ->reseed_threshold lowering and restoring at
a central place.
Move all ->reseed_threshold adjustment code from drbg_prepare_hrng() and
drbg_async_seed() respectively to __drbg_seed(). Make __drbg_seed()
lower the ->reseed_threshold to 50 in case its new_seed_state argument
equals DRBG_SEED_STATE_PARTIAL and let it restore the original value
otherwise.
There is no change in behaviour.
Signed-off-by: Nicolai Stange <nstange@suse.de>
Reviewed-by: Stephan Müller <smueller@chronox.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2021-11-15 17:18:06 +03:00
switch ( drbg - > seeded ) {
case DRBG_SEED_STATE_UNSEEDED :
/* Impossible, but handle it to silence compiler warnings. */
fallthrough ;
case DRBG_SEED_STATE_PARTIAL :
/*
* Require frequent reseeds until the seed source is
* fully initialized .
*/
drbg - > reseed_threshold = 50 ;
break ;
case DRBG_SEED_STATE_FULL :
/*
* Seed source has become fully initialized , frequent
* reseeds no longer required .
*/
drbg - > reseed_threshold = drbg_max_requests ( drbg ) ;
break ;
}
2015-05-25 16:09:14 +03:00
return ret ;
}
2019-05-08 17:19:24 +03:00
static inline int drbg_get_random_bytes ( struct drbg_state * drbg ,
unsigned char * entropy ,
unsigned int entropylen )
{
int ret ;
do {
get_random_bytes ( entropy , entropylen ) ;
ret = drbg_fips_continuous_test ( drbg , entropy ) ;
if ( ret & & ret ! = - EAGAIN )
return ret ;
} while ( ret ) ;
return 0 ;
}
crypto: drbg - make reseeding from get_random_bytes() synchronous
get_random_bytes() usually hasn't full entropy available by the time DRBG
instances are first getting seeded from it during boot. Thus, the DRBG
implementation registers random_ready_callbacks which would in turn
schedule some work for reseeding the DRBGs once get_random_bytes() has
sufficient entropy available.
For reference, the relevant history around handling DRBG (re)seeding in
the context of a not yet fully seeded get_random_bytes() is:
commit 16b369a91d0d ("random: Blocking API for accessing
nonblocking_pool")
commit 4c7879907edd ("crypto: drbg - add async seeding operation")
commit 205a525c3342 ("random: Add callback API for random pool
readiness")
commit 57225e679788 ("crypto: drbg - Use callback API for random
readiness")
commit c2719503f5e1 ("random: Remove kernel blocking API")
However, some time later, the initialization state of get_random_bytes()
has been made queryable via rng_is_initialized() introduced with commit
9a47249d444d ("random: Make crng state queryable"). This primitive now
allows for streamlining the DRBG reseeding from get_random_bytes() by
replacing that aforementioned asynchronous work scheduling from
random_ready_callbacks with some simpler, synchronous code in
drbg_generate() next to the related logic already present therein. Apart
from improving overall code readability, this change will also enable DRBG
users to rely on wait_for_random_bytes() for ensuring that the initial
seeding has completed, if desired.
The previous patches already laid the grounds by making drbg_seed() to
record at each DRBG instance whether it was being seeded at a time when
rng_is_initialized() still had been false as indicated by
->seeded == DRBG_SEED_STATE_PARTIAL.
All that remains to be done now is to make drbg_generate() check for this
condition, determine whether rng_is_initialized() has flipped to true in
the meanwhile and invoke a reseed from get_random_bytes() if so.
Make this move:
- rename the former drbg_async_seed() work handler, i.e. the one in charge
of reseeding a DRBG instance from get_random_bytes(), to
"drbg_seed_from_random()",
- change its signature as appropriate, i.e. make it take a struct
drbg_state rather than a work_struct and change its return type from
"void" to "int" in order to allow for passing error information from
e.g. its __drbg_seed() invocation onwards to callers,
- make drbg_generate() invoke this drbg_seed_from_random() once it
encounters a DRBG instance with ->seeded == DRBG_SEED_STATE_PARTIAL by
the time rng_is_initialized() has flipped to true and
- prune everything related to the former, random_ready_callback based
mechanism.
As drbg_seed_from_random() is now getting invoked from drbg_generate() with
the ->drbg_mutex being held, it must not attempt to recursively grab it
once again. Remove the corresponding mutex operations from what is now
drbg_seed_from_random(). Furthermore, as drbg_seed_from_random() can now
report errors directly to its caller, there's no need for it to temporarily
switch the DRBG's ->seeded state to DRBG_SEED_STATE_UNSEEDED so that a
failure of the subsequently invoked __drbg_seed() will get signaled to
drbg_generate(). Don't do it then.
Signed-off-by: Nicolai Stange <nstange@suse.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2021-11-15 17:18:07 +03:00
static int drbg_seed_from_random ( struct drbg_state * drbg )
2015-05-25 16:09:36 +03:00
{
struct drbg_string data ;
LIST_HEAD ( seedlist ) ;
2015-06-09 16:55:38 +03:00
unsigned int entropylen = drbg_sec_strength ( drbg - > core - > flags ) ;
unsigned char entropy [ 32 ] ;
2019-05-08 17:19:24 +03:00
int ret ;
2015-05-25 16:09:36 +03:00
2015-06-09 16:55:38 +03:00
BUG_ON ( ! entropylen ) ;
BUG_ON ( entropylen > sizeof ( entropy ) ) ;
2015-05-25 16:09:36 +03:00
2015-06-09 16:55:38 +03:00
drbg_string_fill ( & data , entropy , entropylen ) ;
2015-05-25 16:09:36 +03:00
list_add_tail ( & data . list , & seedlist ) ;
2015-06-09 16:55:38 +03:00
2019-05-08 17:19:24 +03:00
ret = drbg_get_random_bytes ( drbg , entropy , entropylen ) ;
if ( ret )
crypto: drbg - make reseeding from get_random_bytes() synchronous
get_random_bytes() usually hasn't full entropy available by the time DRBG
instances are first getting seeded from it during boot. Thus, the DRBG
implementation registers random_ready_callbacks which would in turn
schedule some work for reseeding the DRBGs once get_random_bytes() has
sufficient entropy available.
For reference, the relevant history around handling DRBG (re)seeding in
the context of a not yet fully seeded get_random_bytes() is:
commit 16b369a91d0d ("random: Blocking API for accessing
nonblocking_pool")
commit 4c7879907edd ("crypto: drbg - add async seeding operation")
commit 205a525c3342 ("random: Add callback API for random pool
readiness")
commit 57225e679788 ("crypto: drbg - Use callback API for random
readiness")
commit c2719503f5e1 ("random: Remove kernel blocking API")
However, some time later, the initialization state of get_random_bytes()
has been made queryable via rng_is_initialized() introduced with commit
9a47249d444d ("random: Make crng state queryable"). This primitive now
allows for streamlining the DRBG reseeding from get_random_bytes() by
replacing that aforementioned asynchronous work scheduling from
random_ready_callbacks with some simpler, synchronous code in
drbg_generate() next to the related logic already present therein. Apart
from improving overall code readability, this change will also enable DRBG
users to rely on wait_for_random_bytes() for ensuring that the initial
seeding has completed, if desired.
The previous patches already laid the grounds by making drbg_seed() to
record at each DRBG instance whether it was being seeded at a time when
rng_is_initialized() still had been false as indicated by
->seeded == DRBG_SEED_STATE_PARTIAL.
All that remains to be done now is to make drbg_generate() check for this
condition, determine whether rng_is_initialized() has flipped to true in
the meanwhile and invoke a reseed from get_random_bytes() if so.
Make this move:
- rename the former drbg_async_seed() work handler, i.e. the one in charge
of reseeding a DRBG instance from get_random_bytes(), to
"drbg_seed_from_random()",
- change its signature as appropriate, i.e. make it take a struct
drbg_state rather than a work_struct and change its return type from
"void" to "int" in order to allow for passing error information from
e.g. its __drbg_seed() invocation onwards to callers,
- make drbg_generate() invoke this drbg_seed_from_random() once it
encounters a DRBG instance with ->seeded == DRBG_SEED_STATE_PARTIAL by
the time rng_is_initialized() has flipped to true and
- prune everything related to the former, random_ready_callback based
mechanism.
As drbg_seed_from_random() is now getting invoked from drbg_generate() with
the ->drbg_mutex being held, it must not attempt to recursively grab it
once again. Remove the corresponding mutex operations from what is now
drbg_seed_from_random(). Furthermore, as drbg_seed_from_random() can now
report errors directly to its caller, there's no need for it to temporarily
switch the DRBG's ->seeded state to DRBG_SEED_STATE_UNSEEDED so that a
failure of the subsequently invoked __drbg_seed() will get signaled to
drbg_generate(). Don't do it then.
Signed-off-by: Nicolai Stange <nstange@suse.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2021-11-15 17:18:07 +03:00
goto out ;
2015-06-09 16:55:38 +03:00
crypto: drbg - make reseeding from get_random_bytes() synchronous
get_random_bytes() usually hasn't full entropy available by the time DRBG
instances are first getting seeded from it during boot. Thus, the DRBG
implementation registers random_ready_callbacks which would in turn
schedule some work for reseeding the DRBGs once get_random_bytes() has
sufficient entropy available.
For reference, the relevant history around handling DRBG (re)seeding in
the context of a not yet fully seeded get_random_bytes() is:
commit 16b369a91d0d ("random: Blocking API for accessing
nonblocking_pool")
commit 4c7879907edd ("crypto: drbg - add async seeding operation")
commit 205a525c3342 ("random: Add callback API for random pool
readiness")
commit 57225e679788 ("crypto: drbg - Use callback API for random
readiness")
commit c2719503f5e1 ("random: Remove kernel blocking API")
However, some time later, the initialization state of get_random_bytes()
has been made queryable via rng_is_initialized() introduced with commit
9a47249d444d ("random: Make crng state queryable"). This primitive now
allows for streamlining the DRBG reseeding from get_random_bytes() by
replacing that aforementioned asynchronous work scheduling from
random_ready_callbacks with some simpler, synchronous code in
drbg_generate() next to the related logic already present therein. Apart
from improving overall code readability, this change will also enable DRBG
users to rely on wait_for_random_bytes() for ensuring that the initial
seeding has completed, if desired.
The previous patches already laid the grounds by making drbg_seed() to
record at each DRBG instance whether it was being seeded at a time when
rng_is_initialized() still had been false as indicated by
->seeded == DRBG_SEED_STATE_PARTIAL.
All that remains to be done now is to make drbg_generate() check for this
condition, determine whether rng_is_initialized() has flipped to true in
the meanwhile and invoke a reseed from get_random_bytes() if so.
Make this move:
- rename the former drbg_async_seed() work handler, i.e. the one in charge
of reseeding a DRBG instance from get_random_bytes(), to
"drbg_seed_from_random()",
- change its signature as appropriate, i.e. make it take a struct
drbg_state rather than a work_struct and change its return type from
"void" to "int" in order to allow for passing error information from
e.g. its __drbg_seed() invocation onwards to callers,
- make drbg_generate() invoke this drbg_seed_from_random() once it
encounters a DRBG instance with ->seeded == DRBG_SEED_STATE_PARTIAL by
the time rng_is_initialized() has flipped to true and
- prune everything related to the former, random_ready_callback based
mechanism.
As drbg_seed_from_random() is now getting invoked from drbg_generate() with
the ->drbg_mutex being held, it must not attempt to recursively grab it
once again. Remove the corresponding mutex operations from what is now
drbg_seed_from_random(). Furthermore, as drbg_seed_from_random() can now
report errors directly to its caller, there's no need for it to temporarily
switch the DRBG's ->seeded state to DRBG_SEED_STATE_UNSEEDED so that a
failure of the subsequently invoked __drbg_seed() will get signaled to
drbg_generate(). Don't do it then.
Signed-off-by: Nicolai Stange <nstange@suse.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2021-11-15 17:18:07 +03:00
ret = __drbg_seed ( drbg , & seedlist , true , DRBG_SEED_STATE_FULL ) ;
2015-06-09 16:55:38 +03:00
crypto: drbg - make reseeding from get_random_bytes() synchronous
get_random_bytes() usually hasn't full entropy available by the time DRBG
instances are first getting seeded from it during boot. Thus, the DRBG
implementation registers random_ready_callbacks which would in turn
schedule some work for reseeding the DRBGs once get_random_bytes() has
sufficient entropy available.
For reference, the relevant history around handling DRBG (re)seeding in
the context of a not yet fully seeded get_random_bytes() is:
commit 16b369a91d0d ("random: Blocking API for accessing
nonblocking_pool")
commit 4c7879907edd ("crypto: drbg - add async seeding operation")
commit 205a525c3342 ("random: Add callback API for random pool
readiness")
commit 57225e679788 ("crypto: drbg - Use callback API for random
readiness")
commit c2719503f5e1 ("random: Remove kernel blocking API")
However, some time later, the initialization state of get_random_bytes()
has been made queryable via rng_is_initialized() introduced with commit
9a47249d444d ("random: Make crng state queryable"). This primitive now
allows for streamlining the DRBG reseeding from get_random_bytes() by
replacing that aforementioned asynchronous work scheduling from
random_ready_callbacks with some simpler, synchronous code in
drbg_generate() next to the related logic already present therein. Apart
from improving overall code readability, this change will also enable DRBG
users to rely on wait_for_random_bytes() for ensuring that the initial
seeding has completed, if desired.
The previous patches already laid the grounds by making drbg_seed() to
record at each DRBG instance whether it was being seeded at a time when
rng_is_initialized() still had been false as indicated by
->seeded == DRBG_SEED_STATE_PARTIAL.
All that remains to be done now is to make drbg_generate() check for this
condition, determine whether rng_is_initialized() has flipped to true in
the meanwhile and invoke a reseed from get_random_bytes() if so.
Make this move:
- rename the former drbg_async_seed() work handler, i.e. the one in charge
of reseeding a DRBG instance from get_random_bytes(), to
"drbg_seed_from_random()",
- change its signature as appropriate, i.e. make it take a struct
drbg_state rather than a work_struct and change its return type from
"void" to "int" in order to allow for passing error information from
e.g. its __drbg_seed() invocation onwards to callers,
- make drbg_generate() invoke this drbg_seed_from_random() once it
encounters a DRBG instance with ->seeded == DRBG_SEED_STATE_PARTIAL by
the time rng_is_initialized() has flipped to true and
- prune everything related to the former, random_ready_callback based
mechanism.
As drbg_seed_from_random() is now getting invoked from drbg_generate() with
the ->drbg_mutex being held, it must not attempt to recursively grab it
once again. Remove the corresponding mutex operations from what is now
drbg_seed_from_random(). Furthermore, as drbg_seed_from_random() can now
report errors directly to its caller, there's no need for it to temporarily
switch the DRBG's ->seeded state to DRBG_SEED_STATE_UNSEEDED so that a
failure of the subsequently invoked __drbg_seed() will get signaled to
drbg_generate(). Don't do it then.
Signed-off-by: Nicolai Stange <nstange@suse.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2021-11-15 17:18:07 +03:00
out :
2015-06-09 16:55:38 +03:00
memzero_explicit ( entropy , entropylen ) ;
crypto: drbg - make reseeding from get_random_bytes() synchronous
get_random_bytes() usually hasn't full entropy available by the time DRBG
instances are first getting seeded from it during boot. Thus, the DRBG
implementation registers random_ready_callbacks which would in turn
schedule some work for reseeding the DRBGs once get_random_bytes() has
sufficient entropy available.
For reference, the relevant history around handling DRBG (re)seeding in
the context of a not yet fully seeded get_random_bytes() is:
commit 16b369a91d0d ("random: Blocking API for accessing
nonblocking_pool")
commit 4c7879907edd ("crypto: drbg - add async seeding operation")
commit 205a525c3342 ("random: Add callback API for random pool
readiness")
commit 57225e679788 ("crypto: drbg - Use callback API for random
readiness")
commit c2719503f5e1 ("random: Remove kernel blocking API")
However, some time later, the initialization state of get_random_bytes()
has been made queryable via rng_is_initialized() introduced with commit
9a47249d444d ("random: Make crng state queryable"). This primitive now
allows for streamlining the DRBG reseeding from get_random_bytes() by
replacing that aforementioned asynchronous work scheduling from
random_ready_callbacks with some simpler, synchronous code in
drbg_generate() next to the related logic already present therein. Apart
from improving overall code readability, this change will also enable DRBG
users to rely on wait_for_random_bytes() for ensuring that the initial
seeding has completed, if desired.
The previous patches already laid the grounds by making drbg_seed() to
record at each DRBG instance whether it was being seeded at a time when
rng_is_initialized() still had been false as indicated by
->seeded == DRBG_SEED_STATE_PARTIAL.
All that remains to be done now is to make drbg_generate() check for this
condition, determine whether rng_is_initialized() has flipped to true in
the meanwhile and invoke a reseed from get_random_bytes() if so.
Make this move:
- rename the former drbg_async_seed() work handler, i.e. the one in charge
of reseeding a DRBG instance from get_random_bytes(), to
"drbg_seed_from_random()",
- change its signature as appropriate, i.e. make it take a struct
drbg_state rather than a work_struct and change its return type from
"void" to "int" in order to allow for passing error information from
e.g. its __drbg_seed() invocation onwards to callers,
- make drbg_generate() invoke this drbg_seed_from_random() once it
encounters a DRBG instance with ->seeded == DRBG_SEED_STATE_PARTIAL by
the time rng_is_initialized() has flipped to true and
- prune everything related to the former, random_ready_callback based
mechanism.
As drbg_seed_from_random() is now getting invoked from drbg_generate() with
the ->drbg_mutex being held, it must not attempt to recursively grab it
once again. Remove the corresponding mutex operations from what is now
drbg_seed_from_random(). Furthermore, as drbg_seed_from_random() can now
report errors directly to its caller, there's no need for it to temporarily
switch the DRBG's ->seeded state to DRBG_SEED_STATE_UNSEEDED so that a
failure of the subsequently invoked __drbg_seed() will get signaled to
drbg_generate(). Don't do it then.
Signed-off-by: Nicolai Stange <nstange@suse.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2021-11-15 17:18:07 +03:00
return ret ;
2015-05-25 16:09:36 +03:00
}
crypto: drbg - reseed 'nopr' drbgs periodically from get_random_bytes()
In contrast to the fully prediction resistant 'pr' DRBGs, the 'nopr'
variants get seeded once at boot and reseeded only rarely thereafter,
namely only after 2^20 requests have been served each. AFAICT, this
reseeding based on the number of requests served is primarily motivated
by information theoretic considerations, c.f. NIST SP800-90Ar1,
sec. 8.6.8 ("Reseeding").
However, given the relatively large seed lifetime of 2^20 requests, the
'nopr' DRBGs can hardly be considered to provide any prediction resistance
whatsoever, i.e. to protect against threats like side channel leaks of the
internal DRBG state (think e.g. leaked VM snapshots). This is expected and
completely in line with the 'nopr' naming, but as e.g. the
"drbg_nopr_hmac_sha512" implementation is potentially being used for
providing the "stdrng" and thus, the crypto_default_rng serving the
in-kernel crypto, it would certainly be desirable to achieve at least the
same level of prediction resistance as get_random_bytes() does.
Note that the chacha20 rngs underlying get_random_bytes() get reseeded
every CRNG_RESEED_INTERVAL == 5min: the secondary, per-NUMA node rngs from
the primary one and the primary rng in turn from the entropy pool, provided
sufficient entropy is available.
The 'nopr' DRBGs do draw randomness from get_random_bytes() for their
initial seed already, so making them to reseed themselves periodically from
get_random_bytes() in order to let them benefit from the latter's
prediction resistance is not such a big change conceptually.
In principle, it would have been also possible to make the 'nopr' DRBGs to
periodically invoke a full reseeding operation, i.e. to also consider the
jitterentropy source (if enabled) in addition to get_random_bytes() for the
seed value. However, get_random_bytes() is relatively lightweight as
compared to the jitterentropy generation process and thus, even though the
'nopr' reseeding is supposed to get invoked infrequently, it's IMO still
worthwhile to avoid occasional latency spikes for drbg_generate() and
stick to get_random_bytes() only. As an additional remark, note that
drawing randomness from the non-SP800-90B-conforming get_random_bytes()
only won't adversely affect SP800-90A conformance either: the very same is
being done during boot via drbg_seed_from_random() already once
rng_is_initialized() flips to true and it follows that if the DRBG
implementation does conform to SP800-90A now, it will continue to do so.
Make the 'nopr' DRBGs to reseed themselves periodically from
get_random_bytes() every CRNG_RESEED_INTERVAL == 5min.
More specifically, introduce a new member ->last_seed_time to struct
drbg_state for recording in units of jiffies when the last seeding
operation had taken place. Make __drbg_seed() maintain it and let
drbg_generate() invoke a reseed from get_random_bytes() via
drbg_seed_from_random() if more than 5min have passed by since the last
seeding operation. Be careful to not to reseed if in testing mode though,
or otherwise the drbg related tests in crypto/testmgr.c would fail to
reproduce the expected output.
In order to keep the formatting clean in drbg_generate() wrap the logic
for deciding whether or not a reseed is due in a new helper,
drbg_nopr_reseed_interval_elapsed().
Signed-off-by: Nicolai Stange <nstange@suse.de>
Reviewed-by: Stephan Müller <smueller@chronox.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2021-11-15 17:18:09 +03:00
static bool drbg_nopr_reseed_interval_elapsed ( struct drbg_state * drbg )
{
unsigned long next_reseed ;
/* Don't ever reseed from get_random_bytes() in test mode. */
if ( list_empty ( & drbg - > test_data . list ) )
return false ;
/*
* Obtain fresh entropy for the nopr DRBGs after 300 s have
* elapsed in order to still achieve sort of partial
* prediction resistance over the time domain at least . Note
* that the period of 300 s has been chosen to match the
* CRNG_RESEED_INTERVAL of the get_random_bytes ( ) ' chacha
* rngs .
*/
next_reseed = drbg - > last_seed_time + 300 * HZ ;
return time_after ( jiffies , next_reseed ) ;
}
2014-05-31 17:44:17 +04:00
/*
* Seeding or reseeding of the DRBG
*
* @ drbg : DRBG state struct
* @ pers : personalization / additional information buffer
* @ reseed : 0 for initial seed process , 1 for reseeding
*
* return :
* 0 on success
* error value otherwise
*/
static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
		     bool reseed)
{
	int ret;
	/* Sized for entropy plus nonce from both noise sources. */
	unsigned char entropy[((32 + 16) * 2)];
	unsigned int entropylen = drbg_sec_strength(drbg->core->flags);
	struct drbg_string data;
	LIST_HEAD(seedlist);
	enum drbg_seed_state seed_state = DRBG_SEED_STATE_FULL;

	/* 9.1 / 9.2 / 9.3.1 step 3 */
	if (pers && pers->len > (drbg_max_addtl(drbg))) {
		pr_devel("DRBG: personalization string too long %zu\n",
			 pers->len);
		return -EINVAL;
	}

	if (list_empty(&drbg->test_data.list)) {
		/* Test mode: seed material comes from the test buffer. */
		drbg_string_fill(&data, drbg->test_data.buf,
				 drbg->test_data.len);
		pr_devel("DRBG: using test entropy\n");
	} else {
		/*
		 * Gather entropy equal to the security strength of the DRBG.
		 * With a derivation function, a nonce is required in addition
		 * to the entropy. A nonce must be at least 1/2 of the security
		 * strength of the DRBG in size. Thus, entropy + nonce is 3/2
		 * of the strength. The consideration of a nonce is only
		 * applicable during initial seeding.
		 */
		BUG_ON(!entropylen);
		if (!reseed)
			entropylen = ((entropylen + 1) / 2) * 3;
		BUG_ON((entropylen * 2) > sizeof(entropy));

		/*
		 * Remember whether the crng was fully initialized at seed
		 * time; if not, drbg_generate() reseeds later once it is.
		 */
		if (!rng_is_initialized())
			seed_state = DRBG_SEED_STATE_PARTIAL;

		/* Get seed from in-kernel /dev/urandom */
		ret = drbg_get_random_bytes(drbg, entropy, entropylen);
		if (ret)
			goto out;

		if (!drbg->jent) {
			drbg_string_fill(&data, entropy, entropylen);
			pr_devel("DRBG: (re)seeding with %u bytes of entropy\n",
				 entropylen);
		} else {
			/*
			 * Get seed from Jitter RNG, failures are
			 * fatal only in FIPS mode.
			 */
			ret = crypto_rng_get_bytes(drbg->jent,
						   entropy + entropylen,
						   entropylen);
			if (fips_enabled && ret) {
				pr_devel("DRBG: jent failed with %d\n", ret);

				/*
				 * Do not treat the transient failure of the
				 * Jitter RNG as an error that needs to be
				 * reported. The combined number of the
				 * maximum reseed threshold times the maximum
				 * number of Jitter RNG transient errors is
				 * less than the reseed threshold required by
				 * SP800-90A allowing us to treat the
				 * transient errors as such.
				 *
				 * However, we mandate that at least the first
				 * seeding operation must succeed with the
				 * Jitter RNG.
				 */
				if (!reseed || ret != -EAGAIN)
					goto out;
			}

			drbg_string_fill(&data, entropy, entropylen * 2);
			pr_devel("DRBG: (re)seeding with %u bytes of entropy\n",
				 entropylen * 2);
		}
	}
	list_add_tail(&data.list, &seedlist);

	/*
	 * Concatenation of entropy with personalization str / addtl input.
	 * The variable pers is directly handed in by the caller, so check
	 * its contents whether it is appropriate.
	 */
	if (pers && pers->buf && pers->len > 0) {
		list_add_tail(&pers->list, &seedlist);
		pr_devel("DRBG: using personalization string\n");
	}

	/* Initial seeding starts from an all-zero working state. */
	if (!reseed) {
		memset(drbg->V, 0, drbg_statelen(drbg));
		memset(drbg->C, 0, drbg_statelen(drbg));
	}

	ret = __drbg_seed(drbg, &seedlist, reseed, seed_state);

out:
	/* Wipe the raw seed material regardless of success or failure. */
	memzero_explicit(entropy, entropylen * 2);

	return ret;
}
/* Free all substructures in a DRBG state without the DRBG state structure */
static inline void drbg_dealloc_state ( struct drbg_state * drbg )
{
if ( ! drbg )
return ;
2020-08-07 09:18:13 +03:00
kfree_sensitive ( drbg - > Vbuf ) ;
2018-04-12 09:40:55 +03:00
drbg - > Vbuf = NULL ;
2017-09-14 18:10:28 +03:00
drbg - > V = NULL ;
2020-08-07 09:18:13 +03:00
kfree_sensitive ( drbg - > Cbuf ) ;
2018-04-12 09:40:55 +03:00
drbg - > Cbuf = NULL ;
2017-09-14 18:10:28 +03:00
drbg - > C = NULL ;
2020-08-07 09:18:13 +03:00
kfree_sensitive ( drbg - > scratchpadbuf ) ;
2016-06-14 08:35:13 +03:00
drbg - > scratchpadbuf = NULL ;
2014-05-31 17:44:17 +04:00
drbg - > reseed_ctr = 0 ;
2015-04-20 06:29:15 +03:00
drbg - > d_ops = NULL ;
drbg - > core = NULL ;
2019-05-08 17:19:24 +03:00
if ( IS_ENABLED ( CONFIG_CRYPTO_FIPS ) ) {
2020-08-07 09:18:13 +03:00
kfree_sensitive ( drbg - > prev ) ;
2019-05-08 17:19:24 +03:00
drbg - > prev = NULL ;
drbg - > fips_primed = false ;
}
2014-05-31 17:44:17 +04:00
}
/*
* Allocate all sub - structures for a DRBG state .
* The DRBG state structure must already be allocated .
*/
static inline int drbg_alloc_state ( struct drbg_state * drbg )
{
int ret = - ENOMEM ;
unsigned int sb_size = 0 ;
2015-04-20 06:29:15 +03:00
switch ( drbg - > core - > flags & DRBG_TYPE_MASK ) {
# ifdef CONFIG_CRYPTO_DRBG_HMAC
case DRBG_HMAC :
drbg - > d_ops = & drbg_hmac_ops ;
break ;
# endif /* CONFIG_CRYPTO_DRBG_HMAC */
# ifdef CONFIG_CRYPTO_DRBG_HASH
case DRBG_HASH :
drbg - > d_ops = & drbg_hash_ops ;
break ;
# endif /* CONFIG_CRYPTO_DRBG_HASH */
# ifdef CONFIG_CRYPTO_DRBG_CTR
case DRBG_CTR :
drbg - > d_ops = & drbg_ctr_ops ;
break ;
# endif /* CONFIG_CRYPTO_DRBG_CTR */
default :
ret = - EOPNOTSUPP ;
goto err ;
}
2016-06-14 08:35:13 +03:00
ret = drbg - > d_ops - > crypto_init ( drbg ) ;
if ( ret < 0 )
2014-05-31 17:44:17 +04:00
goto err ;
2016-06-14 08:35:13 +03:00
drbg - > Vbuf = kmalloc ( drbg_statelen ( drbg ) + ret , GFP_KERNEL ) ;
2016-08-20 18:06:51 +03:00
if ( ! drbg - > Vbuf ) {
ret = - ENOMEM ;
2016-06-14 08:35:13 +03:00
goto fini ;
2016-08-20 18:06:51 +03:00
}
2016-06-14 08:35:13 +03:00
drbg - > V = PTR_ALIGN ( drbg - > Vbuf , ret + 1 ) ;
drbg - > Cbuf = kmalloc ( drbg_statelen ( drbg ) + ret , GFP_KERNEL ) ;
2016-08-20 18:06:51 +03:00
if ( ! drbg - > Cbuf ) {
ret = - ENOMEM ;
2016-06-14 08:35:13 +03:00
goto fini ;
2016-08-20 18:06:51 +03:00
}
2016-06-14 08:35:13 +03:00
drbg - > C = PTR_ALIGN ( drbg - > Cbuf , ret + 1 ) ;
2014-05-31 17:44:17 +04:00
/* scratchpad is only generated for CTR and Hash */
if ( drbg - > core - > flags & DRBG_HMAC )
sb_size = 0 ;
else if ( drbg - > core - > flags & DRBG_CTR )
sb_size = drbg_statelen ( drbg ) + drbg_blocklen ( drbg ) + /* temp */
drbg_statelen ( drbg ) + /* df_data */
drbg_blocklen ( drbg ) + /* pad */
drbg_blocklen ( drbg ) + /* iv */
2014-07-01 19:08:48 +04:00
drbg_statelen ( drbg ) + drbg_blocklen ( drbg ) ; /* temp */
2014-05-31 17:44:17 +04:00
else
sb_size = drbg_statelen ( drbg ) + drbg_blocklen ( drbg ) ;
if ( 0 < sb_size ) {
2016-06-14 08:35:13 +03:00
drbg - > scratchpadbuf = kzalloc ( sb_size + ret , GFP_KERNEL ) ;
2016-08-20 18:06:51 +03:00
if ( ! drbg - > scratchpadbuf ) {
ret = - ENOMEM ;
2016-06-14 08:35:13 +03:00
goto fini ;
2016-08-20 18:06:51 +03:00
}
2016-06-14 08:35:13 +03:00
drbg - > scratchpad = PTR_ALIGN ( drbg - > scratchpadbuf , ret + 1 ) ;
2014-05-31 17:44:17 +04:00
}
2015-05-25 16:09:14 +03:00
2019-05-08 17:19:24 +03:00
if ( IS_ENABLED ( CONFIG_CRYPTO_FIPS ) ) {
drbg - > prev = kzalloc ( drbg_sec_strength ( drbg - > core - > flags ) ,
GFP_KERNEL ) ;
2020-04-30 11:13:53 +03:00
if ( ! drbg - > prev ) {
ret = - ENOMEM ;
2019-05-08 17:19:24 +03:00
goto fini ;
2020-04-30 11:13:53 +03:00
}
2019-05-08 17:19:24 +03:00
drbg - > fips_primed = false ;
}
2014-05-31 17:44:17 +04:00
return 0 ;
2016-06-14 08:35:13 +03:00
fini :
drbg - > d_ops - > crypto_fini ( drbg ) ;
2014-05-31 17:44:17 +04:00
err :
drbg_dealloc_state ( drbg ) ;
return ret ;
}
/*************************************************************************
* DRBG interface functions
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* DRBG generate function as required by SP800 - 90 A - this function
* generates random numbers
*
* @ drbg DRBG state handle
* @ buf Buffer where to store the random numbers - - the buffer must already
* be pre - allocated by caller
* @ buflen Length of output buffer - this value defines the number of random
* bytes pulled from DRBG
* @ addtl Additional input that is mixed into state , may be NULL - - note
* the entropy is pulled by the DRBG internally unconditionally
* as defined in SP800 - 90 A . The additional input is mixed into
* the state in addition to the pulled entropy .
*
2015-03-06 10:26:31 +03:00
* return : 0 when all bytes are generated ; < 0 in case of an error
2014-05-31 17:44:17 +04:00
*/
static int drbg_generate ( struct drbg_state * drbg ,
unsigned char * buf , unsigned int buflen ,
struct drbg_string * addtl )
{
int len = 0 ;
2014-07-06 04:25:36 +04:00
LIST_HEAD ( addtllist ) ;
2014-05-31 17:44:17 +04:00
2015-04-20 06:29:15 +03:00
if ( ! drbg - > core ) {
pr_devel ( " DRBG: not yet seeded \n " ) ;
return - EINVAL ;
}
2014-05-31 17:44:17 +04:00
if ( 0 = = buflen | | ! buf ) {
pr_devel ( " DRBG: no output buffer provided \n " ) ;
return - EINVAL ;
}
if ( addtl & & NULL = = addtl - > buf & & 0 < addtl - > len ) {
pr_devel ( " DRBG: wrong format of additional information \n " ) ;
return - EINVAL ;
}
/* 9.3.1 step 2 */
len = - EINVAL ;
2015-04-18 20:36:17 +03:00
if ( buflen > ( drbg_max_request_bytes ( drbg ) ) ) {
2014-05-31 17:44:17 +04:00
pr_devel ( " DRBG: requested random numbers too large %u \n " ,
buflen ) ;
goto err ;
}
/* 9.3.1 step 3 is implicit with the chosen DRBG */
/* 9.3.1 step 4 */
2015-04-18 20:36:17 +03:00
if ( addtl & & addtl - > len > ( drbg_max_addtl ( drbg ) ) ) {
2014-05-31 17:44:17 +04:00
pr_devel ( " DRBG: additional information string too long %zu \n " ,
addtl - > len ) ;
goto err ;
}
/* 9.3.1 step 5 is implicit with the chosen DRBG */
/*
* 9.3 .1 step 6 and 9 supplemented by 9.3 .2 step c is implemented
* here . The spec is a bit convoluted here , we make it simpler .
*/
crypto: drbg - reseed often if seedsource is degraded
As required by SP800-90A, the DRBG implements are reseeding threshold.
This threshold is at 2**48 (64 bit) and 2**32 bit (32 bit) as
implemented in drbg_max_requests.
With the recently introduced changes, the DRBG is now always used as a
stdrng which is initialized very early in the boot cycle. To ensure that
sufficient entropy is present, the Jitter RNG is added to even provide
entropy at early boot time.
However, the 2nd seed source, the nonblocking pool, is usually
degraded at that time. Therefore, the DRBG is seeded with the Jitter RNG
(which I believe contains good entropy, which however is questioned by
others) and is seeded with a degradded nonblocking pool. This seed is
now used for quasi the lifetime of the system (2**48 requests is a lot).
The patch now changes the reseed threshold as follows: up until the time
the DRBG obtains a seed from a fully iniitialized nonblocking pool, the
reseeding threshold is lowered such that the DRBG is forced to reseed
itself resonably often. Once it obtains the seed from a fully
initialized nonblocking pool, the reseed threshold is set to the value
required by SP800-90A.
Signed-off-by: Stephan Mueller <smueller@chronox.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2015-06-10 04:33:37 +03:00
if ( drbg - > reseed_threshold < drbg - > reseed_ctr )
crypto: drbg - prepare for more fine-grained tracking of seeding state
There are two different randomness sources the DRBGs are getting seeded
from, namely the jitterentropy source (if enabled) and get_random_bytes().
At initial DRBG seeding time during boot, the latter might not have
collected sufficient entropy for seeding itself yet and thus, the DRBG
implementation schedules a reseed work from a random_ready_callback once
that has happened. This is particularly important for the !->pr DRBG
instances, for which (almost) no further reseeds are getting triggered
during their lifetime.
Because collecting data from the jitterentropy source is a rather expensive
operation, the aforementioned asynchronously scheduled reseed work
restricts itself to get_random_bytes() only. That is, it in some sense
amends the initial DRBG seed derived from jitterentropy output at full
(estimated) entropy with fresh randomness obtained from get_random_bytes()
once that has been seeded with sufficient entropy itself.
With the advent of rng_is_initialized(), there is no real need for doing
the reseed operation from an asynchronously scheduled work anymore and a
subsequent patch will make it synchronous by moving it next to related
logic already present in drbg_generate().
However, for tracking whether a full reseed including the jitterentropy
source is required or a "partial" reseed involving only get_random_bytes()
would be sufficient already, the boolean struct drbg_state's ->seeded
member must become a tristate value.
Prepare for this by introducing the new enum drbg_seed_state and change
struct drbg_state's ->seeded member's type from bool to that type.
For facilitating review, enum drbg_seed_state is made to only contain
two members corresponding to the former ->seeded values of false and true
resp. at this point: DRBG_SEED_STATE_UNSEEDED and DRBG_SEED_STATE_FULL. A
third one for tracking the intermediate state of "seeded from jitterentropy
only" will be introduced with a subsequent patch.
There is no change in behaviour at this point.
Signed-off-by: Nicolai Stange <nstange@suse.de>
Reviewed-by: Stephan Müller <smueller@chronox.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2021-11-15 17:18:04 +03:00
drbg - > seeded = DRBG_SEED_STATE_UNSEEDED ;
2014-05-31 17:44:17 +04:00
crypto: drbg - prepare for more fine-grained tracking of seeding state
There are two different randomness sources the DRBGs are getting seeded
from, namely the jitterentropy source (if enabled) and get_random_bytes().
At initial DRBG seeding time during boot, the latter might not have
collected sufficient entropy for seeding itself yet and thus, the DRBG
implementation schedules a reseed work from a random_ready_callback once
that has happened. This is particularly important for the !->pr DRBG
instances, for which (almost) no further reseeds are getting triggered
during their lifetime.
Because collecting data from the jitterentropy source is a rather expensive
operation, the aforementioned asynchronously scheduled reseed work
restricts itself to get_random_bytes() only. That is, it in some sense
amends the initial DRBG seed derived from jitterentropy output at full
(estimated) entropy with fresh randomness obtained from get_random_bytes()
once that has been seeded with sufficient entropy itself.
With the advent of rng_is_initialized(), there is no real need for doing
the reseed operation from an asynchronously scheduled work anymore and a
subsequent patch will make it synchronous by moving it next to related
logic already present in drbg_generate().
However, for tracking whether a full reseed including the jitterentropy
source is required or a "partial" reseed involving only get_random_bytes()
would be sufficient already, the boolean struct drbg_state's ->seeded
member must become a tristate value.
Prepare for this by introducing the new enum drbg_seed_state and change
struct drbg_state's ->seeded member's type from bool to that type.
For facilitating review, enum drbg_seed_state is made to only contain
two members corresponding to the former ->seeded values of false and true
resp. at this point: DRBG_SEED_STATE_UNSEEDED and DRBG_SEED_STATE_FULL. A
third one for tracking the intermediate state of "seeded from jitterentropy
only" will be introduced with a subsequent patch.
There is no change in behaviour at this point.
Signed-off-by: Nicolai Stange <nstange@suse.de>
Reviewed-by: Stephan Müller <smueller@chronox.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2021-11-15 17:18:04 +03:00
if ( drbg - > pr | | drbg - > seeded = = DRBG_SEED_STATE_UNSEEDED ) {
2014-05-31 17:44:17 +04:00
pr_devel ( " DRBG: reseeding before generation (prediction "
" resistance: %s, state %s) \n " ,
drbg - > pr ? " true " : " false " ,
crypto: drbg - prepare for more fine-grained tracking of seeding state
There are two different randomness sources the DRBGs are getting seeded
from, namely the jitterentropy source (if enabled) and get_random_bytes().
At initial DRBG seeding time during boot, the latter might not have
collected sufficient entropy for seeding itself yet and thus, the DRBG
implementation schedules a reseed work from a random_ready_callback once
that has happened. This is particularly important for the !->pr DRBG
instances, for which (almost) no further reseeds are getting triggered
during their lifetime.
Because collecting data from the jitterentropy source is a rather expensive
operation, the aforementioned asynchronously scheduled reseed work
restricts itself to get_random_bytes() only. That is, it in some sense
amends the initial DRBG seed derived from jitterentropy output at full
(estimated) entropy with fresh randomness obtained from get_random_bytes()
once that has been seeded with sufficient entropy itself.
With the advent of rng_is_initialized(), there is no real need for doing
the reseed operation from an asynchronously scheduled work anymore and a
subsequent patch will make it synchronous by moving it next to related
logic already present in drbg_generate().
However, for tracking whether a full reseed including the jitterentropy
source is required or a "partial" reseed involving only get_random_bytes()
would be sufficient already, the boolean struct drbg_state's ->seeded
member must become a tristate value.
Prepare for this by introducing the new enum drbg_seed_state and change
struct drbg_state's ->seeded member's type from bool to that type.
For facilitating review, enum drbg_seed_state is made to only contain
two members corresponding to the former ->seeded values of false and true
resp. at this point: DRBG_SEED_STATE_UNSEEDED and DRBG_SEED_STATE_FULL. A
third one for tracking the intermediate state of "seeded from jitterentropy
only" will be introduced with a subsequent patch.
There is no change in behaviour at this point.
Signed-off-by: Nicolai Stange <nstange@suse.de>
Reviewed-by: Stephan Müller <smueller@chronox.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2021-11-15 17:18:04 +03:00
( drbg - > seeded = = DRBG_SEED_STATE_FULL ?
" seeded " : " unseeded " ) ) ;
2014-05-31 17:44:17 +04:00
/* 9.3.1 steps 7.1 through 7.3 */
2015-04-18 20:36:17 +03:00
len = drbg_seed ( drbg , addtl , true ) ;
2014-05-31 17:44:17 +04:00
if ( len )
goto err ;
/* 9.3.1 step 7.4 */
addtl = NULL ;
crypto: drbg - make reseeding from get_random_bytes() synchronous
get_random_bytes() usually hasn't full entropy available by the time DRBG
instances are first getting seeded from it during boot. Thus, the DRBG
implementation registers random_ready_callbacks which would in turn
schedule some work for reseeding the DRBGs once get_random_bytes() has
sufficient entropy available.
For reference, the relevant history around handling DRBG (re)seeding in
the context of a not yet fully seeded get_random_bytes() is:
commit 16b369a91d0d ("random: Blocking API for accessing
nonblocking_pool")
commit 4c7879907edd ("crypto: drbg - add async seeding operation")
commit 205a525c3342 ("random: Add callback API for random pool
readiness")
commit 57225e679788 ("crypto: drbg - Use callback API for random
readiness")
commit c2719503f5e1 ("random: Remove kernel blocking API")
However, some time later, the initialization state of get_random_bytes()
has been made queryable via rng_is_initialized() introduced with commit
9a47249d444d ("random: Make crng state queryable"). This primitive now
allows for streamlining the DRBG reseeding from get_random_bytes() by
replacing that aforementioned asynchronous work scheduling from
random_ready_callbacks with some simpler, synchronous code in
drbg_generate() next to the related logic already present therein. Apart
from improving overall code readability, this change will also enable DRBG
users to rely on wait_for_random_bytes() for ensuring that the initial
seeding has completed, if desired.
The previous patches already laid the grounds by making drbg_seed() to
record at each DRBG instance whether it was being seeded at a time when
rng_is_initialized() still had been false as indicated by
->seeded == DRBG_SEED_STATE_PARTIAL.
All that remains to be done now is to make drbg_generate() check for this
condition, determine whether rng_is_initialized() has flipped to true in
the meanwhile and invoke a reseed from get_random_bytes() if so.
Make this move:
- rename the former drbg_async_seed() work handler, i.e. the one in charge
of reseeding a DRBG instance from get_random_bytes(), to
"drbg_seed_from_random()",
- change its signature as appropriate, i.e. make it take a struct
drbg_state rather than a work_struct and change its return type from
"void" to "int" in order to allow for passing error information from
e.g. its __drbg_seed() invocation onwards to callers,
- make drbg_generate() invoke this drbg_seed_from_random() once it
encounters a DRBG instance with ->seeded == DRBG_SEED_STATE_PARTIAL by
the time rng_is_initialized() has flipped to true and
- prune everything related to the former, random_ready_callback based
mechanism.
As drbg_seed_from_random() is now getting invoked from drbg_generate() with
the ->drbg_mutex being held, it must not attempt to recursively grab it
once again. Remove the corresponding mutex operations from what is now
drbg_seed_from_random(). Furthermore, as drbg_seed_from_random() can now
report errors directly to its caller, there's no need for it to temporarily
switch the DRBG's ->seeded state to DRBG_SEED_STATE_UNSEEDED so that a
failure of the subsequently invoked __drbg_seed() will get signaled to
drbg_generate(). Don't do it then.
Signed-off-by: Nicolai Stange <nstange@suse.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2021-11-15 17:18:07 +03:00
} else if ( rng_is_initialized ( ) & &
crypto: drbg - reseed 'nopr' drbgs periodically from get_random_bytes()
In contrast to the fully prediction resistant 'pr' DRBGs, the 'nopr'
variants get seeded once at boot and reseeded only rarely thereafter,
namely only after 2^20 requests have been served each. AFAICT, this
reseeding based on the number of requests served is primarily motivated
by information theoretic considerations, c.f. NIST SP800-90Ar1,
sec. 8.6.8 ("Reseeding").
However, given the relatively large seed lifetime of 2^20 requests, the
'nopr' DRBGs can hardly be considered to provide any prediction resistance
whatsoever, i.e. to protect against threats like side channel leaks of the
internal DRBG state (think e.g. leaked VM snapshots). This is expected and
completely in line with the 'nopr' naming, but as e.g. the
"drbg_nopr_hmac_sha512" implementation is potentially being used for
providing the "stdrng" and thus, the crypto_default_rng serving the
in-kernel crypto, it would certainly be desirable to achieve at least the
same level of prediction resistance as get_random_bytes() does.
Note that the chacha20 rngs underlying get_random_bytes() get reseeded
every CRNG_RESEED_INTERVAL == 5min: the secondary, per-NUMA node rngs from
the primary one and the primary rng in turn from the entropy pool, provided
sufficient entropy is available.
The 'nopr' DRBGs do draw randomness from get_random_bytes() for their
initial seed already, so making them to reseed themselves periodically from
get_random_bytes() in order to let them benefit from the latter's
prediction resistance is not such a big change conceptually.
In principle, it would have been also possible to make the 'nopr' DRBGs to
periodically invoke a full reseeding operation, i.e. to also consider the
jitterentropy source (if enabled) in addition to get_random_bytes() for the
seed value. However, get_random_bytes() is relatively lightweight as
compared to the jitterentropy generation process and thus, even though the
'nopr' reseeding is supposed to get invoked infrequently, it's IMO still
worthwhile to avoid occasional latency spikes for drbg_generate() and
stick to get_random_bytes() only. As an additional remark, note that
drawing randomness from the non-SP800-90B-conforming get_random_bytes()
only won't adversely affect SP800-90A conformance either: the very same is
being done during boot via drbg_seed_from_random() already once
rng_is_initialized() flips to true and it follows that if the DRBG
implementation does conform to SP800-90A now, it will continue to do so.
Make the 'nopr' DRBGs to reseed themselves periodically from
get_random_bytes() every CRNG_RESEED_INTERVAL == 5min.
More specifically, introduce a new member ->last_seed_time to struct
drbg_state for recording in units of jiffies when the last seeding
operation had taken place. Make __drbg_seed() maintain it and let
drbg_generate() invoke a reseed from get_random_bytes() via
drbg_seed_from_random() if more than 5min have passed by since the last
seeding operation. Be careful to not to reseed if in testing mode though,
or otherwise the drbg related tests in crypto/testmgr.c would fail to
reproduce the expected output.
In order to keep the formatting clean in drbg_generate() wrap the logic
for deciding whether or not a reseed is due in a new helper,
drbg_nopr_reseed_interval_elapsed().
Signed-off-by: Nicolai Stange <nstange@suse.de>
Reviewed-by: Stephan Müller <smueller@chronox.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2021-11-15 17:18:09 +03:00
( drbg - > seeded = = DRBG_SEED_STATE_PARTIAL | |
drbg_nopr_reseed_interval_elapsed ( drbg ) ) ) {
crypto: drbg - make reseeding from get_random_bytes() synchronous
get_random_bytes() usually hasn't full entropy available by the time DRBG
instances are first getting seeded from it during boot. Thus, the DRBG
implementation registers random_ready_callbacks which would in turn
schedule some work for reseeding the DRBGs once get_random_bytes() has
sufficient entropy available.
For reference, the relevant history around handling DRBG (re)seeding in
the context of a not yet fully seeded get_random_bytes() is:
commit 16b369a91d0d ("random: Blocking API for accessing
nonblocking_pool")
commit 4c7879907edd ("crypto: drbg - add async seeding operation")
commit 205a525c3342 ("random: Add callback API for random pool
readiness")
commit 57225e679788 ("crypto: drbg - Use callback API for random
readiness")
commit c2719503f5e1 ("random: Remove kernel blocking API")
However, some time later, the initialization state of get_random_bytes()
has been made queryable via rng_is_initialized() introduced with commit
9a47249d444d ("random: Make crng state queryable"). This primitive now
allows for streamlining the DRBG reseeding from get_random_bytes() by
replacing that aforementioned asynchronous work scheduling from
random_ready_callbacks with some simpler, synchronous code in
drbg_generate() next to the related logic already present therein. Apart
from improving overall code readability, this change will also enable DRBG
users to rely on wait_for_random_bytes() for ensuring that the initial
seeding has completed, if desired.
The previous patches already laid the grounds by making drbg_seed() to
record at each DRBG instance whether it was being seeded at a time when
rng_is_initialized() still had been false as indicated by
->seeded == DRBG_SEED_STATE_PARTIAL.
All that remains to be done now is to make drbg_generate() check for this
condition, determine whether rng_is_initialized() has flipped to true in
the meanwhile and invoke a reseed from get_random_bytes() if so.
Make this move:
- rename the former drbg_async_seed() work handler, i.e. the one in charge
of reseeding a DRBG instance from get_random_bytes(), to
"drbg_seed_from_random()",
- change its signature as appropriate, i.e. make it take a struct
drbg_state rather than a work_struct and change its return type from
"void" to "int" in order to allow for passing error information from
e.g. its __drbg_seed() invocation onwards to callers,
- make drbg_generate() invoke this drbg_seed_from_random() once it
encounters a DRBG instance with ->seeded == DRBG_SEED_STATE_PARTIAL by
the time rng_is_initialized() has flipped to true and
- prune everything related to the former, random_ready_callback based
mechanism.
As drbg_seed_from_random() is now getting invoked from drbg_generate() with
the ->drbg_mutex being held, it must not attempt to recursively grab it
once again. Remove the corresponding mutex operations from what is now
drbg_seed_from_random(). Furthermore, as drbg_seed_from_random() can now
report errors directly to its caller, there's no need for it to temporarily
switch the DRBG's ->seeded state to DRBG_SEED_STATE_UNSEEDED so that a
failure of the subsequently invoked __drbg_seed() will get signaled to
drbg_generate(). Don't do it then.
Signed-off-by: Nicolai Stange <nstange@suse.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2021-11-15 17:18:07 +03:00
len = drbg_seed_from_random ( drbg ) ;
if ( len )
goto err ;
2014-05-31 17:44:17 +04:00
}
2014-07-06 04:25:36 +04:00
if ( addtl & & 0 < addtl - > len )
list_add_tail ( & addtl - > list , & addtllist ) ;
2014-05-31 17:44:17 +04:00
/* 9.3.1 step 8 and 10 */
2015-04-18 20:36:17 +03:00
len = drbg - > d_ops - > generate ( drbg , buf , buflen , & addtllist ) ;
2014-05-31 17:44:17 +04:00
/* 10.1.1.4 step 6, 10.1.2.5 step 7, 10.2.1.5.2 step 7 */
2015-04-18 20:36:17 +03:00
drbg - > reseed_ctr + + ;
2014-05-31 17:44:17 +04:00
if ( 0 > = len )
goto err ;
/*
* Section 11.3 .3 requires to re - perform self tests after some
* generated random numbers . The chosen value after which self
* test is performed is arbitrary , but it should be reasonable .
* However , we do not perform the self tests because of the following
* reasons : it is mathematically impossible that the initial self tests
* were successfully and the following are not . If the initial would
* pass and the following would not , the kernel integrity is violated .
* In this case , the entire kernel operation is questionable and it
* is unlikely that the integrity violation only affects the
* correct operation of the DRBG .
*
* Albeit the following code is commented out , it is provided in
* case somebody has a need to implement the test of 11.3 .3 .
*/
#if 0
2015-04-18 20:36:17 +03:00
if ( drbg - > reseed_ctr & & ! ( drbg - > reseed_ctr % 4096 ) ) {
2014-05-31 17:44:17 +04:00
int err = 0 ;
pr_devel ( " DRBG: start to perform self test \n " ) ;
if ( drbg - > core - > flags & DRBG_HMAC )
err = alg_test ( " drbg_pr_hmac_sha256 " ,
" drbg_pr_hmac_sha256 " , 0 , 0 ) ;
else if ( drbg - > core - > flags & DRBG_CTR )
err = alg_test ( " drbg_pr_ctr_aes128 " ,
" drbg_pr_ctr_aes128 " , 0 , 0 ) ;
else
err = alg_test ( " drbg_pr_sha256 " ,
" drbg_pr_sha256 " , 0 , 0 ) ;
if ( err ) {
pr_err ( " DRBG: periodical self test failed \n " ) ;
/*
* uninstantiate implies that from now on , only errors
* are returned when reusing this DRBG cipher handle
*/
drbg_uninstantiate ( drbg ) ;
return 0 ;
} else {
pr_devel ( " DRBG: self test successful \n " ) ;
}
}
# endif
2015-03-06 10:26:31 +03:00
/*
* All operations were successful , return 0 as mandated by
* the kernel crypto API interface .
*/
len = 0 ;
2014-05-31 17:44:17 +04:00
err :
return len ;
}
/*
 * Wrapper around drbg_generate which can pull arbitrary long strings
 * from the DRBG without hitting the maximum request limitation.
 *
 * Parameters: see drbg_generate
 * Return codes: see drbg_generate -- if one drbg_generate request fails,
 *		 the entire drbg_generate_long request fails
 */
static int drbg_generate_long ( struct drbg_state * drbg ,
unsigned char * buf , unsigned int buflen ,
struct drbg_string * addtl )
{
2015-04-18 20:35:45 +03:00
unsigned int len = 0 ;
2014-05-31 17:44:17 +04:00
unsigned int slice = 0 ;
do {
2015-04-18 20:35:45 +03:00
int err = 0 ;
2014-05-31 17:44:17 +04:00
unsigned int chunk = 0 ;
slice = ( ( buflen - len ) / drbg_max_request_bytes ( drbg ) ) ;
chunk = slice ? drbg_max_request_bytes ( drbg ) : ( buflen - len ) ;
2015-04-18 20:36:17 +03:00
mutex_lock ( & drbg - > drbg_mutex ) ;
2015-04-18 20:35:45 +03:00
err = drbg_generate ( drbg , buf + len , chunk , addtl ) ;
2015-04-18 20:36:17 +03:00
mutex_unlock ( & drbg - > drbg_mutex ) ;
2015-04-18 20:35:45 +03:00
if ( 0 > err )
return err ;
len + = chunk ;
2014-07-31 23:47:33 +04:00
} while ( slice > 0 & & ( len < buflen ) ) ;
2015-04-18 20:35:45 +03:00
return 0 ;
2014-05-31 17:44:17 +04:00
}
2015-06-09 16:55:38 +03:00
static int drbg_prepare_hrng ( struct drbg_state * drbg )
{
/* We do not need an HRNG in test mode. */
if ( list_empty ( & drbg - > test_data . list ) )
return 0 ;
crypto: drbg - always seeded with SP800-90B compliant noise source
As the Jitter RNG provides an SP800-90B compliant noise source, use this
noise source always for the (re)seeding of the DRBG.
To make sure the DRBG is always properly seeded, the reseed threshold
is reduced to 1<<20 generate operations.
The Jitter RNG may report health test failures. Such health test
failures are treated as transient as follows. The DRBG will not reseed
from the Jitter RNG (but from get_random_bytes) in case of a health
test failure. Though, it produces the requested random number.
The Jitter RNG has a failure counter where at most 1024 consecutive
resets due to a health test failure are considered as a transient error.
If more consecutive resets are required, the Jitter RNG will return
a permanent error which is returned to the caller by the DRBG. With this
approach, the worst case reseed threshold is significantly lower than
mandated by SP800-90A in order to seed with an SP800-90B noise source:
the DRBG has a reseed threshold of 2^20 * 1024 = 2^30 generate requests.
Yet, in case of a transient Jitter RNG health test failure, the DRBG is
seeded with the data obtained from get_random_bytes.
However, if the Jitter RNG fails during the initial seeding operation
even due to a health test error, the DRBG will send an error to the
caller because at that time, the DRBG has received no seed that is
SP800-90B compliant.
Signed-off-by: Stephan Mueller <smueller@chronox.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2020-04-17 22:34:03 +03:00
drbg - > jent = crypto_alloc_rng ( " jitterentropy_rng " , 0 , 0 ) ;
2021-11-15 17:18:08 +03:00
if ( IS_ERR ( drbg - > jent ) ) {
const int err = PTR_ERR ( drbg - > jent ) ;
drbg - > jent = NULL ;
if ( fips_enabled | | err ! = - ENOENT )
return err ;
pr_info ( " DRBG: Continuing without Jitter RNG \n " ) ;
}
crypto: drbg - always seeded with SP800-90B compliant noise source
As the Jitter RNG provides an SP800-90B compliant noise source, use this
noise source always for the (re)seeding of the DRBG.
To make sure the DRBG is always properly seeded, the reseed threshold
is reduced to 1<<20 generate operations.
The Jitter RNG may report health test failures. Such health test
failures are treated as transient as follows. The DRBG will not reseed
from the Jitter RNG (but from get_random_bytes) in case of a health
test failure. Though, it produces the requested random number.
The Jitter RNG has a failure counter where at most 1024 consecutive
resets due to a health test failure are considered as a transient error.
If more consecutive resets are required, the Jitter RNG will return
a permanent error which is returned to the caller by the DRBG. With this
approach, the worst case reseed threshold is significantly lower than
mandated by SP800-90A in order to seed with an SP800-90B noise source:
the DRBG has a reseed threshold of 2^20 * 1024 = 2^30 generate requests.
Yet, in case of a transient Jitter RNG health test failure, the DRBG is
seeded with the data obtained from get_random_bytes.
However, if the Jitter RNG fails during the initial seeding operation
even due to a health test error, the DRBG will send an error to the
caller because at that time, the DRBG has received no seed that is
SP800-90B compliant.
Signed-off-by: Stephan Mueller <smueller@chronox.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2020-04-17 22:34:03 +03:00
crypto: drbg - make reseeding from get_random_bytes() synchronous
get_random_bytes() usually hasn't full entropy available by the time DRBG
instances are first getting seeded from it during boot. Thus, the DRBG
implementation registers random_ready_callbacks which would in turn
schedule some work for reseeding the DRBGs once get_random_bytes() has
sufficient entropy available.
For reference, the relevant history around handling DRBG (re)seeding in
the context of a not yet fully seeded get_random_bytes() is:
commit 16b369a91d0d ("random: Blocking API for accessing
nonblocking_pool")
commit 4c7879907edd ("crypto: drbg - add async seeding operation")
commit 205a525c3342 ("random: Add callback API for random pool
readiness")
commit 57225e679788 ("crypto: drbg - Use callback API for random
readiness")
commit c2719503f5e1 ("random: Remove kernel blocking API")
However, some time later, the initialization state of get_random_bytes()
has been made queryable via rng_is_initialized() introduced with commit
9a47249d444d ("random: Make crng state queryable"). This primitive now
allows for streamlining the DRBG reseeding from get_random_bytes() by
replacing that aforementioned asynchronous work scheduling from
random_ready_callbacks with some simpler, synchronous code in
drbg_generate() next to the related logic already present therein. Apart
from improving overall code readability, this change will also enable DRBG
users to rely on wait_for_random_bytes() for ensuring that the initial
seeding has completed, if desired.
The previous patches already laid the grounds by making drbg_seed() to
record at each DRBG instance whether it was being seeded at a time when
rng_is_initialized() still had been false as indicated by
->seeded == DRBG_SEED_STATE_PARTIAL.
All that remains to be done now is to make drbg_generate() check for this
condition, determine whether rng_is_initialized() has flipped to true in
the meanwhile and invoke a reseed from get_random_bytes() if so.
Make this move:
- rename the former drbg_async_seed() work handler, i.e. the one in charge
of reseeding a DRBG instance from get_random_bytes(), to
"drbg_seed_from_random()",
- change its signature as appropriate, i.e. make it take a struct
drbg_state rather than a work_struct and change its return type from
"void" to "int" in order to allow for passing error information from
e.g. its __drbg_seed() invocation onwards to callers,
- make drbg_generate() invoke this drbg_seed_from_random() once it
encounters a DRBG instance with ->seeded == DRBG_SEED_STATE_PARTIAL by
the time rng_is_initialized() has flipped to true and
- prune everything related to the former, random_ready_callback based
mechanism.
As drbg_seed_from_random() is now getting invoked from drbg_generate() with
the ->drbg_mutex being held, it must not attempt to recursively grab it
once again. Remove the corresponding mutex operations from what is now
drbg_seed_from_random(). Furthermore, as drbg_seed_from_random() can now
report errors directly to its caller, there's no need for it to temporarily
switch the DRBG's ->seeded state to DRBG_SEED_STATE_UNSEEDED so that a
failure of the subsequently invoked __drbg_seed() will get signaled to
drbg_generate(). Don't do it then.
Signed-off-by: Nicolai Stange <nstange@suse.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2021-11-15 17:18:07 +03:00
return 0 ;
2015-06-09 16:55:38 +03:00
}
2014-05-31 17:44:17 +04:00
/*
 * DRBG instantiation function as required by SP800-90A - this function
 * sets up the DRBG handle, performs the initial seeding and all sanity
 * checks required by SP800-90A
 *
 * @drbg memory of state -- if NULL, new memory is allocated
 * @pers Personalization string that is mixed into state, may be NULL -- note
 *	 the entropy is pulled by the DRBG internally unconditionally
 *	 as defined in SP800-90A. The additional input is mixed into
 *	 the state in addition to the pulled entropy.
 * @coreref reference to core
 * @pr prediction resistance enabled
 *
 * return
 *	0 on success
 *	error value otherwise
 */
static int drbg_instantiate ( struct drbg_state * drbg , struct drbg_string * pers ,
int coreref , bool pr )
{
2015-04-20 06:29:15 +03:00
int ret ;
bool reseed = true ;
2014-05-31 17:44:17 +04:00
pr_devel ( " DRBG: Initializing DRBG core %d with prediction resistance "
" %s \n " , coreref , pr ? " enabled " : " disabled " ) ;
2015-04-18 20:36:17 +03:00
mutex_lock ( & drbg - > drbg_mutex ) ;
2014-05-31 17:44:17 +04:00
/* 9.1 step 1 is implicit with the selected DRBG type */
/*
* 9.1 step 2 is implicit as caller can select prediction resistance
* and the flag is copied into drbg - > flags - -
* all DRBG types support prediction resistance
*/
/* 9.1 step 4 is implicit in drbg_sec_strength */
2015-04-20 06:29:15 +03:00
if ( ! drbg - > core ) {
drbg - > core = & drbg_cores [ coreref ] ;
drbg - > pr = pr ;
crypto: drbg - prepare for more fine-grained tracking of seeding state
There are two different randomness sources the DRBGs are getting seeded
from, namely the jitterentropy source (if enabled) and get_random_bytes().
At initial DRBG seeding time during boot, the latter might not have
collected sufficient entropy for seeding itself yet and thus, the DRBG
implementation schedules a reseed work from a random_ready_callback once
that has happened. This is particularly important for the !->pr DRBG
instances, for which (almost) no further reseeds are getting triggered
during their lifetime.
Because collecting data from the jitterentropy source is a rather expensive
operation, the aforementioned asynchronously scheduled reseed work
restricts itself to get_random_bytes() only. That is, it in some sense
amends the initial DRBG seed derived from jitterentropy output at full
(estimated) entropy with fresh randomness obtained from get_random_bytes()
once that has been seeded with sufficient entropy itself.
With the advent of rng_is_initialized(), there is no real need for doing
the reseed operation from an asynchronously scheduled work anymore and a
subsequent patch will make it synchronous by moving it next to related
logic already present in drbg_generate().
However, for tracking whether a full reseed including the jitterentropy
source is required or a "partial" reseed involving only get_random_bytes()
would be sufficient already, the boolean struct drbg_state's ->seeded
member must become a tristate value.
Prepare for this by introducing the new enum drbg_seed_state and change
struct drbg_state's ->seeded member's type from bool to that type.
For facilitating review, enum drbg_seed_state is made to only contain
two members corresponding to the former ->seeded values of false and true
resp. at this point: DRBG_SEED_STATE_UNSEEDED and DRBG_SEED_STATE_FULL. A
third one for tracking the intermediate state of "seeded from jitterentropy
only" will be introduced with a subsequent patch.
There is no change in behaviour at this point.
Signed-off-by: Nicolai Stange <nstange@suse.de>
Reviewed-by: Stephan Müller <smueller@chronox.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2021-11-15 17:18:04 +03:00
drbg - > seeded = DRBG_SEED_STATE_UNSEEDED ;
crypto: drbg - reseed 'nopr' drbgs periodically from get_random_bytes()
In contrast to the fully prediction resistant 'pr' DRBGs, the 'nopr'
variants get seeded once at boot and reseeded only rarely thereafter,
namely only after 2^20 requests have been served each. AFAICT, this
reseeding based on the number of requests served is primarily motivated
by information theoretic considerations, c.f. NIST SP800-90Ar1,
sec. 8.6.8 ("Reseeding").
However, given the relatively large seed lifetime of 2^20 requests, the
'nopr' DRBGs can hardly be considered to provide any prediction resistance
whatsoever, i.e. to protect against threats like side channel leaks of the
internal DRBG state (think e.g. leaked VM snapshots). This is expected and
completely in line with the 'nopr' naming, but as e.g. the
"drbg_nopr_hmac_sha512" implementation is potentially being used for
providing the "stdrng" and thus, the crypto_default_rng serving the
in-kernel crypto, it would certainly be desirable to achieve at least the
same level of prediction resistance as get_random_bytes() does.
Note that the chacha20 rngs underlying get_random_bytes() get reseeded
every CRNG_RESEED_INTERVAL == 5min: the secondary, per-NUMA node rngs from
the primary one and the primary rng in turn from the entropy pool, provided
sufficient entropy is available.
The 'nopr' DRBGs do draw randomness from get_random_bytes() for their
initial seed already, so making them to reseed themselves periodically from
get_random_bytes() in order to let them benefit from the latter's
prediction resistance is not such a big change conceptually.
In principle, it would have been also possible to make the 'nopr' DRBGs to
periodically invoke a full reseeding operation, i.e. to also consider the
jitterentropy source (if enabled) in addition to get_random_bytes() for the
seed value. However, get_random_bytes() is relatively lightweight as
compared to the jitterentropy generation process and thus, even though the
'nopr' reseeding is supposed to get invoked infrequently, it's IMO still
worthwhile to avoid occasional latency spikes for drbg_generate() and
stick to get_random_bytes() only. As an additional remark, note that
drawing randomness from the non-SP800-90B-conforming get_random_bytes()
only won't adversely affect SP800-90A conformance either: the very same is
being done during boot via drbg_seed_from_random() already once
rng_is_initialized() flips to true and it follows that if the DRBG
implementation does conform to SP800-90A now, it will continue to do so.
Make the 'nopr' DRBGs to reseed themselves periodically from
get_random_bytes() every CRNG_RESEED_INTERVAL == 5min.
More specifically, introduce a new member ->last_seed_time to struct
drbg_state for recording in units of jiffies when the last seeding
operation had taken place. Make __drbg_seed() maintain it and let
drbg_generate() invoke a reseed from get_random_bytes() via
drbg_seed_from_random() if more than 5min have passed by since the last
seeding operation. Be careful to not to reseed if in testing mode though,
or otherwise the drbg related tests in crypto/testmgr.c would fail to
reproduce the expected output.
In order to keep the formatting clean in drbg_generate() wrap the logic
for deciding whether or not a reseed is due in a new helper,
drbg_nopr_reseed_interval_elapsed().
Signed-off-by: Nicolai Stange <nstange@suse.de>
Reviewed-by: Stephan Müller <smueller@chronox.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2021-11-15 17:18:09 +03:00
drbg - > last_seed_time = 0 ;
crypto: drbg - reseed often if seedsource is degraded
As required by SP800-90A, the DRBG implements are reseeding threshold.
This threshold is at 2**48 (64 bit) and 2**32 bit (32 bit) as
implemented in drbg_max_requests.
With the recently introduced changes, the DRBG is now always used as a
stdrng which is initialized very early in the boot cycle. To ensure that
sufficient entropy is present, the Jitter RNG is added to even provide
entropy at early boot time.
However, the 2nd seed source, the nonblocking pool, is usually
degraded at that time. Therefore, the DRBG is seeded with the Jitter RNG
(which I believe contains good entropy, which however is questioned by
others) and is seeded with a degradded nonblocking pool. This seed is
now used for quasi the lifetime of the system (2**48 requests is a lot).
The patch now changes the reseed threshold as follows: up until the time
the DRBG obtains a seed from a fully iniitialized nonblocking pool, the
reseeding threshold is lowered such that the DRBG is forced to reseed
itself resonably often. Once it obtains the seed from a fully
initialized nonblocking pool, the reseed threshold is set to the value
required by SP800-90A.
Signed-off-by: Stephan Mueller <smueller@chronox.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2015-06-10 04:33:37 +03:00
drbg - > reseed_threshold = drbg_max_requests ( drbg ) ;
2014-05-31 17:44:17 +04:00
2015-04-20 06:29:15 +03:00
ret = drbg_alloc_state ( drbg ) ;
if ( ret )
goto unlock ;
2015-06-09 16:55:38 +03:00
ret = drbg_prepare_hrng ( drbg ) ;
if ( ret )
goto free_everything ;
2015-04-20 06:29:15 +03:00
reseed = false ;
}
ret = drbg_seed ( drbg , pers , reseed ) ;
2015-06-09 16:55:38 +03:00
if ( ret & & ! reseed )
goto free_everything ;
2014-05-31 17:44:17 +04:00
2015-04-18 20:36:17 +03:00
mutex_unlock ( & drbg - > drbg_mutex ) ;
2015-04-20 06:29:15 +03:00
return ret ;
2014-05-31 17:44:17 +04:00
2015-04-18 20:36:17 +03:00
unlock :
mutex_unlock ( & drbg - > drbg_mutex ) ;
2014-05-31 17:44:17 +04:00
return ret ;
2015-06-09 16:55:38 +03:00
free_everything :
mutex_unlock ( & drbg - > drbg_mutex ) ;
drbg_uninstantiate ( drbg ) ;
return ret ;
2014-05-31 17:44:17 +04:00
}
/*
* DRBG uninstantiate function as required by SP800 - 90 A - this function
* frees all buffers and the DRBG handle
*
* @ drbg DRBG state handle
*
* return
* 0 on success
*/
static int drbg_uninstantiate ( struct drbg_state * drbg )
{
2020-06-07 16:20:26 +03:00
if ( ! IS_ERR_OR_NULL ( drbg - > jent ) )
crypto_free_rng ( drbg - > jent ) ;
drbg - > jent = NULL ;
2015-04-20 06:29:15 +03:00
if ( drbg - > d_ops )
drbg - > d_ops - > crypto_fini ( drbg ) ;
2014-05-31 17:44:17 +04:00
drbg_dealloc_state ( drbg ) ;
/* no scrubbing of test_data -- this shall survive an uninstantiate */
return 0 ;
}
/*
* Helper function for setting the test data in the DRBG
*
* @ drbg DRBG state handle
2015-04-21 05:46:41 +03:00
* @ data test data
* @ len test data length
2014-05-31 17:44:17 +04:00
*/
2015-04-21 05:46:41 +03:00
static void drbg_kcapi_set_entropy ( struct crypto_rng * tfm ,
const u8 * data , unsigned int len )
2014-05-31 17:44:17 +04:00
{
2015-04-21 05:46:41 +03:00
struct drbg_state * drbg = crypto_rng_ctx ( tfm ) ;
mutex_lock ( & drbg - > drbg_mutex ) ;
drbg_string_fill ( & drbg - > test_data , data , len ) ;
2015-04-18 20:36:17 +03:00
mutex_unlock ( & drbg - > drbg_mutex ) ;
2014-05-31 17:44:17 +04:00
}
/***************************************************************
* Kernel crypto API cipher invocations requested by DRBG
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
# if defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_HMAC)
/* Shash descriptor plus its variable-sized operational context. */
struct sdesc {
	struct shash_desc shash;
	char ctx[];
};
static int drbg_init_hash_kernel ( struct drbg_state * drbg )
{
struct sdesc * sdesc ;
struct crypto_shash * tfm ;
tfm = crypto_alloc_shash ( drbg - > core - > backend_cra_name , 0 , 0 ) ;
if ( IS_ERR ( tfm ) ) {
2015-06-10 16:27:48 +03:00
pr_info ( " DRBG: could not allocate digest TFM handle: %s \n " ,
drbg - > core - > backend_cra_name ) ;
2014-05-31 17:44:17 +04:00
return PTR_ERR ( tfm ) ;
}
BUG_ON ( drbg_blocklen ( drbg ) ! = crypto_shash_digestsize ( tfm ) ) ;
sdesc = kzalloc ( sizeof ( struct shash_desc ) + crypto_shash_descsize ( tfm ) ,
GFP_KERNEL ) ;
if ( ! sdesc ) {
crypto_free_shash ( tfm ) ;
return - ENOMEM ;
}
sdesc - > shash . tfm = tfm ;
drbg - > priv_data = sdesc ;
2016-06-14 08:35:13 +03:00
return crypto_shash_alignmask ( tfm ) ;
2014-05-31 17:44:17 +04:00
}
static int drbg_fini_hash_kernel ( struct drbg_state * drbg )
{
struct sdesc * sdesc = ( struct sdesc * ) drbg - > priv_data ;
if ( sdesc ) {
crypto_free_shash ( sdesc - > shash . tfm ) ;
2020-08-07 09:18:13 +03:00
kfree_sensitive ( sdesc ) ;
2014-05-31 17:44:17 +04:00
}
drbg - > priv_data = NULL ;
return 0 ;
}
2016-03-28 17:47:55 +03:00
static void drbg_kcapi_hmacsetkey ( struct drbg_state * drbg ,
const unsigned char * key )
{
struct sdesc * sdesc = ( struct sdesc * ) drbg - > priv_data ;
crypto_shash_setkey ( sdesc - > shash . tfm , key , drbg_statelen ( drbg ) ) ;
}
static int drbg_kcapi_hash ( struct drbg_state * drbg , unsigned char * outval ,
const struct list_head * in )
2014-05-31 17:44:17 +04:00
{
struct sdesc * sdesc = ( struct sdesc * ) drbg - > priv_data ;
2014-06-28 23:58:24 +04:00
struct drbg_string * input = NULL ;
2014-05-31 17:44:17 +04:00
crypto_shash_init ( & sdesc - > shash ) ;
2014-06-28 23:58:24 +04:00
list_for_each_entry ( input , in , list )
crypto_shash_update ( & sdesc - > shash , input - > buf , input - > len ) ;
2014-05-31 17:44:17 +04:00
return crypto_shash_final ( & sdesc - > shash , outval ) ;
}
# endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */
# ifdef CONFIG_CRYPTO_DRBG_CTR
2016-06-14 08:34:13 +03:00
static int drbg_fini_sym_kernel ( struct drbg_state * drbg )
{
struct crypto_cipher * tfm =
( struct crypto_cipher * ) drbg - > priv_data ;
if ( tfm )
crypto_free_cipher ( tfm ) ;
drbg - > priv_data = NULL ;
if ( drbg - > ctr_handle )
crypto_free_skcipher ( drbg - > ctr_handle ) ;
drbg - > ctr_handle = NULL ;
if ( drbg - > ctr_req )
2016-06-15 14:13:25 +03:00
skcipher_request_free ( drbg - > ctr_req ) ;
2016-06-14 08:34:13 +03:00
drbg - > ctr_req = NULL ;
2016-11-29 11:45:04 +03:00
kfree ( drbg - > outscratchpadbuf ) ;
drbg - > outscratchpadbuf = NULL ;
2016-06-14 08:34:13 +03:00
return 0 ;
}
2014-05-31 17:44:17 +04:00
static int drbg_init_sym_kernel ( struct drbg_state * drbg )
{
2015-03-01 22:39:17 +03:00
struct crypto_cipher * tfm ;
2016-06-14 08:34:13 +03:00
struct crypto_skcipher * sk_tfm ;
struct skcipher_request * req ;
unsigned int alignmask ;
char ctr_name [ CRYPTO_MAX_ALG_NAME ] ;
2014-05-31 17:44:17 +04:00
2015-03-01 22:39:17 +03:00
tfm = crypto_alloc_cipher ( drbg - > core - > backend_cra_name , 0 , 0 ) ;
2014-05-31 17:44:17 +04:00
if ( IS_ERR ( tfm ) ) {
2015-06-10 16:27:48 +03:00
pr_info ( " DRBG: could not allocate cipher TFM handle: %s \n " ,
drbg - > core - > backend_cra_name ) ;
2014-05-31 17:44:17 +04:00
return PTR_ERR ( tfm ) ;
}
2015-03-01 22:39:17 +03:00
BUG_ON ( drbg_blocklen ( drbg ) ! = crypto_cipher_blocksize ( tfm ) ) ;
2014-05-31 17:44:17 +04:00
drbg - > priv_data = tfm ;
2016-06-14 08:34:13 +03:00
if ( snprintf ( ctr_name , CRYPTO_MAX_ALG_NAME , " ctr(%s) " ,
drbg - > core - > backend_cra_name ) > = CRYPTO_MAX_ALG_NAME ) {
drbg_fini_sym_kernel ( drbg ) ;
return - EINVAL ;
}
sk_tfm = crypto_alloc_skcipher ( ctr_name , 0 , 0 ) ;
if ( IS_ERR ( sk_tfm ) ) {
pr_info ( " DRBG: could not allocate CTR cipher TFM handle: %s \n " ,
ctr_name ) ;
drbg_fini_sym_kernel ( drbg ) ;
return PTR_ERR ( sk_tfm ) ;
}
drbg - > ctr_handle = sk_tfm ;
2017-10-18 10:00:41 +03:00
crypto_init_wait ( & drbg - > ctr_wait ) ;
2016-06-14 08:34:13 +03:00
req = skcipher_request_alloc ( sk_tfm , GFP_KERNEL ) ;
if ( ! req ) {
pr_info ( " DRBG: could not allocate request queue \n " ) ;
drbg_fini_sym_kernel ( drbg ) ;
2016-06-17 12:16:19 +03:00
return - ENOMEM ;
2016-06-14 08:34:13 +03:00
}
drbg - > ctr_req = req ;
2017-10-18 10:00:41 +03:00
skcipher_request_set_callback ( req , CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP ,
crypto_req_done , & drbg - > ctr_wait ) ;
2016-06-14 08:34:13 +03:00
alignmask = crypto_skcipher_alignmask ( sk_tfm ) ;
2016-11-29 11:45:04 +03:00
drbg - > outscratchpadbuf = kmalloc ( DRBG_OUTSCRATCHLEN + alignmask ,
GFP_KERNEL ) ;
if ( ! drbg - > outscratchpadbuf ) {
drbg_fini_sym_kernel ( drbg ) ;
return - ENOMEM ;
}
drbg - > outscratchpad = ( u8 * ) PTR_ALIGN ( drbg - > outscratchpadbuf ,
alignmask + 1 ) ;
2018-07-10 18:56:33 +03:00
sg_init_table ( & drbg - > sg_in , 1 ) ;
2018-07-20 20:42:01 +03:00
sg_init_one ( & drbg - > sg_out , drbg - > outscratchpad , DRBG_OUTSCRATCHLEN ) ;
2018-07-10 18:56:33 +03:00
2016-06-14 08:35:13 +03:00
return alignmask ;
2014-05-31 17:44:17 +04:00
}
2016-05-31 14:11:57 +03:00
static void drbg_kcapi_symsetkey ( struct drbg_state * drbg ,
const unsigned char * key )
2014-05-31 17:44:17 +04:00
{
2015-03-01 22:39:17 +03:00
struct crypto_cipher * tfm =
( struct crypto_cipher * ) drbg - > priv_data ;
2014-05-31 17:44:17 +04:00
2015-03-01 22:39:17 +03:00
crypto_cipher_setkey ( tfm , key , ( drbg_keylen ( drbg ) ) ) ;
2016-05-31 14:11:57 +03:00
}
static int drbg_kcapi_sym ( struct drbg_state * drbg , unsigned char * outval ,
const struct drbg_string * in )
{
struct crypto_cipher * tfm =
( struct crypto_cipher * ) drbg - > priv_data ;
2015-03-01 22:39:17 +03:00
/* there is only component in *in */
BUG_ON ( in - > len < drbg_blocklen ( drbg ) ) ;
crypto_cipher_encrypt_one ( tfm , outval , in - > buf ) ;
return 0 ;
2014-05-31 17:44:17 +04:00
}
2016-06-14 08:34:13 +03:00
2016-06-14 08:35:37 +03:00
/*
 * CTR-mode encryption of @inlen bytes from @inbuf (or of zero bytes if
 * @inbuf is NULL) into @outbuf, processed in scratchpad-sized chunks.
 * Returns 0 on success or the skcipher error code.
 */
static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
			      u8 *inbuf, u32 inlen,
			      u8 *outbuf, u32 outlen)
{
	struct scatterlist *sg_in = &drbg->sg_in, *sg_out = &drbg->sg_out;
	u32 scratchpad_use = min_t(u32, outlen, DRBG_OUTSCRATCHLEN);
	int ret;

	if (inbuf) {
		/* Use caller-provided input buffer */
		sg_set_buf(sg_in, inbuf, inlen);
	} else {
		/* Use scratchpad for in-place operation */
		inlen = scratchpad_use;
		memset(drbg->outscratchpad, 0, scratchpad_use);
		sg_set_buf(sg_in, drbg->outscratchpad, scratchpad_use);
	}

	while (outlen) {
		u32 cryptlen = min3(inlen, outlen, (u32)DRBG_OUTSCRATCHLEN);

		/* Output buffer may not be valid for SGL, use scratchpad */
		skcipher_request_set_crypt(drbg->ctr_req, sg_in, sg_out,
					   cryptlen, drbg->V);
		ret = crypto_wait_req(crypto_skcipher_encrypt(drbg->ctr_req),
				      &drbg->ctr_wait);
		if (ret)
			goto out;

		crypto_init_wait(&drbg->ctr_wait);

		memcpy(outbuf, drbg->outscratchpad, cryptlen);
		/* scrub keystream material from the scratchpad */
		memzero_explicit(drbg->outscratchpad, cryptlen);

		outlen -= cryptlen;
		outbuf += cryptlen;
	}
	ret = 0;

out:
	return ret;
}
2014-05-31 17:44:17 +04:00
# endif /* CONFIG_CRYPTO_DRBG_CTR */
/***************************************************************
* Kernel crypto API interface to register DRBG
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Look up the DRBG flags by given kernel crypto API cra_name
 * The code uses the drbg_cores definition to do this
 *
 * @cra_name kernel crypto API cra_name
 * @coreref reference to integer which is filled with the pointer to
 *	    the applicable core
 * @pr reference for setting prediction resistance
 *
 * return: flags
 */
static inline void drbg_convert_tfm_core(const char *cra_driver_name,
					 int *coreref, bool *pr)
{
	int i;
	size_t start;
	int len;

	*pr = true;
	/* disassemble the names */
	if (!memcmp(cra_driver_name, "drbg_nopr_", 10)) {
		start = 10;
		*pr = false;
	} else if (!memcmp(cra_driver_name, "drbg_pr_", 8)) {
		start = 8;
	} else {
		/* unknown prefix: leave *coreref untouched */
		return;
	}

	/* remove the first part */
	len = strlen(cra_driver_name) - start;
	for (i = 0; i < ARRAY_SIZE(drbg_cores); i++) {
		if (!memcmp(cra_driver_name + start, drbg_cores[i].cra_name,
			    len)) {
			*coreref = i;
			return;
		}
	}
}
static int drbg_kcapi_init ( struct crypto_tfm * tfm )
{
struct drbg_state * drbg = crypto_tfm_ctx ( tfm ) ;
2015-04-18 20:36:17 +03:00
mutex_init ( & drbg - > drbg_mutex ) ;
2015-04-20 06:29:15 +03:00
return 0 ;
2014-05-31 17:44:17 +04:00
}
/* tfm exit: release the DRBG instance held in the tfm context. */
static void drbg_kcapi_cleanup(struct crypto_tfm *tfm)
{
	drbg_uninstantiate(crypto_tfm_ctx(tfm));
}
/*
* Generate random numbers invoked by the kernel crypto API :
* The API of the kernel crypto API is extended as follows :
*
2015-04-21 05:46:41 +03:00
* src is additional input supplied to the RNG .
* slen is the length of src .
* dst is the output buffer where random data is to be stored .
* dlen is the length of dst .
2014-05-31 17:44:17 +04:00
*/
2015-04-21 05:46:41 +03:00
static int drbg_kcapi_random ( struct crypto_rng * tfm ,
const u8 * src , unsigned int slen ,
u8 * dst , unsigned int dlen )
2014-05-31 17:44:17 +04:00
{
struct drbg_state * drbg = crypto_rng_ctx ( tfm ) ;
2015-04-21 05:46:41 +03:00
struct drbg_string * addtl = NULL ;
struct drbg_string string ;
if ( slen ) {
2014-06-28 23:58:24 +04:00
/* linked list variable is now local to allow modification */
2015-04-21 05:46:41 +03:00
drbg_string_fill ( & string , src , slen ) ;
addtl = & string ;
2014-05-31 17:44:17 +04:00
}
2015-04-21 05:46:41 +03:00
return drbg_generate_long ( drbg , dst , dlen , addtl ) ;
2014-05-31 17:44:17 +04:00
}
/*
2015-04-20 06:29:15 +03:00
* Seed the DRBG invoked by the kernel crypto API
2014-05-31 17:44:17 +04:00
*/
2015-04-21 05:46:41 +03:00
static int drbg_kcapi_seed ( struct crypto_rng * tfm ,
const u8 * seed , unsigned int slen )
2014-05-31 17:44:17 +04:00
{
struct drbg_state * drbg = crypto_rng_ctx ( tfm ) ;
struct crypto_tfm * tfm_base = crypto_rng_tfm ( tfm ) ;
bool pr = false ;
2015-04-21 05:46:41 +03:00
struct drbg_string string ;
struct drbg_string * seed_string = NULL ;
2014-05-31 17:44:17 +04:00
int coreref = 0 ;
drbg_convert_tfm_core ( crypto_tfm_alg_driver_name ( tfm_base ) , & coreref ,
& pr ) ;
if ( 0 < slen ) {
2015-04-21 05:46:41 +03:00
drbg_string_fill ( & string , seed , slen ) ;
seed_string = & string ;
2014-05-31 17:44:17 +04:00
}
2015-04-21 05:46:41 +03:00
return drbg_instantiate ( drbg , seed_string , coreref , pr ) ;
2014-05-31 17:44:17 +04:00
}
/***************************************************************
* Kernel module : code to load the module
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Tests as defined in 11.3.2 in addition to the cipher tests: testing
 * of the error handling.
 *
 * Note: testing of failing seed source as defined in 11.3.2 is not applicable
 * as seed source of get_random_bytes does not fail.
 *
 * Note 2: There is no sensible way of testing the reseed counter
 * enforcement, so skip it.
 */
static inline int __init drbg_healthcheck_sanity(void)
{
	int len = 0;
#define OUTBUFLEN 16
	unsigned char buf[OUTBUFLEN];
	struct drbg_state *drbg = NULL;
	int ret;
	int rc = -EFAULT;
	bool pr = false;
	int coreref = 0;
	struct drbg_string addtl;
	size_t max_addtllen, max_request_bytes;

	/* only perform test in FIPS mode */
	if (!fips_enabled)
		return 0;

#ifdef CONFIG_CRYPTO_DRBG_CTR
	drbg_convert_tfm_core("drbg_nopr_ctr_aes128", &coreref, &pr);
#elif defined CONFIG_CRYPTO_DRBG_HASH
	drbg_convert_tfm_core("drbg_nopr_sha256", &coreref, &pr);
#else
	drbg_convert_tfm_core("drbg_nopr_hmac_sha256", &coreref, &pr);
#endif

	drbg = kzalloc(sizeof(struct drbg_state), GFP_KERNEL);
	if (!drbg)
		return -ENOMEM;

	mutex_init(&drbg->drbg_mutex);
	drbg->core = &drbg_cores[coreref];
	drbg->reseed_threshold = drbg_max_requests(drbg);

	/*
	 * if the following tests fail, it is likely that there is a buffer
	 * overflow as buf is much smaller than the requested or provided
	 * string lengths -- in case the error handling does not succeed
	 * we may get an OOPS. And we want to get an OOPS as this is a
	 * grave bug.
	 */

	max_addtllen = drbg_max_addtl(drbg);
	max_request_bytes = drbg_max_request_bytes(drbg);
	drbg_string_fill(&addtl, buf, max_addtllen + 1);
	/* overflow addtllen with additional info string */
	len = drbg_generate(drbg, buf, OUTBUFLEN, &addtl);
	BUG_ON(0 < len);
	/* overflow max_bits */
	len = drbg_generate(drbg, buf, (max_request_bytes + 1), NULL);
	BUG_ON(0 < len);

	/* overflow max addtllen with personalization string */
	ret = drbg_seed(drbg, &addtl, false);
	BUG_ON(0 == ret);
	/* all tests passed */
	rc = 0;

	pr_devel("DRBG: Sanity tests for failure code paths successfully "
		 "completed\n");

	kfree(drbg);
	return rc;
}
2015-04-21 05:46:41 +03:00
static struct rng_alg drbg_algs [ 22 ] ;
2014-05-31 17:44:17 +04:00
/*
* Fill the array drbg_algs used to register the different DRBGs
* with the kernel crypto API . To fill the array , the information
* from drbg_cores [ ] is used .
*/
2015-04-21 05:46:41 +03:00
static inline void __init drbg_fill_array ( struct rng_alg * alg ,
2014-05-31 17:44:17 +04:00
const struct drbg_core * core , int pr )
{
int pos = 0 ;
2015-06-03 09:49:28 +03:00
static int priority = 200 ;
2014-05-31 17:44:17 +04:00
2015-04-21 05:46:41 +03:00
memcpy ( alg - > base . cra_name , " stdrng " , 6 ) ;
2014-05-31 17:44:17 +04:00
if ( pr ) {
2015-04-21 05:46:41 +03:00
memcpy ( alg - > base . cra_driver_name , " drbg_pr_ " , 8 ) ;
2014-05-31 17:44:17 +04:00
pos = 8 ;
} else {
2015-04-21 05:46:41 +03:00
memcpy ( alg - > base . cra_driver_name , " drbg_nopr_ " , 10 ) ;
2014-05-31 17:44:17 +04:00
pos = 10 ;
}
2015-04-21 05:46:41 +03:00
memcpy ( alg - > base . cra_driver_name + pos , core - > cra_name ,
2014-05-31 17:44:17 +04:00
strlen ( core - > cra_name ) ) ;
2015-04-21 05:46:41 +03:00
alg - > base . cra_priority = priority ;
2014-05-31 17:44:17 +04:00
priority + + ;
/*
* If FIPS mode enabled , the selected DRBG shall have the
* highest cra_priority over other stdrng instances to ensure
* it is selected .
*/
if ( fips_enabled )
2015-04-21 05:46:41 +03:00
alg - > base . cra_priority + = 200 ;
alg - > base . cra_ctxsize = sizeof ( struct drbg_state ) ;
alg - > base . cra_module = THIS_MODULE ;
alg - > base . cra_init = drbg_kcapi_init ;
alg - > base . cra_exit = drbg_kcapi_cleanup ;
alg - > generate = drbg_kcapi_random ;
alg - > seed = drbg_kcapi_seed ;
alg - > set_ent = drbg_kcapi_set_entropy ;
alg - > seedsize = 0 ;
2014-05-31 17:44:17 +04:00
}
/* Module init: run the FIPS sanity checks and register all DRBG variants. */
static int __init drbg_init(void)
{
	unsigned int i = 0; /* pointer to drbg_algs */
	unsigned int j = 0; /* pointer to drbg_cores */
	int ret;

	ret = drbg_healthcheck_sanity();
	if (ret)
		return ret;

	if (ARRAY_SIZE(drbg_cores) * 2 > ARRAY_SIZE(drbg_algs)) {
		pr_info("DRBG: Cannot register all DRBG types"
			"(slots needed: %zu, slots available: %zu)\n",
			ARRAY_SIZE(drbg_cores) * 2, ARRAY_SIZE(drbg_algs));
		return -EFAULT;
	}

	/*
	 * each DRBG definition can be used with PR and without PR, thus
	 * we instantiate each DRBG in drbg_cores[] twice.
	 *
	 * As the order of placing them into the drbg_algs array matters
	 * (the later DRBGs receive a higher cra_priority) we register the
	 * prediction resistance DRBGs first as the should not be too
	 * interesting.
	 */
	for (j = 0; ARRAY_SIZE(drbg_cores) > j; j++, i++)
		drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 1);
	for (j = 0; ARRAY_SIZE(drbg_cores) > j; j++, i++)
		drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 0);
	return crypto_register_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
}
2014-07-10 12:52:04 +04:00
/* Module exit: unregister every DRBG variant registered by drbg_init(). */
static void __exit drbg_exit(void)
{
	crypto_unregister_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
}
2019-04-12 07:57:42 +03:00
subsys_initcall(drbg_init);
module_exit(drbg_exit);

/* Per-core description strings default to empty when a core is disabled. */
#ifndef CRYPTO_DRBG_HASH_STRING
#define CRYPTO_DRBG_HASH_STRING ""
#endif
#ifndef CRYPTO_DRBG_HMAC_STRING
#define CRYPTO_DRBG_HMAC_STRING ""
#endif
#ifndef CRYPTO_DRBG_CTR_STRING
#define CRYPTO_DRBG_CTR_STRING ""
#endif
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("NIST SP800-90A Deterministic Random Bit Generator (DRBG) "
		   "using following cores: "
		   CRYPTO_DRBG_HASH_STRING
		   CRYPTO_DRBG_HMAC_STRING
		   CRYPTO_DRBG_CTR_STRING);
MODULE_ALIAS_CRYPTO("stdrng");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);