2017-03-16 22:18:50 -08:00
// SPDX-License-Identifier: GPL-2.0
# include "bcachefs.h"
# include "checksum.h"
2022-07-18 19:42:58 -04:00
# include "errcode.h"
2017-03-16 22:18:50 -08:00
# include "super.h"
# include "super-io.h"
# include <linux/crc32c.h>
# include <linux/crypto.h>
2021-06-17 13:42:09 +02:00
# include <linux/xxhash.h>
2017-03-16 22:18:50 -08:00
# include <linux/key.h>
# include <linux/random.h>
# include <linux/scatterlist.h>
# include <crypto/algapi.h>
# include <crypto/chacha.h>
# include <crypto/hash.h>
# include <crypto/poly1305.h>
# include <crypto/skcipher.h>
# include <keys/user-type.h>
2021-06-17 11:29:59 +02:00
/*
 * bch2_checksum_state is an abstraction of the checksum state calculated over different pages.
 * It features page merging without having the checksum algorithm lose its state.
 * For native checksum algorithms (like crc), a default seed value will do.
 * For hash-like algorithms, a state needs to be stored.
 */
struct bch2_checksum_state {
	union {
		u64			seed;		/* running value for the crc32c/crc64 variants */
		struct xxh64_state	h64state;	/* streaming state for xxhash64 */
	};
	unsigned int		type;	/* BCH_CSUM_* — selects which union member is live */
};
static void bch2_checksum_init ( struct bch2_checksum_state * state )
2017-03-16 22:18:50 -08:00
{
2021-06-17 11:29:59 +02:00
switch ( state - > type ) {
2021-11-11 12:11:33 -05:00
case BCH_CSUM_none :
case BCH_CSUM_crc32c :
case BCH_CSUM_crc64 :
2021-06-17 11:29:59 +02:00
state - > seed = 0 ;
break ;
2021-11-11 12:11:33 -05:00
case BCH_CSUM_crc32c_nonzero :
2021-06-17 11:29:59 +02:00
state - > seed = U32_MAX ;
break ;
2021-11-11 12:11:33 -05:00
case BCH_CSUM_crc64_nonzero :
2021-06-17 11:29:59 +02:00
state - > seed = U64_MAX ;
break ;
2021-11-11 12:11:33 -05:00
case BCH_CSUM_xxhash :
2021-06-17 13:42:09 +02:00
xxh64_reset ( & state - > h64state , 0 ) ;
break ;
2017-03-16 22:18:50 -08:00
default :
BUG ( ) ;
}
}
2021-06-17 11:29:59 +02:00
static u64 bch2_checksum_final ( const struct bch2_checksum_state * state )
2017-03-16 22:18:50 -08:00
{
2021-06-17 11:29:59 +02:00
switch ( state - > type ) {
2021-11-11 12:11:33 -05:00
case BCH_CSUM_none :
case BCH_CSUM_crc32c :
case BCH_CSUM_crc64 :
2021-06-17 11:29:59 +02:00
return state - > seed ;
2021-11-11 12:11:33 -05:00
case BCH_CSUM_crc32c_nonzero :
2021-06-17 11:29:59 +02:00
return state - > seed ^ U32_MAX ;
2021-11-11 12:11:33 -05:00
case BCH_CSUM_crc64_nonzero :
2021-06-17 11:29:59 +02:00
return state - > seed ^ U64_MAX ;
2021-11-11 12:11:33 -05:00
case BCH_CSUM_xxhash :
2021-06-17 13:42:09 +02:00
return xxh64_digest ( & state - > h64state ) ;
2017-03-16 22:18:50 -08:00
default :
BUG ( ) ;
}
}
2021-06-17 11:29:59 +02:00
/*
 * Fold @len bytes at @data into the running checksum state.  May be called
 * repeatedly to checksum a buffer in pieces.
 */
static void bch2_checksum_update(struct bch2_checksum_state *state,
				 const void *data, size_t len)
{
	switch (state->type) {
	case BCH_CSUM_xxhash:
		xxh64_update(&state->h64state, data, len);
		break;
	case BCH_CSUM_crc32c_nonzero:
	case BCH_CSUM_crc32c:
		state->seed = crc32c(state->seed, data, len);
		break;
	case BCH_CSUM_crc64_nonzero:
	case BCH_CSUM_crc64:
		state->seed = crc64_be(state->seed, data, len);
		break;
	case BCH_CSUM_none:
		/* nothing to accumulate */
		break;
	default:
		BUG();
	}
}
2022-02-19 00:42:12 -05:00
/*
 * Encrypt a scatterlist in place with the chacha20 tfm, using @nonce as
 * the IV.  Returns 0 or a negative error from the crypto layer.
 */
static inline int do_encrypt_sg(struct crypto_sync_skcipher *tfm,
				struct nonce nonce,
				struct scatterlist *sg, size_t len)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_crypt(req, sg, sg, len, nonce.d);

	int ret = crypto_skcipher_encrypt(req);
	if (ret)
		pr_err("got error %i from crypto_skcipher_encrypt()", ret);
	return ret;
}
2022-02-19 00:42:12 -05:00
/*
 * Encrypt an arbitrary kernel buffer in place.
 *
 * Linearly mapped (kmalloc'd) buffers are physically contiguous, so a
 * single scatterlist entry covers them.  vmalloc'd buffers must be walked
 * page by page, since their physical pages are not contiguous.
 *
 * Returns 0 on success, -BCH_ERR_ENOMEM_do_encrypt if the scatterlist
 * allocation fails, or a negative error from the crypto layer.
 */
static inline int do_encrypt(struct crypto_sync_skcipher *tfm,
			     struct nonce nonce,
			     void *buf, size_t len)
{
	if (!is_vmalloc_addr(buf)) {
		struct scatterlist sg;

		sg_init_table(&sg, 1);
		/*
		 * We already know this isn't a vmalloc address; the old
		 * is_vmalloc_addr() ternary here was dead code.
		 */
		sg_set_page(&sg, virt_to_page(buf), len, offset_in_page(buf));
		return do_encrypt_sg(tfm, nonce, &sg, len);
	} else {
		unsigned pages = buf_pages(buf, len);
		struct scatterlist *sg;
		size_t orig_len = len;
		unsigned i;
		int ret;

		sg = kmalloc_array(pages, sizeof(*sg), GFP_KERNEL);
		if (!sg)
			return -BCH_ERR_ENOMEM_do_encrypt;

		sg_init_table(sg, pages);

		for (i = 0; i < pages; i++) {
			unsigned offset = offset_in_page(buf);
			/* min_t(size_t, ...): len is size_t, PAGE_SIZE - offset is unsigned long */
			unsigned pg_len = min_t(size_t, len, PAGE_SIZE - offset);

			sg_set_page(sg + i, vmalloc_to_page(buf), pg_len, offset);
			buf += pg_len;
			len -= pg_len;
		}

		ret = do_encrypt_sg(tfm, nonce, sg, orig_len);
		kfree(sg);
		return ret;
	}
}
/*
 * Encrypt/decrypt @buf with a throwaway chacha20 transform keyed by @key.
 * Used for wrapping/unwrapping the superblock encryption key (chacha20 is
 * its own inverse, so the same call decrypts).
 */
int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
			    void *buf, size_t len)
{
	struct crypto_sync_skcipher *chacha20 =
		crypto_alloc_sync_skcipher("chacha20", 0, 0);
	int ret = PTR_ERR_OR_ZERO(chacha20);

	if (ret) {
		pr_err("error requesting chacha20 cipher: %s", bch2_err_str(ret));
		return ret;
	}

	ret = crypto_skcipher_setkey(&chacha20->base,
				     (void *) key, sizeof(*key));
	if (ret) {
		pr_err("error from crypto_skcipher_setkey(): %s", bch2_err_str(ret));
		goto err;
	}

	ret = do_encrypt(chacha20, nonce, buf, len);
err:
	crypto_free_sync_skcipher(chacha20);
	return ret;
}
2022-02-19 00:42:12 -05:00
static int gen_poly_key ( struct bch_fs * c , struct shash_desc * desc ,
struct nonce nonce )
2017-03-16 22:18:50 -08:00
{
u8 key [ POLY1305_KEY_SIZE ] ;
2022-02-19 00:42:12 -05:00
int ret ;
2017-03-16 22:18:50 -08:00
nonce . d [ 3 ] ^ = BCH_NONCE_POLY ;
memset ( key , 0 , sizeof ( key ) ) ;
2022-02-19 00:42:12 -05:00
ret = do_encrypt ( c - > chacha20 , nonce , key , sizeof ( key ) ) ;
if ( ret )
return ret ;
2017-03-16 22:18:50 -08:00
desc - > tfm = c - > poly1305 ;
crypto_shash_init ( desc ) ;
crypto_shash_update ( desc , key , sizeof ( key ) ) ;
2022-02-19 00:42:12 -05:00
return 0 ;
2017-03-16 22:18:50 -08:00
}
/*
 * Checksum (or MAC, for the chacha20/poly1305 types) a flat buffer.
 * @nonce is only used by the authenticated types.
 */
struct bch_csum bch2_checksum(struct bch_fs *c, unsigned type,
			      struct nonce nonce, const void *data, size_t len)
{
	switch (type) {
	case BCH_CSUM_none:
	case BCH_CSUM_crc32c_nonzero:
	case BCH_CSUM_crc64_nonzero:
	case BCH_CSUM_crc32c:
	case BCH_CSUM_xxhash:
	case BCH_CSUM_crc64: {
		struct bch2_checksum_state state = { .type = type };

		bch2_checksum_init(&state);
		bch2_checksum_update(&state, data, len);

		return (struct bch_csum) {
			.lo = cpu_to_le64(bch2_checksum_final(&state)),
		};
	}

	case BCH_CSUM_chacha20_poly1305_80:
	case BCH_CSUM_chacha20_poly1305_128: {
		SHASH_DESC_ON_STACK(desc, c->poly1305);
		u8 digest[POLY1305_DIGEST_SIZE];
		struct bch_csum ret = { 0 };

		gen_poly_key(c, desc, nonce);

		crypto_shash_update(desc, data, len);
		crypto_shash_final(desc, digest);

		/* truncate the 128-bit MAC to the on-disk checksum width */
		memcpy(&ret, digest, bch_crc_bytes[type]);
		return ret;
	}
	default:
		BUG();
	}
}
2022-02-19 00:42:12 -05:00
/*
 * Encrypt @data in place if @type is an encrypting checksum type;
 * otherwise a no-op returning 0.
 */
int bch2_encrypt(struct bch_fs *c, unsigned type,
		 struct nonce nonce, void *data, size_t len)
{
	return bch2_csum_type_is_encryption(type)
		? do_encrypt(c->chacha20, nonce, data, len)
		: 0;
}
/*
 * Checksum the data of @bio described by @iter, advancing @iter past the
 * range as a side effect (callers such as bch2_rechecksum_bio() rely on
 * this to checksum a bio in consecutive slices).
 */
static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
					   struct nonce nonce, struct bio *bio,
					   struct bvec_iter *iter)
{
	struct bio_vec bv;

	switch (type) {
	case BCH_CSUM_none:
		return (struct bch_csum) { 0 };
	case BCH_CSUM_crc32c_nonzero:
	case BCH_CSUM_crc64_nonzero:
	case BCH_CSUM_crc32c:
	case BCH_CSUM_xxhash:
	case BCH_CSUM_crc64: {
		struct bch2_checksum_state state;

		state.type = type;
		bch2_checksum_init(&state);

#ifdef CONFIG_HIGHMEM
		/* highmem pages need a temporary kernel mapping per segment */
		__bio_for_each_segment(bv, bio, *iter, *iter) {
			void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;

			bch2_checksum_update(&state, p, bv.bv_len);
			kunmap_local(p);
		}
#else
		/* no highmem: pages are permanently mapped, use page_address() */
		__bio_for_each_bvec(bv, bio, *iter, *iter)
			bch2_checksum_update(&state, page_address(bv.bv_page) + bv.bv_offset,
				bv.bv_len);
#endif
		return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
	}

	case BCH_CSUM_chacha20_poly1305_80:
	case BCH_CSUM_chacha20_poly1305_128: {
		SHASH_DESC_ON_STACK(desc, c->poly1305);
		u8 digest[POLY1305_DIGEST_SIZE];
		struct bch_csum ret = { 0 };

		/* NOTE(review): gen_poly_key() return value is ignored here */
		gen_poly_key(c, desc, nonce);

#ifdef CONFIG_HIGHMEM
		__bio_for_each_segment(bv, bio, *iter, *iter) {
			void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;

			crypto_shash_update(desc, p, bv.bv_len);
			kunmap_local(p);
		}
#else
		__bio_for_each_bvec(bv, bio, *iter, *iter)
			crypto_shash_update(desc,
				page_address(bv.bv_page) + bv.bv_offset,
				bv.bv_len);
#endif
		crypto_shash_final(desc, digest);

		/* truncate the 128-bit MAC to the on-disk checksum width */
		memcpy(&ret, digest, bch_crc_bytes[type]);
		return ret;
	}
	default:
		BUG();
	}
}
struct bch_csum bch2_checksum_bio ( struct bch_fs * c , unsigned type ,
struct nonce nonce , struct bio * bio )
{
struct bvec_iter iter = bio - > bi_iter ;
return __bch2_checksum_bio ( c , type , nonce , bio , & iter ) ;
}
2022-11-01 03:37:53 -04:00
/*
 * Encrypt @bio's data in place, batching segments through a fixed-size
 * on-stack scatterlist.  No-op (returning 0) for non-encrypting checksum
 * types.  Returns 0 or a negative error from the crypto layer.
 */
int __bch2_encrypt_bio(struct bch_fs *c, unsigned type,
		       struct nonce nonce, struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	struct scatterlist sgl[16], *sg = sgl;
	size_t bytes = 0;
	int ret = 0;

	if (!bch2_csum_type_is_encryption(type))
		return 0;

	sg_init_table(sgl, ARRAY_SIZE(sgl));

	bio_for_each_segment(bv, bio, iter) {
		if (sg == sgl + ARRAY_SIZE(sgl)) {
			/* batch full: encrypt what we have, then start over */
			sg_mark_end(sg - 1);

			ret = do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
			if (ret)
				return ret;

			/* advance the nonce so the keystream stays continuous */
			nonce = nonce_add(nonce, bytes);
			bytes = 0;

			sg_init_table(sgl, ARRAY_SIZE(sgl));
			sg = sgl;
		}

		sg_set_page(sg++, bv.bv_page, bv.bv_len, bv.bv_offset);
		bytes += bv.bv_len;
	}

	/* encrypt the final (possibly partial) batch */
	sg_mark_end(sg - 1);
	return do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
}
2019-05-12 22:23:30 -04:00
/*
 * Given checksum @a over some data and checksum @b over @b_len bytes that
 * immediately follow it, compute the checksum of the concatenation.
 * Only valid for mergeable (linear) checksum types.
 */
struct bch_csum bch2_checksum_merge(unsigned type, struct bch_csum a,
				    struct bch_csum b, size_t b_len)
{
	struct bch2_checksum_state state;

	state.type = type;
	bch2_checksum_init(&state);
	/* resume from a's value rather than the type's initial seed */
	state.seed = le64_to_cpu(a.lo);

	BUG_ON(!bch2_checksum_mergeable(type));

	/* extend a's checksum over b_len zero bytes, one page at a time */
	while (b_len) {
		unsigned chunk = min_t(unsigned, b_len, PAGE_SIZE);

		bch2_checksum_update(&state,
				page_address(ZERO_PAGE(0)), chunk);
		b_len -= chunk;
	}

	a.lo  = cpu_to_le64(bch2_checksum_final(&state));
	a.lo ^= b.lo;
	a.hi ^= b.hi;
	return a;
}
/*
 * Split an extent's checksum into up to three consecutive pieces
 * (@crc_a over @len_a sectors, @crc_b over @len_b, plus the remainder),
 * recomputing with @new_csum_type.  Verifies the data still matches
 * crc_old.csum before handing out the new checksums; returns -EIO on
 * mismatch, 0 on success.
 */
int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
			struct bversion version,
			struct bch_extent_crc_unpacked crc_old,
			struct bch_extent_crc_unpacked *crc_a,
			struct bch_extent_crc_unpacked *crc_b,
			unsigned len_a, unsigned len_b,
			unsigned new_csum_type)
{
	struct bvec_iter iter = bio->bi_iter;
	struct nonce nonce = extent_nonce(version, crc_old);
	struct bch_csum merged = { 0 };
	struct crc_split {
		struct bch_extent_crc_unpacked *crc;
		unsigned	len;		/* sectors */
		unsigned	csum_type;
		struct bch_csum	csum;
	} splits[3] = {
		{ crc_a, len_a, new_csum_type, { 0 } },
		{ crc_b, len_b, new_csum_type, { 0 } },
		{ NULL, bio_sectors(bio) - len_a - len_b, new_csum_type, { 0 } },
	}, *i;
	/*
	 * If the type is unchanged and linear, the per-split checksums can be
	 * merged to verify against crc_old without a second pass over the data.
	 */
	bool mergeable = crc_old.csum_type == new_csum_type &&
		bch2_checksum_mergeable(new_csum_type);
	unsigned crc_nonce = crc_old.nonce;

	BUG_ON(len_a + len_b > bio_sectors(bio));
	BUG_ON(crc_old.uncompressed_size != bio_sectors(bio));
	BUG_ON(crc_is_compressed(crc_old));
	BUG_ON(bch2_csum_type_is_encryption(crc_old.csum_type) !=
	       bch2_csum_type_is_encryption(new_csum_type));

	/* checksum each slice; __bch2_checksum_bio() advances iter for us */
	for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
		iter.bi_size = i->len << 9;
		if (mergeable || i->crc)
			i->csum = __bch2_checksum_bio(c, i->csum_type,
						      nonce, bio, &iter);
		else
			bio_advance_iter(bio, &iter, i->len << 9);
		nonce = nonce_add(nonce, i->len << 9);
	}

	if (mergeable)
		for (i = splits; i < splits + ARRAY_SIZE(splits); i++)
			merged = bch2_checksum_merge(new_csum_type, merged,
						     i->csum, i->len << 9);
	else
		/* types differ: re-checksum the whole bio with the old type */
		merged = bch2_checksum_bio(c, crc_old.csum_type,
				extent_nonce(version, crc_old), bio);

	if (bch2_crc_cmp(merged, crc_old.csum) && !c->opts.no_data_io) {
		bch_err(c, "checksum error in %s() (memory corruption or bug?)\n"
			"expected %0llx:%0llx got %0llx:%0llx (old type %s new type %s)",
			__func__,
			crc_old.csum.hi,
			crc_old.csum.lo,
			merged.hi,
			merged.lo,
			bch2_csum_types[crc_old.csum_type],
			bch2_csum_types[new_csum_type]);
		return -EIO;
	}

	/* data verified: publish the new per-split crc entries */
	for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
		if (i->crc)
			*i->crc = (struct bch_extent_crc_unpacked) {
				.csum_type		= i->csum_type,
				.compression_type	= crc_old.compression_type,
				.compressed_size	= i->len,
				.uncompressed_size	= i->len,
				.offset			= 0,
				.live_size		= i->len,
				.nonce			= crc_nonce,
				.csum			= i->csum,
			};

		/* encrypting types consume nonce space per sector */
		if (bch2_csum_type_is_encryption(new_csum_type))
			crc_nonce += i->len;
	}

	return 0;
}
2023-08-05 15:43:00 -04:00
/* BCH_SB_FIELD_crypt: */
static int bch2_sb_crypt_validate ( struct bch_sb * sb ,
struct bch_sb_field * f ,
struct printbuf * err )
{
struct bch_sb_field_crypt * crypt = field_to_type ( f , crypt ) ;
if ( vstruct_bytes ( & crypt - > field ) < sizeof ( * crypt ) ) {
prt_printf ( err , " wrong size (got %zu should be %zu) " ,
vstruct_bytes ( & crypt - > field ) , sizeof ( * crypt ) ) ;
return - BCH_ERR_invalid_sb_crypt ;
}
if ( BCH_CRYPT_KDF_TYPE ( crypt ) ) {
prt_printf ( err , " bad kdf type %llu " , BCH_CRYPT_KDF_TYPE ( crypt ) ) ;
return - BCH_ERR_invalid_sb_crypt ;
}
return 0 ;
}
/*
 * Print the crypt superblock field's KDF parameters.
 * Fixed the output label: it printed "KFD" but the value shown is
 * BCH_CRYPT_KDF_TYPE — the key derivation function type.
 */
static void bch2_sb_crypt_to_text(struct printbuf *out, struct bch_sb *sb,
				  struct bch_sb_field *f)
{
	struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);

	prt_printf(out, "KDF: %llu", BCH_CRYPT_KDF_TYPE(crypt));
	prt_newline(out);
	prt_printf(out, "scrypt n: %llu", BCH_KDF_SCRYPT_N(crypt));
	prt_newline(out);
	prt_printf(out, "scrypt r: %llu", BCH_KDF_SCRYPT_R(crypt));
	prt_newline(out);
	prt_printf(out, "scrypt p: %llu", BCH_KDF_SCRYPT_P(crypt));
	prt_newline(out);
}
/* vtable registering the crypt field's validate/print hooks */
const struct bch_sb_field_ops bch_sb_field_ops_crypt = {
	.validate	= bch2_sb_crypt_validate,
	.to_text	= bch2_sb_crypt_to_text,
};
2017-03-16 22:18:50 -08:00
#ifdef __KERNEL__
/*
 * Kernel build: look the key up in the kernel keyring by description.
 * Returns 0 and fills @key, -EINVAL if the payload size is wrong, or the
 * error from request_key().
 */
static int __bch2_request_key(char *key_description, struct bch_key *key)
{
	struct key *keyring_key;
	const struct user_key_payload *ukp;
	int ret;

	keyring_key = request_key(&key_type_user, key_description, NULL);
	if (IS_ERR(keyring_key))
		return PTR_ERR(keyring_key);

	down_read(&keyring_key->sem);
	ukp = dereference_key_locked(keyring_key);
	if (ukp->datalen == sizeof(*key)) {
		memcpy(key, ukp->data, ukp->datalen);
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	up_read(&keyring_key->sem);
	key_put(keyring_key);

	return ret;
}
#else
#include <keyutils.h>

/*
 * Userspace build: try the session, user, and user-session keyrings in
 * turn via keyutils.  Returns 0 on success, -errno if no keyring has the
 * key, or -1 if the stored payload has the wrong size.
 */
static int __bch2_request_key(char *key_description, struct bch_key *key)
{
	key_serial_t key_id;

	key_id = request_key("user", key_description, NULL,
			     KEY_SPEC_SESSION_KEYRING);
	if (key_id >= 0)
		goto got_key;

	key_id = request_key("user", key_description, NULL,
			     KEY_SPEC_USER_KEYRING);
	if (key_id >= 0)
		goto got_key;

	key_id = request_key("user", key_description, NULL,
			     KEY_SPEC_USER_SESSION_KEYRING);
	if (key_id >= 0)
		goto got_key;

	return -errno;
got_key:
	if (keyctl_read(key_id, (void *) key, sizeof(*key)) != sizeof(*key))
		return -1;

	return 0;
}

#include "../crypto.h"
#endif
2022-01-04 19:05:08 -05:00
/*
 * Fetch this filesystem's encryption key, identified by
 * "bcachefs:<user uuid>", from the keyring.  In userspace builds, fall
 * back to prompting for a passphrase and deriving the key from it.
 */
int bch2_request_key(struct bch_sb *sb, struct bch_key *key)
{
	struct printbuf key_description = PRINTBUF;
	int ret;

	prt_printf(&key_description, "bcachefs:");
	pr_uuid(&key_description, sb->user_uuid.b);

	ret = __bch2_request_key(key_description.buf, key);
	printbuf_exit(&key_description);

#ifndef __KERNEL__
	if (ret) {
		/*
		 * NOTE(review): passphrase appears to be heap-allocated by
		 * read_passphrase() and is never freed or zeroized here, and
		 * bch2_passphrase_check()'s result is not checked before
		 * forcing ret to 0 — confirm both against ../crypto.h.
		 */
		char *passphrase = read_passphrase("Enter passphrase: ");
		struct bch_encrypted_key sb_key;

		bch2_passphrase_check(sb, passphrase,
				      key, &sb_key);
		ret = 0;
	}
#endif

	/* stash with memfd, pass memfd fd to mount */

	return ret;
}
2023-09-23 19:07:16 -04:00
# ifndef __KERNEL__
int bch2_revoke_key ( struct bch_sb * sb )
{
key_serial_t key_id ;
struct printbuf key_description = PRINTBUF ;
prt_printf ( & key_description , " bcachefs: " ) ;
pr_uuid ( & key_description , sb - > user_uuid . b ) ;
key_id = request_key ( " user " , key_description . buf , NULL , KEY_SPEC_USER_KEYRING ) ;
printbuf_exit ( & key_description ) ;
if ( key_id < 0 )
return errno ;
keyctl_revoke ( key_id ) ;
return 0 ;
}
# endif
2017-03-16 22:18:50 -08:00
/*
 * Decrypt the superblock's encrypted key field using the user's key from
 * the keyring, storing the plaintext key in @key.  If the superblock key
 * is not encrypted it is returned as-is.
 *
 * Note the label structure: "out" falls through into "err", so the
 * success path also scrubs the stack copies of the key material.
 */
int bch2_decrypt_sb_key(struct bch_fs *c,
			struct bch_sb_field_crypt *crypt,
			struct bch_key *key)
{
	struct bch_encrypted_key sb_key = crypt->key;
	struct bch_key user_key;
	int ret = 0;

	/* is key encrypted? */
	if (!bch2_key_is_encrypted(&sb_key))
		goto out;

	ret = bch2_request_key(c->disk_sb.sb, &user_key);
	if (ret) {
		bch_err(c, "error requesting encryption key: %s", bch2_err_str(ret));
		goto err;
	}

	/* decrypt real key: */
	ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
				      &sb_key, sizeof(sb_key));
	if (ret)
		goto err;

	/* still looks encrypted: the user key must have been wrong */
	if (bch2_key_is_encrypted(&sb_key)) {
		bch_err(c, "incorrect encryption key");
		ret = -EINVAL;
		goto err;
	}
out:
	*key = sb_key.key;
err:
	/* scrub key material from the stack on every path */
	memzero_explicit(&sb_key, sizeof(sb_key));
	memzero_explicit(&user_key, sizeof(user_key));
	return ret;
}
static int bch2_alloc_ciphers ( struct bch_fs * c )
{
2022-07-18 19:42:58 -04:00
int ret ;
2017-03-16 22:18:50 -08:00
if ( ! c - > chacha20 )
c - > chacha20 = crypto_alloc_sync_skcipher ( " chacha20 " , 0 , 0 ) ;
2022-07-18 19:42:58 -04:00
ret = PTR_ERR_OR_ZERO ( c - > chacha20 ) ;
if ( ret ) {
bch_err ( c , " error requesting chacha20 module: %s " , bch2_err_str ( ret ) ) ;
return ret ;
2017-03-16 22:18:50 -08:00
}
if ( ! c - > poly1305 )
c - > poly1305 = crypto_alloc_shash ( " poly1305 " , 0 , 0 ) ;
2022-07-18 19:42:58 -04:00
ret = PTR_ERR_OR_ZERO ( c - > poly1305 ) ;
if ( ret ) {
bch_err ( c , " error requesting poly1305 module: %s " , bch2_err_str ( ret ) ) ;
return ret ;
2017-03-16 22:18:50 -08:00
}
return 0 ;
}
/*
 * Turn off encryption: decrypt the superblock key and write it back in
 * plaintext.  Returns -EINVAL if there is no crypt field, 0 if the key is
 * still passphrase-encrypted (nothing to do), or an error from decryption.
 */
int bch2_disable_encryption(struct bch_fs *c)
{
	struct bch_sb_field_crypt *crypt;
	struct bch_key key;
	int ret = -EINVAL;

	mutex_lock(&c->sb_lock);

	crypt = bch2_sb_field_get(c->disk_sb.sb, crypt);
	if (!crypt)
		goto out;

	/* is key encrypted? */
	ret = 0;
	if (bch2_key_is_encrypted(&crypt->key))
		goto out;

	ret = bch2_decrypt_sb_key(c, crypt, &key);
	if (ret)
		goto out;

	/* store the now-plaintext key back in the superblock */
	crypt->key.magic = cpu_to_le64(BCH_KEY_MAGIC);
	crypt->key.key = key;

	SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 0);
	bch2_write_super(c);
out:
	mutex_unlock(&c->sb_lock);

	return ret;
}
/*
 * Turn on encryption: generate a random key, optionally wrap it with the
 * user's keyring key (@keyed), program the chacha20 tfm with it, and write
 * it to a new crypt superblock field.  Returns -EINVAL if encryption is
 * already enabled.
 */
int bch2_enable_encryption(struct bch_fs *c, bool keyed)
{
	struct bch_encrypted_key key;
	struct bch_key user_key;
	struct bch_sb_field_crypt *crypt;
	int ret = -EINVAL;

	mutex_lock(&c->sb_lock);

	/* Do we already have an encryption key? */
	if (bch2_sb_field_get(c->disk_sb.sb, crypt))
		goto err;

	ret = bch2_alloc_ciphers(c);
	if (ret)
		goto err;

	key.magic = cpu_to_le64(BCH_KEY_MAGIC);
	get_random_bytes(&key.key, sizeof(key.key));

	if (keyed) {
		ret = bch2_request_key(c->disk_sb.sb, &user_key);
		if (ret) {
			bch_err(c, "error requesting encryption key: %s", bch2_err_str(ret));
			goto err;
		}

		/* wrap the new key with the user's key before it hits disk */
		ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
					      &key, sizeof(key));
		if (ret)
			goto err;
	}

	ret = crypto_skcipher_setkey(&c->chacha20->base,
			(void *) &key.key, sizeof(key.key));
	if (ret)
		goto err;

	crypt = bch2_sb_field_resize(&c->disk_sb, crypt,
				     sizeof(*crypt) / sizeof(u64));
	if (!crypt) {
		ret = -BCH_ERR_ENOSPC_sb_crypt;
		goto err;
	}

	crypt->key = key;

	/* write superblock */
	SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 1);
	bch2_write_super(c);
err:
	mutex_unlock(&c->sb_lock);
	/* scrub key material from the stack on every path */
	memzero_explicit(&user_key, sizeof(user_key));
	memzero_explicit(&key, sizeof(key));
	return ret;
}
void bch2_fs_encryption_exit ( struct bch_fs * c )
{
if ( ! IS_ERR_OR_NULL ( c - > poly1305 ) )
crypto_free_shash ( c - > poly1305 ) ;
if ( ! IS_ERR_OR_NULL ( c - > chacha20 ) )
crypto_free_sync_skcipher ( c - > chacha20 ) ;
if ( ! IS_ERR_OR_NULL ( c - > sha256 ) )
crypto_free_shash ( c - > sha256 ) ;
}
/*
 * Filesystem startup: allocate sha256 unconditionally; if the superblock
 * has a crypt field, also allocate the ciphers, decrypt the superblock key
 * and program the chacha20 tfm with it.
 */
int bch2_fs_encryption_init(struct bch_fs *c)
{
	struct bch_sb_field_crypt *crypt;
	struct bch_key key;
	int ret = 0;

	c->sha256 = crypto_alloc_shash("sha256", 0, 0);
	ret = PTR_ERR_OR_ZERO(c->sha256);
	if (ret) {
		bch_err(c, "error requesting sha256 module: %s", bch2_err_str(ret));
		goto out;
	}

	/* no crypt field: filesystem is unencrypted, nothing more to do */
	crypt = bch2_sb_field_get(c->disk_sb.sb, crypt);
	if (!crypt)
		goto out;

	ret = bch2_alloc_ciphers(c);
	if (ret)
		goto out;

	ret = bch2_decrypt_sb_key(c, crypt, &key);
	if (ret)
		goto out;

	ret = crypto_skcipher_setkey(&c->chacha20->base,
			(void *) &key.key, sizeof(key.key));
	if (ret)
		goto out;
out:
	/* scrub the plaintext key from the stack on every path */
	memzero_explicit(&key, sizeof(key));
	return ret;
}