// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "io.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_sb.h"
#include "journal_seq_blacklist.h"
#include "replicas.h"
#include "quota.h"
#include "super-io.h"
#include "super.h"
#include "trace.h"
#include "vstructs.h"
#include "counters.h"

#include <linux/backing-dev.h>
#include <linux/sort.h>
static const struct blk_holder_ops bch2_sb_handle_bdev_ops = {
};

static const char * const bch2_metadata_versions[] = {
#define x(t, n) [n] = #t,
	BCH_METADATA_VERSIONS()
#undef x
};

void bch2_version_to_text(struct printbuf *out, unsigned v)
{
	const char *str = v < ARRAY_SIZE(bch2_metadata_versions)
		? bch2_metadata_versions[v]
		: "(unknown version)";

	prt_printf(out, "%u: %s", v, str);
}

const char * const bch2_sb_fields[] = {
#define x(name, nr)	#name,
	BCH_SB_FIELDS()
#undef x
	NULL
};
static int bch2_sb_field_validate(struct bch_sb *, struct bch_sb_field *,
				  struct printbuf *);

struct bch_sb_field *bch2_sb_field_get(struct bch_sb *sb,
				       enum bch_sb_field_type type)
{
	struct bch_sb_field *f;

	/* XXX: need locking around superblock to access optional fields */

	vstruct_for_each(sb, f)
		if (le32_to_cpu(f->type) == type)
			return f;
	return NULL;
}
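
/*
 * Resize an optional superblock section in place (create it when @f is NULL,
 * delete it when @u64s is 0): any following sections are shifted with
 * memmove() and sb->u64s is updated. The caller must already have grown the
 * buffer (see bch2_sb_realloc()); this only BUG()s if the result wouldn't fit.
 */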
static struct bch_sb_field *__bch2_sb_field_resize(struct bch_sb_handle *sb,
						   struct bch_sb_field *f,
						   unsigned u64s)
{
	unsigned old_u64s = f ? le32_to_cpu(f->u64s) : 0;
	unsigned sb_u64s = le32_to_cpu(sb->sb->u64s) + u64s - old_u64s;

	BUG_ON(__vstruct_bytes(struct bch_sb, sb_u64s) > sb->buffer_size);

	if (!f && !u64s) {
		/* nothing to do: */
	} else if (!f) {
		f = vstruct_last(sb->sb);
		memset(f, 0, sizeof(u64) * u64s);
		f->u64s = cpu_to_le32(u64s);
		f->type = 0;
	} else {
		void *src, *dst;

		src = vstruct_end(f);

		if (u64s) {
			f->u64s = cpu_to_le32(u64s);
			dst = vstruct_end(f);
		} else {
			dst = f;
		}

		memmove(dst, src, vstruct_end(sb->sb) - src);

		if (dst > src)
			memset(src, 0, dst - src);
	}

	sb->sb->u64s = cpu_to_le32(sb_u64s);

	return u64s ? f : NULL;
}

void bch2_sb_field_delete(struct bch_sb_handle *sb,
			  enum bch_sb_field_type type)
{
	struct bch_sb_field *f = bch2_sb_field_get(sb->sb, type);

	if (f)
		__bch2_sb_field_resize(sb, f, 0);
}
/* Superblock realloc/free: */

void bch2_free_super(struct bch_sb_handle *sb)
{
	kfree(sb->bio);
	if (!IS_ERR_OR_NULL(sb->bdev))
		blkdev_put(sb->bdev, sb->holder);
	kfree(sb->holder);
	kfree(sb->sb);

	memset(sb, 0, sizeof(*sb));
}
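
/*
 * Grow the in-memory superblock buffer so it can hold @u64s worth of
 * sections: the allocation is rounded up to the device's logical block size
 * and then to a power of two, and the bio used for superblock IO is
 * reallocated to match.
 */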
int bch2_sb_realloc(struct bch_sb_handle *sb, unsigned u64s)
{
	size_t new_bytes = __vstruct_bytes(struct bch_sb, u64s);
	size_t new_buffer_size;
	struct bch_sb *new_sb;
	struct bio *bio;

	if (sb->bdev)
		new_bytes = max_t(size_t, new_bytes, bdev_logical_block_size(sb->bdev));

	new_buffer_size = roundup_pow_of_two(new_bytes);

	if (sb->sb && sb->buffer_size >= new_buffer_size)
		return 0;

	if (sb->have_layout) {
		u64 max_bytes = 512 << sb->sb->layout.sb_max_size_bits;

		if (new_bytes > max_bytes) {
			pr_err("%pg: superblock too big: want %zu but have %llu",
			       sb->bdev, new_bytes, max_bytes);
			return -BCH_ERR_ENOSPC_sb;
		}
	}

	if (sb->buffer_size >= new_buffer_size && sb->sb)
		return 0;

	if (dynamic_fault("bcachefs:add:super_realloc"))
		return -BCH_ERR_ENOMEM_sb_realloc_injected;

	if (sb->have_bio) {
		unsigned nr_bvecs = DIV_ROUND_UP(new_buffer_size, PAGE_SIZE);

		bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
		if (!bio)
			return -BCH_ERR_ENOMEM_sb_bio_realloc;

		bio_init(bio, NULL, bio->bi_inline_vecs, nr_bvecs, 0);

		kfree(sb->bio);
		sb->bio = bio;
	}

	new_sb = krealloc(sb->sb, new_buffer_size, GFP_NOFS|__GFP_ZERO);
	if (!new_sb)
		return -BCH_ERR_ENOMEM_sb_buf_realloc;

	sb->sb = new_sb;
	sb->buffer_size = new_buffer_size;

	return 0;
}
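
/*
 * Resize a section looked up by type; when called on a filesystem's
 * superblock handle this also grows the superblock buffer of every online
 * member device, so the resized superblock can later be copied out to them.
 */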
struct bch_sb_field *bch2_sb_field_resize(struct bch_sb_handle *sb,
					  enum bch_sb_field_type type,
					  unsigned u64s)
{
	struct bch_sb_field *f = bch2_sb_field_get(sb->sb, type);
	ssize_t old_u64s = f ? le32_to_cpu(f->u64s) : 0;
	ssize_t d = -old_u64s + u64s;

	if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s) + d))
		return NULL;

	if (sb->fs_sb) {
		struct bch_fs *c = container_of(sb, struct bch_fs, disk_sb);
		struct bch_dev *ca;
		unsigned i;

		lockdep_assert_held(&c->sb_lock);

		/* XXX: we're not checking that offline devices have enough space */

		for_each_online_member(ca, c, i) {
			struct bch_sb_handle *sb = &ca->disk_sb;

			if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s) + d)) {
				percpu_ref_put(&ca->ref);
				return NULL;
			}
		}
	}

	f = bch2_sb_field_get(sb->sb, type);
	f = __bch2_sb_field_resize(sb, f, u64s);
	if (f)
		f->type = cpu_to_le32(type);
	return f;
}
/* Superblock validate: */

static inline void __bch2_sb_layout_size_assert(void)
{
	BUILD_BUG_ON(sizeof(struct bch_sb_layout) != 512);
}

static int validate_sb_layout(struct bch_sb_layout *layout, struct printbuf *out)
{
	u64 offset, prev_offset, max_sectors;
	unsigned i;

	if (!uuid_equal(&layout->magic, &BCACHE_MAGIC) &&
	    !uuid_equal(&layout->magic, &BCHFS_MAGIC)) {
		prt_printf(out, "Not a bcachefs superblock layout");
		return -BCH_ERR_invalid_sb_layout;
	}

	if (layout->layout_type != 0) {
		prt_printf(out, "Invalid superblock layout type %u",
			   layout->layout_type);
		return -BCH_ERR_invalid_sb_layout_type;
	}

	if (!layout->nr_superblocks) {
		prt_printf(out, "Invalid superblock layout: no superblocks");
		return -BCH_ERR_invalid_sb_layout_nr_superblocks;
	}

	if (layout->nr_superblocks > ARRAY_SIZE(layout->sb_offset)) {
		prt_printf(out, "Invalid superblock layout: too many superblocks");
		return -BCH_ERR_invalid_sb_layout_nr_superblocks;
	}

	max_sectors = 1 << layout->sb_max_size_bits;

	prev_offset = le64_to_cpu(layout->sb_offset[0]);

	for (i = 1; i < layout->nr_superblocks; i++) {
		offset = le64_to_cpu(layout->sb_offset[i]);

		if (offset < prev_offset + max_sectors) {
			prt_printf(out, "Invalid superblock layout: superblocks overlap\n"
				   " (sb %u ends at %llu next starts at %llu",
				   i - 1, prev_offset + max_sectors, offset);
			return -BCH_ERR_invalid_sb_layout_superblocks_overlap;
		}
		prev_offset = offset;
	}

	return 0;
}
static int bch2_sb_compatible(struct bch_sb *sb, struct printbuf *out)
{
	u16 version		= le16_to_cpu(sb->version);
	u16 version_min		= le16_to_cpu(sb->version_min);

	if (!bch2_version_compatible(version)) {
		prt_str(out, "Unsupported superblock version ");
		bch2_version_to_text(out, version);
		prt_str(out, " (min ");
		bch2_version_to_text(out, bcachefs_metadata_version_min);
		prt_str(out, ", max ");
		bch2_version_to_text(out, bcachefs_metadata_version_current);
		prt_str(out, ")");
		return -BCH_ERR_invalid_sb_version;
	}

	if (!bch2_version_compatible(version_min)) {
		prt_str(out, "Unsupported superblock version_min ");
		bch2_version_to_text(out, version_min);
		prt_str(out, " (min ");
		bch2_version_to_text(out, bcachefs_metadata_version_min);
		prt_str(out, ", max ");
		bch2_version_to_text(out, bcachefs_metadata_version_current);
		prt_str(out, ")");
		return -BCH_ERR_invalid_sb_version;
	}

	if (version_min > version) {
		prt_str(out, "Bad minimum version ");
		bch2_version_to_text(out, version_min);
		prt_str(out, ", greater than version field ");
		bch2_version_to_text(out, version);
		return -BCH_ERR_invalid_sb_version;
	}

	return 0;
}
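
/*
 * Full superblock validation: version compatibility, global fields, layout,
 * persisted options, then each optional section via bch2_sb_field_validate()
 * (with the members section validated first, since other sections depend on
 * it).
 */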
static int bch2_sb_validate(struct bch_sb_handle *disk_sb, struct printbuf *out,
			    int rw)
{
	struct bch_sb *sb = disk_sb->sb;
	struct bch_sb_field *f;
	struct bch_sb_field_members *mi;
	enum bch_opt_id opt_id;
	u16 block_size;
	int ret;

	ret = bch2_sb_compatible(sb, out);
	if (ret)
		return ret;

	if (sb->features[1] ||
	    (le64_to_cpu(sb->features[0]) & (~0ULL << BCH_FEATURE_NR))) {
		prt_printf(out, "Filesystem has incompatible features");
		return -BCH_ERR_invalid_sb_features;
	}

	block_size = le16_to_cpu(sb->block_size);

	if (block_size > PAGE_SECTORS) {
		prt_printf(out, "Block size too big (got %u, max %u)",
			   block_size, PAGE_SECTORS);
		return -BCH_ERR_invalid_sb_block_size;
	}

	if (bch2_is_zero(sb->user_uuid.b, sizeof(sb->user_uuid))) {
		prt_printf(out, "Bad user UUID (got zeroes)");
		return -BCH_ERR_invalid_sb_uuid;
	}

	if (bch2_is_zero(sb->uuid.b, sizeof(sb->uuid))) {
		prt_printf(out, "Bad internal UUID (got zeroes)");
		return -BCH_ERR_invalid_sb_uuid;
	}

	if (!sb->nr_devices ||
	    sb->nr_devices > BCH_SB_MEMBERS_MAX) {
		prt_printf(out, "Bad number of member devices %u (max %u)",
			   sb->nr_devices, BCH_SB_MEMBERS_MAX);
		return -BCH_ERR_invalid_sb_too_many_members;
	}

	if (sb->dev_idx >= sb->nr_devices) {
		prt_printf(out, "Bad dev_idx (got %u, nr_devices %u)",
			   sb->dev_idx, sb->nr_devices);
		return -BCH_ERR_invalid_sb_dev_idx;
	}

	if (!sb->time_precision ||
	    le32_to_cpu(sb->time_precision) > NSEC_PER_SEC) {
		prt_printf(out, "Invalid time precision: %u (min 1, max %lu)",
			   le32_to_cpu(sb->time_precision), NSEC_PER_SEC);
		return -BCH_ERR_invalid_sb_time_precision;
	}

	if (rw == READ) {
		/*
		 * Been seeing a bug where these are getting inexplicably
		 * zeroed, so we're now validating them, but we have to be
		 * careful not to prevent people's filesystems from mounting:
		 */
		if (!BCH_SB_JOURNAL_FLUSH_DELAY(sb))
			SET_BCH_SB_JOURNAL_FLUSH_DELAY(sb, 1000);
		if (!BCH_SB_JOURNAL_RECLAIM_DELAY(sb))
			SET_BCH_SB_JOURNAL_RECLAIM_DELAY(sb, 1000);
	}

	for (opt_id = 0; opt_id < bch2_opts_nr; opt_id++) {
		const struct bch_option *opt = bch2_opt_table + opt_id;

		if (opt->get_sb != BCH2_NO_SB_OPT) {
			u64 v = bch2_opt_from_sb(sb, opt_id);

			prt_printf(out, "Invalid option ");
			ret = bch2_opt_validate(opt, v, out);
			if (ret)
				return ret;

			printbuf_reset(out);
		}
	}

	/* validate layout */
	ret = validate_sb_layout(&sb->layout, out);
	if (ret)
		return ret;

	vstruct_for_each(sb, f) {
		if (!f->u64s) {
			prt_printf(out, "Invalid superblock: optional field with size 0 (type %u)",
				   le32_to_cpu(f->type));
			return -BCH_ERR_invalid_sb_field_size;
		}

		if (vstruct_next(f) > vstruct_last(sb)) {
			prt_printf(out, "Invalid superblock: optional field extends past end of superblock (type %u)",
				   le32_to_cpu(f->type));
			return -BCH_ERR_invalid_sb_field_size;
		}
	}

	/* members must be validated first: */
	mi = bch2_sb_get_members(sb);
	if (!mi) {
		prt_printf(out, "Invalid superblock: member info area missing");
		return -BCH_ERR_invalid_sb_members_missing;
	}

	ret = bch2_sb_field_validate(sb, &mi->field, out);
	if (ret)
		return ret;

	vstruct_for_each(sb, f) {
		if (le32_to_cpu(f->type) == BCH_SB_FIELD_members)
			continue;

		ret = bch2_sb_field_validate(sb, f, out);
		if (ret)
			return ret;
	}

	return 0;
}
/* device open: */

static void bch2_sb_update(struct bch_fs *c)
{
	struct bch_sb *src = c->disk_sb.sb;
	struct bch_sb_field_members *mi = bch2_sb_get_members(src);
	struct bch_dev *ca;
	unsigned i;

	lockdep_assert_held(&c->sb_lock);

	c->sb.uuid		= src->uuid;
	c->sb.user_uuid		= src->user_uuid;
	c->sb.version		= le16_to_cpu(src->version);
	c->sb.version_min	= le16_to_cpu(src->version_min);
	c->sb.nr_devices	= src->nr_devices;
	c->sb.clean		= BCH_SB_CLEAN(src);
	c->sb.encryption_type	= BCH_SB_ENCRYPTION_TYPE(src);

	c->sb.nsec_per_time_unit = le32_to_cpu(src->time_precision);
	c->sb.time_units_per_sec = NSEC_PER_SEC / c->sb.nsec_per_time_unit;

	/* XXX this is wrong, we need a 96 or 128 bit integer type */
	c->sb.time_base_lo	= div_u64(le64_to_cpu(src->time_base_lo),
					  c->sb.nsec_per_time_unit);
	c->sb.time_base_hi	= le32_to_cpu(src->time_base_hi);

	c->sb.features		= le64_to_cpu(src->features[0]);
	c->sb.compat		= le64_to_cpu(src->compat[0]);

	for_each_member_device(ca, c, i)
		ca->mi = bch2_mi_to_cpu(mi->members + i);
}
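
/*
 * Copy the global fields and all shared (non single-device) sections from
 * @src into @dst_handle, growing the destination buffer as needed.
 */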
static int __copy_super(struct bch_sb_handle *dst_handle, struct bch_sb *src)
{
	struct bch_sb_field *src_f, *dst_f;
	struct bch_sb *dst = dst_handle->sb;
	unsigned i;

	dst->version		= src->version;
	dst->version_min	= src->version_min;
	dst->seq		= src->seq;
	dst->uuid		= src->uuid;
	dst->user_uuid		= src->user_uuid;
	memcpy(dst->label,	src->label, sizeof(dst->label));

	dst->block_size		= src->block_size;
	dst->nr_devices		= src->nr_devices;

	dst->time_base_lo	= src->time_base_lo;
	dst->time_base_hi	= src->time_base_hi;
	dst->time_precision	= src->time_precision;

	memcpy(dst->flags,	src->flags,	sizeof(dst->flags));
	memcpy(dst->features,	src->features,	sizeof(dst->features));
	memcpy(dst->compat,	src->compat,	sizeof(dst->compat));

	for (i = 0; i < BCH_SB_FIELD_NR; i++) {
		int d;

		if ((1U << i) & BCH_SINGLE_DEVICE_SB_FIELDS)
			continue;

		src_f = bch2_sb_field_get(src, i);
		dst_f = bch2_sb_field_get(dst, i);

		d = (src_f ? le32_to_cpu(src_f->u64s) : 0) -
		    (dst_f ? le32_to_cpu(dst_f->u64s) : 0);
		if (d > 0) {
			int ret = bch2_sb_realloc(dst_handle,
					le32_to_cpu(dst_handle->sb->u64s) + d);
			if (ret)
				return ret;

			dst = dst_handle->sb;
			dst_f = bch2_sb_field_get(dst, i);
		}

		dst_f = __bch2_sb_field_resize(dst_handle, dst_f,
				src_f ? le32_to_cpu(src_f->u64s) : 0);

		if (src_f)
			memcpy(dst_f, src_f, vstruct_bytes(src_f));
	}

	return 0;
}

int bch2_sb_to_fs(struct bch_fs *c, struct bch_sb *src)
{
	int ret;

	lockdep_assert_held(&c->sb_lock);

	ret =   bch2_sb_realloc(&c->disk_sb, 0) ?:
		__copy_super(&c->disk_sb, src) ?:
		bch2_sb_replicas_to_cpu_replicas(c) ?:
		bch2_sb_disk_groups_to_cpu(c);
	if (ret)
		return ret;

	bch2_sb_update(c);
	return 0;
}

int bch2_sb_from_fs(struct bch_fs *c, struct bch_dev *ca)
{
	return __copy_super(&ca->disk_sb, c->disk_sb.sb);
}
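
/*
 * Read and check a single superblock copy at @offset: magic, version
 * compatibility, size against the layout, checksum type and checksum; if the
 * superblock is bigger than the current buffer, the buffer is grown and the
 * read retried.
 */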
/* read superblock: */

static int read_one_super(struct bch_sb_handle *sb, u64 offset, struct printbuf *err)
{
	struct bch_csum csum;
	size_t bytes;
	int ret;
reread:
	bio_reset(sb->bio, sb->bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
	sb->bio->bi_iter.bi_sector = offset;
	bch2_bio_map(sb->bio, sb->sb, sb->buffer_size);

	ret = submit_bio_wait(sb->bio);
	if (ret) {
		prt_printf(err, "IO error: %i", ret);
		return ret;
	}

	if (!uuid_equal(&sb->sb->magic, &BCACHE_MAGIC) &&
	    !uuid_equal(&sb->sb->magic, &BCHFS_MAGIC)) {
		prt_printf(err, "Not a bcachefs superblock");
		return -BCH_ERR_invalid_sb_magic;
	}

	ret = bch2_sb_compatible(sb->sb, err);
	if (ret)
		return ret;

	bytes = vstruct_bytes(sb->sb);

	if (bytes > 512 << sb->sb->layout.sb_max_size_bits) {
		prt_printf(err, "Invalid superblock: too big (got %zu bytes, layout max %lu)",
			   bytes, 512UL << sb->sb->layout.sb_max_size_bits);
		return -BCH_ERR_invalid_sb_too_big;
	}

	if (bytes > sb->buffer_size) {
		ret = bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s));
		if (ret)
			return ret;
		goto reread;
	}

	if (BCH_SB_CSUM_TYPE(sb->sb) >= BCH_CSUM_NR) {
		prt_printf(err, "unknown checksum type %llu", BCH_SB_CSUM_TYPE(sb->sb));
		return -BCH_ERR_invalid_sb_csum_type;
	}

	/* XXX: verify MACs */
	csum = csum_vstruct(NULL, BCH_SB_CSUM_TYPE(sb->sb),
			    null_nonce(), sb->sb);

	if (bch2_crc_cmp(csum, sb->sb->csum)) {
		prt_printf(err, "bad checksum");
		return -BCH_ERR_invalid_sb_csum;
	}

	sb->seq = le64_to_cpu(sb->sb->seq);

	return 0;
}
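
/*
 * Open @path and read its superblock: try the default (or user supplied)
 * offset first, and on failure fall back to the backup copies listed in the
 * superblock layout at BCH_SB_LAYOUT_SECTOR.
 */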
int bch2_read_super(const char *path, struct bch_opts *opts,
		    struct bch_sb_handle *sb)
{
	u64 offset = opt_get(*opts, sb);
	struct bch_sb_layout layout;
	struct printbuf err = PRINTBUF;
	__le64 *i;
	int ret;

	pr_verbose_init(*opts, "");

	memset(sb, 0, sizeof(*sb));
	sb->mode	= BLK_OPEN_READ;
	sb->have_bio	= true;
	sb->holder	= kmalloc(1, GFP_KERNEL);
	if (!sb->holder)
		return -ENOMEM;

	if (!opt_get(*opts, noexcl))
		sb->mode |= BLK_OPEN_EXCL;

	if (!opt_get(*opts, nochanges))
		sb->mode |= BLK_OPEN_WRITE;

	sb->bdev = blkdev_get_by_path(path, sb->mode, sb->holder, &bch2_sb_handle_bdev_ops);
	if (IS_ERR(sb->bdev) &&
	    PTR_ERR(sb->bdev) == -EACCES &&
	    opt_get(*opts, read_only)) {
		sb->mode &= ~BLK_OPEN_WRITE;

		sb->bdev = blkdev_get_by_path(path, sb->mode, sb->holder, &bch2_sb_handle_bdev_ops);
		if (!IS_ERR(sb->bdev))
			opt_set(*opts, nochanges, true);
	}

	if (IS_ERR(sb->bdev)) {
		ret = PTR_ERR(sb->bdev);
		goto out;
	}

	ret = bch2_sb_realloc(sb, 0);
	if (ret) {
		prt_printf(&err, "error allocating memory for superblock");
		goto err;
	}

	if (bch2_fs_init_fault("read_super")) {
		prt_printf(&err, "dynamic fault");
		ret = -EFAULT;
		goto err;
	}

	ret = read_one_super(sb, offset, &err);
	if (!ret)
		goto got_super;

	if (opt_defined(*opts, sb))
		goto err;

	printk(KERN_ERR "bcachefs (%s): error reading default superblock: %s",
	       path, err.buf);
	printbuf_reset(&err);

	/*
	 * Error reading primary superblock - read location of backup
	 * superblocks:
	 */
	bio_reset(sb->bio, sb->bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
	sb->bio->bi_iter.bi_sector = BCH_SB_LAYOUT_SECTOR;
	/*
	 * use sb buffer to read layout, since sb buffer is page aligned but
	 * layout won't be:
	 */
	bch2_bio_map(sb->bio, sb->sb, sizeof(struct bch_sb_layout));

	ret = submit_bio_wait(sb->bio);
	if (ret) {
		prt_printf(&err, "IO error: %i", ret);
		goto err;
	}

	memcpy(&layout, sb->sb, sizeof(layout));
	ret = validate_sb_layout(&layout, &err);
	if (ret)
		goto err;

	for (i = layout.sb_offset;
	     i < layout.sb_offset + layout.nr_superblocks; i++) {
		offset = le64_to_cpu(*i);

		if (offset == opt_get(*opts, sb))
			continue;

		ret = read_one_super(sb, offset, &err);
		if (!ret)
			goto got_super;
	}

	goto err;

got_super:
	if (le16_to_cpu(sb->sb->block_size) << 9 <
	    bdev_logical_block_size(sb->bdev)) {
		prt_printf(&err, "block size (%u) smaller than device block size (%u)",
			   le16_to_cpu(sb->sb->block_size) << 9,
			   bdev_logical_block_size(sb->bdev));
		ret = -BCH_ERR_block_size_too_small;
		goto err;
	}

	ret = 0;
	sb->have_layout = true;

	ret = bch2_sb_validate(sb, &err, READ);
	if (ret) {
		printk(KERN_ERR "bcachefs (%s): error validating superblock: %s",
		       path, err.buf);
		goto err_no_print;
	}
out:
	pr_verbose_init(*opts, "ret %i", ret);
	printbuf_exit(&err);
	return ret;
err:
	printk(KERN_ERR "bcachefs (%s): error reading superblock: %s",
	       path, err.buf);
err_no_print:
	bch2_free_super(sb);
	goto out;
}
/* write superblock: */

static void write_super_endio(struct bio *bio)
{
	struct bch_dev *ca = bio->bi_private;

	/* XXX: return errors directly */

	if (bch2_dev_io_err_on(bio->bi_status, ca, "superblock write error: %s",
			       bch2_blk_status_to_str(bio->bi_status)))
		ca->sb_write_error = 1;

	closure_put(&ca->fs->sb_write);
	percpu_ref_put(&ca->io_ref);
}

static void read_back_super(struct bch_fs *c, struct bch_dev *ca)
{
	struct bch_sb *sb = ca->disk_sb.sb;
	struct bio *bio = ca->disk_sb.bio;

	bio_reset(bio, ca->disk_sb.bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
	bio->bi_iter.bi_sector	= le64_to_cpu(sb->layout.sb_offset[0]);
	bio->bi_end_io		= write_super_endio;
	bio->bi_private		= ca;
	bch2_bio_map(bio, ca->sb_read_scratch, PAGE_SIZE);

	this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_sb],
		     bio_sectors(bio));

	percpu_ref_get(&ca->io_ref);
	closure_bio_submit(bio, &c->sb_write);
}

static void write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx)
{
	struct bch_sb *sb = ca->disk_sb.sb;
	struct bio *bio = ca->disk_sb.bio;

	sb->offset = sb->layout.sb_offset[idx];

	SET_BCH_SB_CSUM_TYPE(sb, bch2_csum_opt_to_type(c->opts.metadata_checksum, false));
	sb->csum = csum_vstruct(c, BCH_SB_CSUM_TYPE(sb),
				null_nonce(), sb);

	bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
	bio->bi_iter.bi_sector	= le64_to_cpu(sb->offset);
	bio->bi_end_io		= write_super_endio;
	bio->bi_private		= ca;
	bch2_bio_map(bio, sb,
		     roundup((size_t) vstruct_bytes(sb),
			     bdev_logical_block_size(ca->disk_sb.bdev)));

	this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_sb],
		     bio_sectors(bio));

	percpu_ref_get(&ca->io_ref);
	closure_bio_submit(bio, &c->sb_write);
}
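
/*
 * Write the superblock to all online members: first read back the current
 * on-disk copies to catch lost or concurrent writes (sequence number lower or
 * higher than expected), then write each superblock copy in lockstep across
 * devices, and finally check that enough devices were written that the
 * filesystem could be mounted from them.
 */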
int bch2_write_super(struct bch_fs *c)
{
	struct closure *cl = &c->sb_write;
	struct bch_dev *ca;
	struct printbuf err = PRINTBUF;
	unsigned i, sb = 0, nr_wrote;
	struct bch_devs_mask sb_written;
	bool wrote, can_mount_without_written, can_mount_with_written;
	unsigned degraded_flags = BCH_FORCE_IF_DEGRADED;
	int ret = 0;

	trace_and_count(c, write_super, c, _RET_IP_);

	if (c->opts.very_degraded)
		degraded_flags |= BCH_FORCE_IF_LOST;

	lockdep_assert_held(&c->sb_lock);

	closure_init_stack(cl);
	memset(&sb_written, 0, sizeof(sb_written));

	if (c->opts.version_upgrade) {
		c->disk_sb.sb->magic = BCHFS_MAGIC;
		c->disk_sb.sb->layout.magic = BCHFS_MAGIC;
	}

	le64_add_cpu(&c->disk_sb.sb->seq, 1);

	if (test_bit(BCH_FS_ERROR, &c->flags))
		SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 1);
	if (test_bit(BCH_FS_TOPOLOGY_ERROR, &c->flags))
		SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 1);

	SET_BCH_SB_BIG_ENDIAN(c->disk_sb.sb, CPU_BIG_ENDIAN);

	bch2_sb_counters_from_cpu(c);

	for_each_online_member(ca, c, i)
		bch2_sb_from_fs(c, ca);

	for_each_online_member(ca, c, i) {
		printbuf_reset(&err);

		ret = bch2_sb_validate(&ca->disk_sb, &err, WRITE);
		if (ret) {
			bch2_fs_inconsistent(c, "sb invalid before write: %s", err.buf);
			percpu_ref_put(&ca->io_ref);
			goto out;
		}
	}

	if (c->opts.nochanges)
		goto out;

	/*
	 * Defer writing the superblock until filesystem initialization is
	 * complete - don't write out a partly initialized superblock:
	 */
	if (!BCH_SB_INITIALIZED(c->disk_sb.sb))
		goto out;

	for_each_online_member(ca, c, i) {
		__set_bit(ca->dev_idx, sb_written.d);
		ca->sb_write_error = 0;
	}

	for_each_online_member(ca, c, i)
		read_back_super(c, ca);
	closure_sync(cl);

	for_each_online_member(ca, c, i) {
		if (ca->sb_write_error)
			continue;

		if (le64_to_cpu(ca->sb_read_scratch->seq) < ca->disk_sb.seq) {
			bch2_fs_fatal_error(c,
				"Superblock write was silently dropped! (seq %llu expected %llu)",
				le64_to_cpu(ca->sb_read_scratch->seq),
				ca->disk_sb.seq);
			percpu_ref_put(&ca->io_ref);
			ret = -BCH_ERR_erofs_sb_err;
			goto out;
		}

		if (le64_to_cpu(ca->sb_read_scratch->seq) > ca->disk_sb.seq) {
			bch2_fs_fatal_error(c,
				"Superblock modified by another process (seq %llu expected %llu)",
				le64_to_cpu(ca->sb_read_scratch->seq),
				ca->disk_sb.seq);
			percpu_ref_put(&ca->io_ref);
			ret = -BCH_ERR_erofs_sb_err;
			goto out;
		}
	}

	do {
		wrote = false;
		for_each_online_member(ca, c, i)
			if (!ca->sb_write_error &&
			    sb < ca->disk_sb.sb->layout.nr_superblocks) {
				write_one_super(c, ca, sb);
				wrote = true;
			}
		closure_sync(cl);
		sb++;
	} while (wrote);

	for_each_online_member(ca, c, i) {
		if (ca->sb_write_error)
			__clear_bit(ca->dev_idx, sb_written.d);
		else
			ca->disk_sb.seq = le64_to_cpu(ca->disk_sb.sb->seq);
	}

	nr_wrote = dev_mask_nr(&sb_written);

	can_mount_with_written =
		bch2_have_enough_devs(c, sb_written, degraded_flags, false);

	for (i = 0; i < ARRAY_SIZE(sb_written.d); i++)
		sb_written.d[i] = ~sb_written.d[i];

	can_mount_without_written =
		bch2_have_enough_devs(c, sb_written, degraded_flags, false);

	/*
	 * If we would be able to mount _without_ the devices we successfully
	 * wrote superblocks to, we weren't able to write to enough devices:
	 *
	 * Exception: if we can mount without the successes because we haven't
	 * written anything (new filesystem), we continue if we'd be able to
	 * mount with the devices we did successfully write to:
	 */
	if (bch2_fs_fatal_err_on(!nr_wrote ||
				 !can_mount_with_written ||
				 (can_mount_without_written &&
				  !can_mount_with_written), c,
		"Unable to write superblock to sufficient devices (from %ps)",
		(void *) _RET_IP_))
		ret = -1;
out:
	/* Make new options visible after they're persistent: */
	bch2_sb_update(c);
	printbuf_exit(&err);
	return ret;
}
void __bch2_check_set_feature(struct bch_fs *c, unsigned feat)
{
	mutex_lock(&c->sb_lock);
	if (!(c->sb.features & (1ULL << feat))) {
		c->disk_sb.sb->features[0] |= cpu_to_le64(1ULL << feat);

		bch2_write_super(c);
	}
	mutex_unlock(&c->sb_lock);
}
/* BCH_SB_FIELD_members: */

static int bch2_sb_members_validate(struct bch_sb *sb,
				    struct bch_sb_field *f,
				    struct printbuf *err)
{
	struct bch_sb_field_members *mi = field_to_type(f, members);
	unsigned i;

	if ((void *) (mi->members + sb->nr_devices) >
	    vstruct_end(&mi->field)) {
		prt_printf(err, "too many devices for section size");
		return -BCH_ERR_invalid_sb_members;
	}

	for (i = 0; i < sb->nr_devices; i++) {
		struct bch_member *m = mi->members + i;

		if (!bch2_member_exists(m))
			continue;

		if (le64_to_cpu(m->nbuckets) > LONG_MAX) {
			prt_printf(err, "device %u: too many buckets (got %llu, max %lu)",
				   i, le64_to_cpu(m->nbuckets), LONG_MAX);
			return -BCH_ERR_invalid_sb_members;
		}

		if (le64_to_cpu(m->nbuckets) -
		    le16_to_cpu(m->first_bucket) < BCH_MIN_NR_NBUCKETS) {
			prt_printf(err, "device %u: not enough buckets (got %llu, max %u)",
				   i, le64_to_cpu(m->nbuckets), BCH_MIN_NR_NBUCKETS);
			return -BCH_ERR_invalid_sb_members;
		}

		if (le16_to_cpu(m->bucket_size) <
		    le16_to_cpu(sb->block_size)) {
			prt_printf(err, "device %u: bucket size %u smaller than block size %u",
				   i, le16_to_cpu(m->bucket_size), le16_to_cpu(sb->block_size));
			return -BCH_ERR_invalid_sb_members;
		}

		if (le16_to_cpu(m->bucket_size) <
		    BCH_SB_BTREE_NODE_SIZE(sb)) {
			prt_printf(err, "device %u: bucket size %u smaller than btree node size %llu",
				   i, le16_to_cpu(m->bucket_size), BCH_SB_BTREE_NODE_SIZE(sb));
			return -BCH_ERR_invalid_sb_members;
		}
	}

	return 0;
}
static void bch2_sb_members_to_text(struct printbuf *out, struct bch_sb *sb,
				    struct bch_sb_field *f)
{
	struct bch_sb_field_members *mi = field_to_type(f, members);
	struct bch_sb_field_disk_groups *gi = bch2_sb_get_disk_groups(sb);
	unsigned i;

	for (i = 0; i < sb->nr_devices; i++) {
		struct bch_member *m = mi->members + i;
		unsigned data_have = bch2_sb_dev_has_data(sb, i);
		u64 bucket_size = le16_to_cpu(m->bucket_size);
		u64 device_size = le64_to_cpu(m->nbuckets) * bucket_size;

		if (!bch2_member_exists(m))
			continue;

		prt_printf(out, "Device:");
		prt_tab(out);
		prt_printf(out, "%u", i);
		prt_newline(out);

		printbuf_indent_add(out, 2);

		prt_printf(out, "UUID:");
		prt_tab(out);
		pr_uuid(out, m->uuid.b);
		prt_newline(out);

		prt_printf(out, "Size:");
		prt_tab(out);
		prt_units_u64(out, device_size << 9);
		prt_newline(out);

		prt_printf(out, "Bucket size:");
		prt_tab(out);
		prt_units_u64(out, bucket_size << 9);
		prt_newline(out);

		prt_printf(out, "First bucket:");
		prt_tab(out);
		prt_printf(out, "%u", le16_to_cpu(m->first_bucket));
		prt_newline(out);

		prt_printf(out, "Buckets:");
		prt_tab(out);
		prt_printf(out, "%llu", le64_to_cpu(m->nbuckets));
		prt_newline(out);

		prt_printf(out, "Last mount:");
		prt_tab(out);
		if (m->last_mount)
			pr_time(out, le64_to_cpu(m->last_mount));
		else
			prt_printf(out, "(never)");
		prt_newline(out);

		prt_printf(out, "State:");
		prt_tab(out);
		prt_printf(out, "%s",
			   BCH_MEMBER_STATE(m) < BCH_MEMBER_STATE_NR
			   ? bch2_member_states[BCH_MEMBER_STATE(m)]
			   : "unknown");
		prt_newline(out);

		prt_printf(out, "Label:");
		prt_tab(out);
		if (BCH_MEMBER_GROUP(m)) {
			unsigned idx = BCH_MEMBER_GROUP(m) - 1;

			if (idx < disk_groups_nr(gi))
				prt_printf(out, "%s (%u)",
					   gi->entries[idx].label, idx);
			else
				prt_printf(out, "(bad disk labels section)");
		} else {
			prt_printf(out, "(none)");
		}
		prt_newline(out);

		prt_printf(out, "Data allowed:");
		prt_tab(out);
		if (BCH_MEMBER_DATA_ALLOWED(m))
			prt_bitflags(out, bch2_data_types, BCH_MEMBER_DATA_ALLOWED(m));
		else
			prt_printf(out, "(none)");
		prt_newline(out);

		prt_printf(out, "Has data:");
		prt_tab(out);
		if (data_have)
			prt_bitflags(out, bch2_data_types, data_have);
		else
			prt_printf(out, "(none)");
		prt_newline(out);

		prt_printf(out, "Discard:");
		prt_tab(out);
		prt_printf(out, "%llu", BCH_MEMBER_DISCARD(m));
		prt_newline(out);

		prt_printf(out, "Freespace initialized:");
		prt_tab(out);
		prt_printf(out, "%llu", BCH_MEMBER_FREESPACE_INITIALIZED(m));
		prt_newline(out);

		printbuf_indent_sub(out, 2);
	}
}

static const struct bch_sb_field_ops bch_sb_field_ops_members = {
	.validate	= bch2_sb_members_validate,
	.to_text	= bch2_sb_members_to_text,
};
/* BCH_SB_FIELD_crypt: */

static int bch2_sb_crypt_validate(struct bch_sb *sb,
				  struct bch_sb_field *f,
				  struct printbuf *err)
{
	struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);

	if (vstruct_bytes(&crypt->field) < sizeof(*crypt)) {
		prt_printf(err, "wrong size (got %zu should be %zu)",
			   vstruct_bytes(&crypt->field), sizeof(*crypt));
		return -BCH_ERR_invalid_sb_crypt;
	}

	if (BCH_CRYPT_KDF_TYPE(crypt)) {
		prt_printf(err, "bad kdf type %llu", BCH_CRYPT_KDF_TYPE(crypt));
		return -BCH_ERR_invalid_sb_crypt;
	}

	return 0;
}

static void bch2_sb_crypt_to_text(struct printbuf *out, struct bch_sb *sb,
				  struct bch_sb_field *f)
{
	struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);

	prt_printf(out, "KDF: %llu", BCH_CRYPT_KDF_TYPE(crypt));
	prt_newline(out);
	prt_printf(out, "scrypt n: %llu", BCH_KDF_SCRYPT_N(crypt));
	prt_newline(out);
	prt_printf(out, "scrypt r: %llu", BCH_KDF_SCRYPT_R(crypt));
	prt_newline(out);
	prt_printf(out, "scrypt p: %llu", BCH_KDF_SCRYPT_P(crypt));
	prt_newline(out);
}

static const struct bch_sb_field_ops bch_sb_field_ops_crypt = {
	.validate	= bch2_sb_crypt_validate,
	.to_text	= bch2_sb_crypt_to_text,
};
/* BCH_SB_FIELD_clean: */

int bch2_sb_clean_validate_late(struct bch_fs *c, struct bch_sb_field_clean *clean, int write)
{
	struct jset_entry *entry;
	int ret;

	for (entry = clean->start;
	     entry < (struct jset_entry *) vstruct_end(&clean->field);
	     entry = vstruct_next(entry)) {
		ret = bch2_journal_entry_validate(c, NULL, entry,
						  le16_to_cpu(c->disk_sb.sb->version),
						  BCH_SB_BIG_ENDIAN(c->disk_sb.sb),
						  write);
		if (ret)
			return ret;
	}

	return 0;
}
int bch2_fs_mark_dirty(struct bch_fs *c)
{
	int ret;

	/*
	 * Unconditionally write superblock, to verify it hasn't changed before
	 * we go rw:
	 */
	mutex_lock(&c->sb_lock);
	SET_BCH_SB_CLEAN(c->disk_sb.sb, false);

	c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALWAYS);
	c->disk_sb.sb->compat[0] &= cpu_to_le64((1ULL << BCH_COMPAT_NR) - 1);
	ret = bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return ret;
}
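
/*
 * Carve the next journal entry out of the buffer at *end, zero it, set its
 * size and advance *end past it.
 */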
static struct jset_entry *jset_entry_init(struct jset_entry **end, size_t size)
{
	struct jset_entry *entry = *end;
	unsigned u64s = DIV_ROUND_UP(size, sizeof(u64));

	memset(entry, 0, u64s * sizeof(u64));
	/*
	 * The u64s field counts from the start of data, ignoring the shared
	 * fields.
	 */
	entry->u64s = cpu_to_le16(u64s - 1);

	*end = vstruct_next(*end);
	return entry;
}
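
/*
 * Emit the journal entries that persist accounting state (filesystem usage,
 * replicas usage, per-device usage and the IO clocks); used when writing the
 * clean section of the superblock, and by the journal write path.
 */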
void bch2_journal_super_entries_add_common(struct bch_fs *c,
					   struct jset_entry **end,
					   u64 journal_seq)
{
	struct bch_dev *ca;
	unsigned i, dev;

	percpu_down_read(&c->mark_lock);

	if (!journal_seq) {
		for (i = 0; i < ARRAY_SIZE(c->usage); i++)
			bch2_fs_usage_acc_to_base(c, i);
	} else {
		bch2_fs_usage_acc_to_base(c, journal_seq & JOURNAL_BUF_MASK);
	}

	{
		struct jset_entry_usage *u =
			container_of(jset_entry_init(end, sizeof(*u)),
				     struct jset_entry_usage, entry);

		u->entry.type	= BCH_JSET_ENTRY_usage;
		u->entry.btree_id = BCH_FS_USAGE_inodes;
		u->v		= cpu_to_le64(c->usage_base->nr_inodes);
	}

	{
		struct jset_entry_usage *u =
			container_of(jset_entry_init(end, sizeof(*u)),
				     struct jset_entry_usage, entry);

		u->entry.type	= BCH_JSET_ENTRY_usage;
		u->entry.btree_id = BCH_FS_USAGE_key_version;
		u->v		= cpu_to_le64(atomic64_read(&c->key_version));
	}

	for (i = 0; i < BCH_REPLICAS_MAX; i++) {
		struct jset_entry_usage *u =
			container_of(jset_entry_init(end, sizeof(*u)),
				     struct jset_entry_usage, entry);

		u->entry.type	= BCH_JSET_ENTRY_usage;
		u->entry.btree_id = BCH_FS_USAGE_reserved;
		u->entry.level	= i;
		u->v		= cpu_to_le64(c->usage_base->persistent_reserved[i]);
	}

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);
		struct jset_entry_data_usage *u =
			container_of(jset_entry_init(end, sizeof(*u) + e->nr_devs),
				     struct jset_entry_data_usage, entry);

		u->entry.type	= BCH_JSET_ENTRY_data_usage;
		u->v		= cpu_to_le64(c->usage_base->replicas[i]);
		unsafe_memcpy(&u->r, e, replicas_entry_bytes(e),
			      "embedded variable length struct");
	}

	for_each_member_device(ca, c, dev) {
		unsigned b = sizeof(struct jset_entry_dev_usage) +
			sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR;
		struct jset_entry_dev_usage *u =
			container_of(jset_entry_init(end, b),
				     struct jset_entry_dev_usage, entry);

		u->entry.type	= BCH_JSET_ENTRY_dev_usage;
		u->dev		= cpu_to_le32(dev);
		u->buckets_ec	= cpu_to_le64(ca->usage_base->buckets_ec);

		for (i = 0; i < BCH_DATA_NR; i++) {
			u->d[i].buckets = cpu_to_le64(ca->usage_base->d[i].buckets);
			u->d[i].sectors = cpu_to_le64(ca->usage_base->d[i].sectors);
			u->d[i].fragmented = cpu_to_le64(ca->usage_base->d[i].fragmented);
		}
	}

	percpu_up_read(&c->mark_lock);

	for (i = 0; i < 2; i++) {
		struct jset_entry_clock *clock =
			container_of(jset_entry_init(end, sizeof(*clock)),
				     struct jset_entry_clock, entry);

		clock->entry.type = BCH_JSET_ENTRY_clock;
		clock->rw	= i;
		clock->time	= cpu_to_le64(atomic64_read(&c->io_clock[i].now));
	}
}
void bch2_fs_mark_clean(struct bch_fs *c)
{
	struct bch_sb_field_clean *sb_clean;
	struct jset_entry *entry;
	unsigned u64s;
	int ret;

	mutex_lock(&c->sb_lock);
	if (BCH_SB_CLEAN(c->disk_sb.sb))
		goto out;

	SET_BCH_SB_CLEAN(c->disk_sb.sb, true);

	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_metadata);
	c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_extents_above_btree_updates));
	c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_btree_updates_journalled));

	u64s = sizeof(*sb_clean) / sizeof(u64) + c->journal.entry_u64s_reserved;

	sb_clean = bch2_sb_resize_clean(&c->disk_sb, u64s);
	if (!sb_clean) {
		bch_err(c, "error resizing superblock while setting filesystem clean");
		goto out;
	}

	sb_clean->flags		= 0;
	sb_clean->journal_seq	= cpu_to_le64(atomic64_read(&c->journal.seq));

	/* Trying to catch outstanding bug: */
	BUG_ON(le64_to_cpu(sb_clean->journal_seq) > S64_MAX);

	entry = sb_clean->start;
	bch2_journal_super_entries_add_common(c, &entry, 0);
	entry = bch2_btree_roots_to_journal_entries(c, entry, entry);
	BUG_ON((void *) entry > vstruct_end(&sb_clean->field));

	memset(entry, 0,
	       vstruct_end(&sb_clean->field) - (void *) entry);

	/*
	 * this should be in the write path, and we should be validating every
	 * superblock section:
	 */
	ret = bch2_sb_clean_validate_late(c, sb_clean, WRITE);
	if (ret) {
		bch_err(c, "error writing marking filesystem clean: validate error");
		goto out;
	}

	bch2_write_super(c);
out:
	mutex_unlock(&c->sb_lock);
}
static int bch2_sb_clean_validate(struct bch_sb *sb,
				  struct bch_sb_field *f,
				  struct printbuf *err)
{
	struct bch_sb_field_clean *clean = field_to_type(f, clean);

	if (vstruct_bytes(&clean->field) < sizeof(*clean)) {
		prt_printf(err, "wrong size (got %zu should be %zu)",
			   vstruct_bytes(&clean->field), sizeof(*clean));
		return -BCH_ERR_invalid_sb_clean;
	}

	return 0;
}

static void bch2_sb_clean_to_text(struct printbuf *out, struct bch_sb *sb,
				  struct bch_sb_field *f)
{
	struct bch_sb_field_clean *clean = field_to_type(f, clean);
	struct jset_entry *entry;

	prt_printf(out, "flags: %x", le32_to_cpu(clean->flags));
	prt_newline(out);
	prt_printf(out, "journal_seq: %llu", le64_to_cpu(clean->journal_seq));
	prt_newline(out);

	for (entry = clean->start;
	     entry != vstruct_end(&clean->field);
	     entry = vstruct_next(entry)) {
		if (entry->type == BCH_JSET_ENTRY_btree_keys &&
		    !entry->u64s)
			continue;

		bch2_journal_entry_to_text(out, NULL, entry);
		prt_newline(out);
	}
}

static const struct bch_sb_field_ops bch_sb_field_ops_clean = {
	.validate	= bch2_sb_clean_validate,
	.to_text	= bch2_sb_clean_to_text,
};

static const struct bch_sb_field_ops *bch2_sb_field_ops[] = {
#define x(f, nr)					\
	[BCH_SB_FIELD_##f] = &bch_sb_field_ops_##f,
	BCH_SB_FIELDS()
#undef x
};
static int bch2_sb_field_validate(struct bch_sb *sb, struct bch_sb_field *f,
				  struct printbuf *err)
{
	unsigned type = le32_to_cpu(f->type);
	struct printbuf field_err = PRINTBUF;
	int ret;

	if (type >= BCH_SB_FIELD_NR)
		return 0;

	ret = bch2_sb_field_ops[type]->validate(sb, f, &field_err);
	if (ret) {
		prt_printf(err, "Invalid superblock section %s: %s",
			   bch2_sb_fields[type],
			   field_err.buf);
		prt_newline(err);
		bch2_sb_field_to_text(err, sb, f);
	}

	printbuf_exit(&field_err);
	return ret;
}
void bch2_sb_field_to_text(struct printbuf *out, struct bch_sb *sb,
			   struct bch_sb_field *f)
{
	unsigned type = le32_to_cpu(f->type);
	const struct bch_sb_field_ops *ops = type < BCH_SB_FIELD_NR
		? bch2_sb_field_ops[type] : NULL;

	if (!out->nr_tabstops)
		printbuf_tabstop_push(out, 32);

	if (ops)
		prt_printf(out, "%s", bch2_sb_fields[type]);
	else
		prt_printf(out, "(unknown field %u)", type);

	prt_printf(out, " (size %zu):", vstruct_bytes(f));
	prt_newline(out);

	if (ops && ops->to_text) {
		printbuf_indent_add(out, 2);
		bch2_sb_field_ops[type]->to_text(out, sb, f);
		printbuf_indent_sub(out, 2);
	}
}

void bch2_sb_layout_to_text(struct printbuf *out, struct bch_sb_layout *l)
{
	unsigned i;

	prt_printf(out, "Type: %u", l->layout_type);
	prt_newline(out);

	prt_str(out, "Superblock max size: ");
	prt_units_u64(out, 512 << l->sb_max_size_bits);
	prt_newline(out);

	prt_printf(out, "Nr superblocks: %u", l->nr_superblocks);
	prt_newline(out);

	prt_str(out, "Offsets: ");
	for (i = 0; i < l->nr_superblocks; i++) {
		if (i)
			prt_str(out, ", ");
		prt_printf(out, "%llu", le64_to_cpu(l->sb_offset[i]));
	}
	prt_newline(out);
}
void bch2_sb_to_text(struct printbuf *out, struct bch_sb *sb,
		     bool print_layout, unsigned fields)
{
	struct bch_sb_field_members *mi;
	struct bch_sb_field *f;
	u64 fields_have = 0;
	unsigned nr_devices = 0;

	if (!out->nr_tabstops)
		printbuf_tabstop_push(out, 44);

	mi = bch2_sb_get_members(sb);
	if (mi) {
		struct bch_member *m;

		for (m = mi->members;
		     m < mi->members + sb->nr_devices;
		     m++)
			nr_devices += bch2_member_exists(m);
	}

	prt_printf(out, "External UUID:");
	prt_tab(out);
	pr_uuid(out, sb->user_uuid.b);
	prt_newline(out);

	prt_printf(out, "Internal UUID:");
	prt_tab(out);
	pr_uuid(out, sb->uuid.b);
	prt_newline(out);

	prt_str(out, "Device index:");
	prt_tab(out);
	prt_printf(out, "%u", sb->dev_idx);
	prt_newline(out);

	prt_str(out, "Label:");
	prt_tab(out);
	prt_printf(out, "%.*s", (int) sizeof(sb->label), sb->label);
	prt_newline(out);

	prt_str(out, "Version:");
	prt_tab(out);
	bch2_version_to_text(out, le16_to_cpu(sb->version));
	prt_newline(out);

	prt_printf(out, "Oldest version on disk:");
	prt_tab(out);
	bch2_version_to_text(out, le16_to_cpu(sb->version_min));
	prt_newline(out);

	prt_printf(out, "Created:");
	prt_tab(out);
	if (sb->time_base_lo)
		pr_time(out, div_u64(le64_to_cpu(sb->time_base_lo), NSEC_PER_SEC));
	else
		prt_printf(out, "(not set)");
	prt_newline(out);

	prt_printf(out, "Sequence number:");
	prt_tab(out);
	prt_printf(out, "%llu", le64_to_cpu(sb->seq));
	prt_newline(out);

	prt_printf(out, "Superblock size:");
	prt_tab(out);
	prt_printf(out, "%zu", vstruct_bytes(sb));
	prt_newline(out);

	prt_printf(out, "Clean:");
	prt_tab(out);
	prt_printf(out, "%llu", BCH_SB_CLEAN(sb));
	prt_newline(out);

	prt_printf(out, "Devices:");
	prt_tab(out);
	prt_printf(out, "%u", nr_devices);
	prt_newline(out);

	prt_printf(out, "Sections:");
	vstruct_for_each(sb, f)
		fields_have |= 1 << le32_to_cpu(f->type);
	prt_tab(out);
	prt_bitflags(out, bch2_sb_fields, fields_have);
	prt_newline(out);

	prt_printf(out, "Features:");
	prt_tab(out);
	prt_bitflags(out, bch2_sb_features, le64_to_cpu(sb->features[0]));
	prt_newline(out);

	prt_printf(out, "Compat features:");
	prt_tab(out);
	prt_bitflags(out, bch2_sb_compat, le64_to_cpu(sb->compat[0]));
	prt_newline(out);

	prt_newline(out);
	prt_printf(out, "Options:");
	prt_newline(out);
	printbuf_indent_add(out, 2);
	{
		enum bch_opt_id id;

		for (id = 0; id < bch2_opts_nr; id++) {
			const struct bch_option *opt = bch2_opt_table + id;

			if (opt->get_sb != BCH2_NO_SB_OPT) {
				u64 v = bch2_opt_from_sb(sb, id);

				prt_printf(out, "%s:", opt->attr.name);
				prt_tab(out);
				bch2_opt_to_text(out, NULL, sb, opt, v,
						 OPT_HUMAN_READABLE|OPT_SHOW_FULL_LIST);
				prt_newline(out);
			}
		}
	}

	printbuf_indent_sub(out, 2);

	if (print_layout) {
		prt_newline(out);
		prt_printf(out, "layout:");
		prt_newline(out);
		printbuf_indent_add(out, 2);
		bch2_sb_layout_to_text(out, &sb->layout);
		printbuf_indent_sub(out, 2);
	}

	vstruct_for_each(sb, f)
		if (fields & (1 << le32_to_cpu(f->type))) {
			prt_newline(out);
			bch2_sb_field_to_text(out, sb, f);
		}
}