/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_SB_MEMBERS_H
#define _BCACHEFS_SB_MEMBERS_H

#include "darray.h"
#include "bkey_types.h"

extern char * const bch2_member_error_strs[];
2023-10-31 23:19:59 -04:00
static inline struct bch_member *
__bch2_members_v2_get_mut ( struct bch_sb_field_members_v2 * mi , unsigned i )
{
return ( void * ) mi - > _members + ( i * le16_to_cpu ( mi - > member_bytes ) ) ;
}
2023-10-25 15:51:16 -04:00
int bch2_sb_members_v2_init ( struct bch_fs * c ) ;
2023-10-25 16:29:37 -04:00
int bch2_sb_members_cpy_v2_v1 ( struct bch_sb_handle * disk_sb ) ;
2023-09-25 00:02:56 -04:00
struct bch_member * bch2_members_v2_get_mut ( struct bch_sb * sb , int i ) ;
2023-09-24 23:55:37 -04:00
struct bch_member bch2_sb_member_get ( struct bch_sb * sb , int i ) ;

static inline bool bch2_dev_is_online(struct bch_dev *ca)
{
	return !percpu_ref_is_zero(&ca->io_ref);
}

static inline bool bch2_dev_is_readable(struct bch_dev *ca)
{
	return bch2_dev_is_online(ca) &&
		ca->mi.state != BCH_MEMBER_STATE_failed;
}

static inline unsigned dev_mask_nr(const struct bch_devs_mask *devs)
{
	return bitmap_weight(devs->d, BCH_SB_MEMBERS_MAX);
}

static inline bool bch2_dev_list_has_dev(struct bch_devs_list devs,
					 unsigned dev)
{
	darray_for_each(devs, i)
		if (*i == dev)
			return true;
	return false;
}

static inline void bch2_dev_list_drop_dev(struct bch_devs_list *devs,
					  unsigned dev)
{
	darray_for_each(*devs, i)
		if (*i == dev) {
			darray_remove_item(devs, i);
			return;
		}
}

static inline void bch2_dev_list_add_dev(struct bch_devs_list *devs,
					 unsigned dev)
{
	if (!bch2_dev_list_has_dev(*devs, dev)) {
		BUG_ON(devs->nr >= ARRAY_SIZE(devs->data));
		devs->data[devs->nr++] = dev;
	}
}

static inline struct bch_devs_list bch2_dev_list_single(unsigned dev)
{
	return (struct bch_devs_list) { .nr = 1, .data[0] = dev };
}
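
/*
 * Illustrative usage of the device list helpers above (indices here are
 * just example values):
 *
 *	struct bch_devs_list devs = bch2_dev_list_single(2);
 *
 *	bch2_dev_list_add_dev(&devs, 5);
 *	if (bch2_dev_list_has_dev(devs, 5))
 *		...
 *	bch2_dev_list_drop_dev(&devs, 2);
 */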

static inline struct bch_dev *__bch2_next_dev_idx(struct bch_fs *c, unsigned idx,
						  const struct bch_devs_mask *mask)
{
	struct bch_dev *ca = NULL;

	while ((idx = mask
		? find_next_bit(mask->d, c->sb.nr_devices, idx)
		: idx) < c->sb.nr_devices &&
	       !(ca = rcu_dereference_check(c->devs[idx],
					    lockdep_is_held(&c->state_lock))))
		idx++;

	return ca;
}

static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, struct bch_dev *ca,
					      const struct bch_devs_mask *mask)
{
	return __bch2_next_dev_idx(c, ca ? ca->dev_idx + 1 : 0, mask);
}

#define for_each_member_device_rcu(_c, _ca, _mask)			\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = __bch2_next_dev((_c), _ca, (_mask)));)

static inline void bch2_dev_get(struct bch_dev *ca)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	BUG_ON(atomic_long_inc_return(&ca->ref) <= 1L);
#else
	percpu_ref_get(&ca->ref);
#endif
}

static inline void __bch2_dev_put(struct bch_dev *ca)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	long r = atomic_long_dec_return(&ca->ref);
	if (r < (long) !ca->dying)
		panic("bch_dev->ref underflow, last put: %pS\n", (void *) ca->last_put);
	ca->last_put = _THIS_IP_;
	if (!r)
		complete(&ca->ref_completion);
#else
	percpu_ref_put(&ca->ref);
#endif
}

static inline void bch2_dev_put(struct bch_dev *ca)
{
	if (ca)
		__bch2_dev_put(ca);
}

static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev *ca)
{
	rcu_read_lock();
	bch2_dev_put(ca);
	if ((ca = __bch2_next_dev(c, ca, NULL)))
		bch2_dev_get(ca);
	rcu_read_unlock();

	return ca;
}

/*
 * If you break early, you must drop your ref on the current device
 */
#define __for_each_member_device(_c, _ca)				\
	for (; (_ca = bch2_get_next_dev(_c, _ca));)

#define for_each_member_device(_c, _ca)					\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = bch2_get_next_dev(_c, _ca));)
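
/*
 * Illustrative example of the rule above (target_idx is just a placeholder):
 * the iterator drops and re-takes the per-device ref itself, so a manual
 * bch2_dev_put() is only needed when leaving the loop early:
 *
 *	for_each_member_device(c, ca)
 *		if (ca->dev_idx == target_idx) {
 *			bch2_dev_put(ca);
 *			break;
 *		}
 */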

static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
						       struct bch_dev *ca,
						       unsigned state_mask)
{
	rcu_read_lock();
	if (ca)
		percpu_ref_put(&ca->io_ref);

	while ((ca = __bch2_next_dev(c, ca, NULL)) &&
	       (!((1 << ca->mi.state) & state_mask) ||
		!percpu_ref_tryget(&ca->io_ref)))
		;
	rcu_read_unlock();

	return ca;
}

#define __for_each_online_member(_c, _ca, state_mask)			\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = bch2_get_next_online_dev(_c, _ca, state_mask));)

#define for_each_online_member(c, ca)					\
	__for_each_online_member(c, ca, ~0)

#define for_each_rw_member(c, ca)					\
	__for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw))

#define for_each_readable_member(c, ca)					\
	__for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro))
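
/*
 * Illustrative example: these variants visit only online devices in the
 * requested states, holding io_ref across each iteration:
 *
 *	for_each_rw_member(c, ca)
 *		pr_info("device %u is writable\n", ca->dev_idx);
 */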

static inline bool bch2_dev_exists(const struct bch_fs *c, unsigned dev)
{
	return dev < c->sb.nr_devices && c->devs[dev];
}

/*
 * Check that bucket @b lies in [first_bucket, nbuckets): the unsigned
 * subtraction wraps for b < first_bucket, so a single compare covers both
 * bounds.
 */
static inline bool bucket_valid(const struct bch_dev *ca, u64 b)
{
	return b - ca->mi.first_bucket < ca->mi.nbuckets_minus_first;
}

static inline struct bch_dev *bch2_dev_have_ref(const struct bch_fs *c, unsigned dev)
{
	EBUG_ON(!bch2_dev_exists(c, dev));

	return rcu_dereference_check(c->devs[dev], 1);
}

static inline struct bch_dev *bch2_dev_locked(struct bch_fs *c, unsigned dev)
{
	EBUG_ON(!bch2_dev_exists(c, dev));

	return rcu_dereference_protected(c->devs[dev],
					 lockdep_is_held(&c->sb_lock) ||
					 lockdep_is_held(&c->state_lock));
}

static inline struct bch_dev *bch2_dev_rcu(struct bch_fs *c, unsigned dev)
{
	return c && dev < c->sb.nr_devices
		? rcu_dereference(c->devs[dev])
		: NULL;
}

static inline struct bch_dev *bch2_dev_tryget_noerror(struct bch_fs *c, unsigned dev)
{
	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu(c, dev);
	if (ca)
		bch2_dev_get(ca);
	rcu_read_unlock();
	return ca;
}

void bch2_dev_missing(struct bch_fs *, unsigned);

static inline struct bch_dev *bch2_dev_tryget(struct bch_fs *c, unsigned dev)
{
	struct bch_dev *ca = bch2_dev_tryget_noerror(c, dev);
	if (!ca)
		bch2_dev_missing(c, dev);
	return ca;
}

static inline struct bch_dev *bch2_dev_bucket_tryget_noerror(struct bch_fs *c, struct bpos bucket)
{
	struct bch_dev *ca = bch2_dev_tryget_noerror(c, bucket.inode);
	if (ca && !bucket_valid(ca, bucket.offset)) {
		bch2_dev_put(ca);
		ca = NULL;
	}
	return ca;
}

void bch2_dev_bucket_missing(struct bch_fs *, struct bpos);

static inline struct bch_dev *bch2_dev_bucket_tryget(struct bch_fs *c, struct bpos bucket)
{
	struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, bucket);
	if (!ca)
		bch2_dev_bucket_missing(c, bucket);
	return ca;
}

static inline struct bch_dev *bch2_dev_iterate_noerror(struct bch_fs *c, struct bch_dev *ca, unsigned dev_idx)
{
	if (ca && ca->dev_idx == dev_idx)
		return ca;
	bch2_dev_put(ca);
	return bch2_dev_tryget_noerror(c, dev_idx);
}

static inline struct bch_dev *bch2_dev_iterate(struct bch_fs *c, struct bch_dev *ca, unsigned dev_idx)
{
	if (ca && ca->dev_idx == dev_idx)
		return ca;
	bch2_dev_put(ca);
	return bch2_dev_tryget(c, dev_idx);
}
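
/*
 * Illustrative usage: when walking keys that mostly point at the same device,
 * bch2_dev_iterate() reuses the ref already held if the index matches instead
 * of doing a fresh lookup per key:
 *
 *	struct bch_dev *ca = NULL;
 *
 *	(for each pointer p)
 *		ca = bch2_dev_iterate(c, ca, p.dev);
 *	bch2_dev_put(ca);
 */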

static inline struct bch_dev *bch2_dev_get_ioref(struct bch_fs *c, unsigned dev, int rw)
{
	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu(c, dev);
	if (ca && !percpu_ref_tryget(&ca->io_ref))
		ca = NULL;
	rcu_read_unlock();

	if (ca &&
	    (ca->mi.state == BCH_MEMBER_STATE_rw ||
	    (ca->mi.state == BCH_MEMBER_STATE_ro && rw == READ)))
		return ca;

	if (ca)
		percpu_ref_put(&ca->io_ref);
	return NULL;
}
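
/*
 * Illustrative usage: take an io_ref for a read and drop it once the I/O has
 * been issued/completed:
 *
 *	struct bch_dev *ca = bch2_dev_get_ioref(c, dev_idx, READ);
 *	if (!ca)
 *		return -EIO;
 *	(submit I/O to ca)
 *	percpu_ref_put(&ca->io_ref);
 */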

/* XXX kill, move to struct bch_fs */
static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
{
	struct bch_devs_mask devs;

	memset(&devs, 0, sizeof(devs));
	for_each_online_member(c, ca)
		__set_bit(ca->dev_idx, devs.d);
	return devs;
}

extern const struct bch_sb_field_ops bch_sb_field_ops_members_v1;
extern const struct bch_sb_field_ops bch_sb_field_ops_members_v2;

static inline bool bch2_member_alive(struct bch_member *m)
{
	return !bch2_is_zero(&m->uuid, sizeof(m->uuid));
}

static inline bool bch2_member_exists(struct bch_sb *sb, unsigned dev)
{
	if (dev < sb->nr_devices) {
		struct bch_member m = bch2_sb_member_get(sb, dev);
		return bch2_member_alive(&m);
	}
	return false;
}

static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
{
	return (struct bch_member_cpu) {
		.nbuckets		= le64_to_cpu(mi->nbuckets),
		.nbuckets_minus_first	= le64_to_cpu(mi->nbuckets) -
			le16_to_cpu(mi->first_bucket),
		.first_bucket		= le16_to_cpu(mi->first_bucket),
		.bucket_size		= le16_to_cpu(mi->bucket_size),
		.group			= BCH_MEMBER_GROUP(mi),
		.state			= BCH_MEMBER_STATE(mi),
		.discard		= BCH_MEMBER_DISCARD(mi),
		.data_allowed		= BCH_MEMBER_DATA_ALLOWED(mi),
		.durability		= BCH_MEMBER_DURABILITY(mi)
			? BCH_MEMBER_DURABILITY(mi) - 1
			: 1,
		.freespace_initialized	= BCH_MEMBER_FREESPACE_INITIALIZED(mi),
		.valid			= bch2_member_alive(mi),
		.btree_bitmap_shift	= mi->btree_bitmap_shift,
		.btree_allocated_bitmap	= le64_to_cpu(mi->btree_allocated_bitmap),
	};
}

void bch2_sb_members_from_cpu(struct bch_fs *);

void bch2_dev_io_errors_to_text(struct printbuf *, struct bch_dev *);
void bch2_dev_errors_reset(struct bch_dev *);

/*
 * The member's btree_allocated_bitmap divides the device into 64 regions of
 * (1 << btree_bitmap_shift) sectors each; the range [start, start + sectors)
 * only counts as marked if every region it touches has its bit set.
 */
static inline bool bch2_dev_btree_bitmap_marked_sectors(struct bch_dev *ca, u64 start, unsigned sectors)
{
	u64 end = start + sectors;

	if (end > 64ULL << ca->mi.btree_bitmap_shift)
		return false;

	for (unsigned bit = start >> ca->mi.btree_bitmap_shift;
	     (u64) bit << ca->mi.btree_bitmap_shift < end;
	     bit++)
		if (!(ca->mi.btree_allocated_bitmap & BIT_ULL(bit)))
			return false;
	return true;
}

bool bch2_dev_btree_bitmap_marked(struct bch_fs *, struct bkey_s_c);
void bch2_dev_btree_bitmap_mark(struct bch_fs *, struct bkey_s_c);

#endif /* _BCACHEFS_SB_MEMBERS_H */