/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_SB_MEMBERS_H
#define _BCACHEFS_SB_MEMBERS_H

#include "darray.h"

extern char * const bch2_member_error_strs[];

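/*
 * members_v2 stores its per-member size in member_bytes, so struct bch_member
 * can grow without a new field version; index by the stored stride, not
 * sizeof(struct bch_member):
 */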
static inline struct bch_member *
__bch2_members_v2_get_mut(struct bch_sb_field_members_v2 *mi, unsigned i)
{
	return (void *) mi->_members + (i * le16_to_cpu(mi->member_bytes));
}

int bch2_sb_members_v2_init(struct bch_fs *c);
int bch2_sb_members_cpy_v2_v1(struct bch_sb_handle *disk_sb);
struct bch_member *bch2_members_v2_get_mut(struct bch_sb *sb, int i);
struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i);

static inline bool bch2_dev_is_online(struct bch_dev *ca)
{
	return !percpu_ref_is_zero(&ca->io_ref);
}

static inline bool bch2_dev_is_readable(struct bch_dev *ca)
{
	return bch2_dev_is_online(ca) &&
		ca->mi.state != BCH_MEMBER_STATE_failed;
}

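/*
 * Try to take an io ref iff the device is online and can service I/O of
 * type @rw; on success the caller must drop it with percpu_ref_put():
 */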
static inline bool bch2_dev_get_ioref(struct bch_dev *ca, int rw)
{
	if (!percpu_ref_tryget(&ca->io_ref))
		return false;

	if (ca->mi.state == BCH_MEMBER_STATE_rw ||
	    (ca->mi.state == BCH_MEMBER_STATE_ro && rw == READ))
		return true;

	percpu_ref_put(&ca->io_ref);
	return false;
}

static inline unsigned dev_mask_nr(const struct bch_devs_mask *devs)
{
	return bitmap_weight(devs->d, BCH_SB_MEMBERS_MAX);
}

static inline bool bch2_dev_list_has_dev(struct bch_devs_list devs,
					 unsigned dev)
{
	darray_for_each(devs, i)
		if (*i == dev)
			return true;
	return false;
}

static inline void bch2_dev_list_drop_dev(struct bch_devs_list *devs,
					  unsigned dev)
{
	darray_for_each(*devs, i)
		if (*i == dev) {
			darray_remove_item(devs, i);
			return;
		}
}

static inline void bch2_dev_list_add_dev(struct bch_devs_list *devs,
					 unsigned dev)
{
	if (!bch2_dev_list_has_dev(*devs, dev)) {
		BUG_ON(devs->nr >= ARRAY_SIZE(devs->data));
		devs->data[devs->nr++] = dev;
	}
}

static inline struct bch_devs_list bch2_dev_list_single(unsigned dev)
{
	return (struct bch_devs_list) { .nr = 1, .data[0] = dev };
}
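
/*
 * Usage sketch (hypothetical caller; first_dev/second_dev are illustrative):
 *
 *	struct bch_devs_list devs = bch2_dev_list_single(first_dev);
 *
 *	bch2_dev_list_add_dev(&devs, second_dev);
 *	BUG_ON(!bch2_dev_list_has_dev(devs, second_dev));
 */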

static inline struct bch_dev *__bch2_next_dev_idx(struct bch_fs *c, unsigned idx,
						  const struct bch_devs_mask *mask)
{
	struct bch_dev *ca = NULL;

	while ((idx = mask
		? find_next_bit(mask->d, c->sb.nr_devices, idx)
		: idx) < c->sb.nr_devices &&
	       !(ca = rcu_dereference_check(c->devs[idx],
					    lockdep_is_held(&c->state_lock))))
		idx++;

	return ca;
}

static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, struct bch_dev *ca,
					      const struct bch_devs_mask *mask)
{
	return __bch2_next_dev_idx(c, ca ? ca->dev_idx + 1 : 0, mask);
}

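/*
 * Iterate without taking refs; the caller must be in an RCU read-side
 * section (or hold state_lock) for the whole walk:
 */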
#define for_each_member_device_rcu(_c, _ca, _mask)			\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = __bch2_next_dev((_c), _ca, (_mask)));)

static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev *ca)
{
	if (ca)
		percpu_ref_put(&ca->ref);

	rcu_read_lock();
	if ((ca = __bch2_next_dev(c, ca, NULL)))
		percpu_ref_get(&ca->ref);
	rcu_read_unlock();

	return ca;
}

/*
 * If you break early, you must drop your ref on the current device
 */
#define __for_each_member_device(_c, _ca)				\
	for (; (_ca = bch2_get_next_dev(_c, _ca));)

#define for_each_member_device(_c, _ca)					\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = bch2_get_next_dev(_c, _ca));)
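
/*
 * Usage sketch (hypothetical caller; target_idx is illustrative). Note the
 * explicit ref drop when breaking early, per the comment above:
 *
 *	for_each_member_device(c, ca)
 *		if (ca->dev_idx == target_idx) {
 *			percpu_ref_put(&ca->ref);
 *			break;
 *		}
 */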

static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
						       struct bch_dev *ca,
						       unsigned state_mask)
{
	if (ca)
		percpu_ref_put(&ca->io_ref);

	rcu_read_lock();
	while ((ca = __bch2_next_dev(c, ca, NULL)) &&
	       (!((1 << ca->mi.state) & state_mask) ||
		!percpu_ref_tryget(&ca->io_ref)))
		;
	rcu_read_unlock();

	return ca;
}

#define __for_each_online_member(_c, _ca, state_mask)			\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = bch2_get_next_online_dev(_c, _ca, state_mask));)

#define for_each_online_member(c, ca)					\
	__for_each_online_member(c, ca, ~0)

#define for_each_rw_member(c, ca)					\
	__for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw))

#define for_each_readable_member(c, ca)					\
	__for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro))
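
/*
 * The online-member iterators hold ca->io_ref, not ca->ref; as with
 * for_each_member_device(), breaking early means the caller drops it,
 * here with percpu_ref_put(&ca->io_ref).
 */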

/*
 * If a key exists that references a device, the device won't be going away and
 * we can omit rcu_read_lock():
 */
static inline struct bch_dev *bch_dev_bkey_exists(const struct bch_fs *c, unsigned idx)
{
	EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);

	return rcu_dereference_check(c->devs[idx], 1);
}

static inline struct bch_dev *bch_dev_locked(struct bch_fs *c, unsigned idx)
{
	EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);

	return rcu_dereference_protected(c->devs[idx],
					 lockdep_is_held(&c->sb_lock) ||
					 lockdep_is_held(&c->state_lock));
}

/* XXX kill, move to struct bch_fs */
static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
{
	struct bch_devs_mask devs;

	memset(&devs, 0, sizeof(devs));
	for_each_online_member(c, ca)
		__set_bit(ca->dev_idx, devs.d);
	return devs;
}

extern const struct bch_sb_field_ops bch_sb_field_ops_members_v1;
extern const struct bch_sb_field_ops bch_sb_field_ops_members_v2;

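/*
 * An all-zero UUID marks a member slot that was never used, or has been
 * deleted:
 */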
static inline bool bch2_member_exists(struct bch_member *m)
{
	return !bch2_is_zero(&m->uuid, sizeof(m->uuid));
}

static inline bool bch2_dev_exists(struct bch_sb *sb, unsigned dev)
{
	if (dev < sb->nr_devices) {
		struct bch_member m = bch2_sb_member_get(sb, dev);

		return bch2_member_exists(&m);
	}
	return false;
}

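/*
 * On disk, durability is biased by one so that zero means "unset, default
 * to 1"; undo that bias when converting to the cpu representation:
 */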
static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
{
	return (struct bch_member_cpu) {
		.nbuckets	= le64_to_cpu(mi->nbuckets),
		.first_bucket	= le16_to_cpu(mi->first_bucket),
		.bucket_size	= le16_to_cpu(mi->bucket_size),
		.group		= BCH_MEMBER_GROUP(mi),
		.state		= BCH_MEMBER_STATE(mi),
		.discard	= BCH_MEMBER_DISCARD(mi),
		.data_allowed	= BCH_MEMBER_DATA_ALLOWED(mi),
		.durability	= BCH_MEMBER_DURABILITY(mi)
			? BCH_MEMBER_DURABILITY(mi) - 1
			: 1,
		.freespace_initialized = BCH_MEMBER_FREESPACE_INITIALIZED(mi),
		.valid		= bch2_member_exists(mi),
	};
}

void bch2_sb_members_from_cpu(struct bch_fs *);

void bch2_dev_io_errors_to_text(struct printbuf *, struct bch_dev *);
void bch2_dev_errors_reset(struct bch_dev *);

#endif /* _BCACHEFS_SB_MEMBERS_H */