// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "buckets.h"
#include "journal.h"
#include "replicas.h"
#include "super-io.h"

static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
					    struct bch_replicas_cpu *);
/* Replicas tracking - in memory: */
static void verify_replicas_entry(struct bch_replicas_entry *e)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	unsigned i;

	BUG_ON(e->data_type >= BCH_DATA_NR);
	BUG_ON(!e->nr_devs);
	BUG_ON(e->nr_required > 1 &&
	       e->nr_required >= e->nr_devs);

	for (i = 0; i + 1 < e->nr_devs; i++)
		BUG_ON(e->devs[i] >= e->devs[i + 1]);
#endif
}
void bch2_replicas_entry_sort(struct bch_replicas_entry *e)
{
	bubble_sort(e->devs, e->nr_devs, u8_cmp);
}

static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
{
	eytzinger0_sort(r->entries, r->nr, r->entry_size, memcmp, NULL);
}
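/*
 * Note on the in-memory representation: every entry in a struct
 * bch_replicas_cpu occupies the same entry_size bytes (the size of the
 * largest entry, zero padded), and the table is kept in eytzinger order - a
 * cache-friendly binary search tree layout - so eytzinger0_find() can do
 * lookups with plain memcmp() as the comparison function.
 */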
static void bch2_replicas_entry_v0_to_text(struct printbuf *out,
					   struct bch_replicas_entry_v0 *e)
{
	unsigned i;

	if (e->data_type < BCH_DATA_NR)
		prt_printf(out, "%s", bch2_data_types[e->data_type]);
	else
		prt_printf(out, "(invalid data type %u)", e->data_type);

	prt_printf(out, ": %u [", e->nr_devs);
	for (i = 0; i < e->nr_devs; i++)
		prt_printf(out, i ? " %u" : "%u", e->devs[i]);
	prt_printf(out, "]");
}
void bch2_replicas_entry_to_text(struct printbuf *out,
				 struct bch_replicas_entry *e)
{
	unsigned i;

	if (e->data_type < BCH_DATA_NR)
		prt_printf(out, "%s", bch2_data_types[e->data_type]);
	else
		prt_printf(out, "(invalid data type %u)", e->data_type);

	prt_printf(out, ": %u/%u [", e->nr_required, e->nr_devs);
	for (i = 0; i < e->nr_devs; i++)
		prt_printf(out, i ? " %u" : "%u", e->devs[i]);
	prt_printf(out, "]");
}
void bch2_cpu_replicas_to_text(struct printbuf *out,
			       struct bch_replicas_cpu *r)
{
	struct bch_replicas_entry *e;
	bool first = true;

	for_each_cpu_replicas_entry(r, e) {
		if (!first)
			prt_printf(out, " ");
		first = false;

		bch2_replicas_entry_to_text(out, e);
	}
}
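/*
 * Extracting a replicas entry from a key: cached pointers don't contribute to
 * durability, so they're skipped; if any pointer belongs to an erasure coded
 * stripe, nr_required is set to 0 (redundancy is accounted via the stripe's
 * own replicas entry). For stripe keys themselves, nr_required is the number
 * of data blocks, i.e. blocks minus redundancy.
 */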
static void extent_to_replicas(struct bkey_s_c k,
			       struct bch_replicas_entry *r)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	r->nr_required	= 1;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if (p.ptr.cached)
			continue;

		if (!p.has_ec)
			r->devs[r->nr_devs++] = p.ptr.dev;
		else
			r->nr_required = 0;
	}
}
static void stripe_to_replicas(struct bkey_s_c k,
			       struct bch_replicas_entry *r)
{
	struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
	const struct bch_extent_ptr *ptr;

	r->nr_required	= s.v->nr_blocks - s.v->nr_redundant;

	for (ptr = s.v->ptrs;
	     ptr < s.v->ptrs + s.v->nr_blocks;
	     ptr++)
		r->devs[r->nr_devs++] = ptr->dev;
}
void bch2_bkey_to_replicas(struct bch_replicas_entry *e,
			   struct bkey_s_c k)
{
	e->nr_devs = 0;

	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		e->data_type = BCH_DATA_btree;
		extent_to_replicas(k, e);
		break;
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		e->data_type = BCH_DATA_user;
		extent_to_replicas(k, e);
		break;
	case KEY_TYPE_stripe:
		e->data_type = BCH_DATA_parity;
		stripe_to_replicas(k, e);
		break;
	}

	bch2_replicas_entry_sort(e);
}
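/*
 * A rough usage sketch (the caller here is illustrative, not from this file):
 * before a key referencing data on some set of devices becomes visible, build
 * its replicas entry and make sure it's marked:
 *
 *	struct bch_replicas_padded r;
 *
 *	bch2_bkey_to_replicas(&r.e, k);
 *	ret = bch2_mark_replicas(c, &r.e);
 *	if (ret)
 *		return ret;
 */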
void bch2_devlist_to_replicas(struct bch_replicas_entry *e,
			      enum bch_data_type data_type,
			      struct bch_devs_list devs)
{
	unsigned i;

	BUG_ON(!data_type ||
	       data_type == BCH_DATA_sb ||
	       data_type >= BCH_DATA_NR);

	e->data_type	= data_type;
	e->nr_devs	= 0;
	e->nr_required	= 1;

	for (i = 0; i < devs.nr; i++)
		e->devs[e->nr_devs++] = devs.devs[i];

	bch2_replicas_entry_sort(e);
}
static struct bch_replicas_cpu
cpu_replicas_add_entry(struct bch_replicas_cpu *old,
		       struct bch_replicas_entry *new_entry)
{
	unsigned i;
	struct bch_replicas_cpu new = {
		.nr		= old->nr + 1,
		.entry_size	= max_t(unsigned, old->entry_size,
					replicas_entry_bytes(new_entry)),
	};

	BUG_ON(!new_entry->data_type);
	verify_replicas_entry(new_entry);

	new.entries = kcalloc(new.nr, new.entry_size, GFP_KERNEL);
	if (!new.entries)
		return new;

	for (i = 0; i < old->nr; i++)
		memcpy(cpu_replicas_entry(&new, i),
		       cpu_replicas_entry(old, i),
		       old->entry_size);

	memcpy(cpu_replicas_entry(&new, old->nr),
	       new_entry,
	       replicas_entry_bytes(new_entry));

	bch2_cpu_replicas_sort(&new);
	return new;
}
static inline int __replicas_entry_idx(struct bch_replicas_cpu *r,
				       struct bch_replicas_entry *search)
{
	int idx, entry_size = replicas_entry_bytes(search);

	if (unlikely(entry_size > r->entry_size))
		return -1;

	verify_replicas_entry(search);

#define entry_cmp(_l, _r, size)	memcmp(_l, _r, entry_size)
	idx = eytzinger0_find(r->entries, r->nr, r->entry_size,
			      entry_cmp, search);
#undef entry_cmp

	return idx < r->nr ? idx : -1;
}

int bch2_replicas_entry_idx(struct bch_fs *c,
			    struct bch_replicas_entry *search)
{
	bch2_replicas_entry_sort(search);

	return __replicas_entry_idx(&c->replicas, search);
}
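/*
 * Lookups compare entries with a plain memcmp() over the search key's bytes,
 * so a search entry must have its device list in the same canonical sorted
 * form as the table - hence bch2_replicas_entry_idx() sorts its argument
 * before searching.
 */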
static bool __replicas_has_entry(struct bch_replicas_cpu *r,
				 struct bch_replicas_entry *search)
{
	return __replicas_entry_idx(r, search) >= 0;
}

bool bch2_replicas_marked(struct bch_fs *c,
			  struct bch_replicas_entry *search)
{
	bool marked;

	if (!search->nr_devs)
		return true;

	verify_replicas_entry(search);

	percpu_down_read(&c->mark_lock);
	marked = __replicas_has_entry(&c->replicas, search) &&
		(likely((!c->replicas_gc.entries)) ||
		 __replicas_has_entry(&c->replicas_gc, search));
	percpu_up_read(&c->mark_lock);

	return marked;
}
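/*
 * struct bch_fs_usage ends in a flexible array with one u64 counter per
 * replicas entry, indexed by the entry's position in c->replicas. Resizing
 * the replicas table therefore means reallocating every usage structure and
 * remapping each counter to its entry's index in the new table - which is
 * what the helpers below do.
 */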
static void __replicas_table_update(struct bch_fs_usage *dst,
				    struct bch_replicas_cpu *dst_r,
				    struct bch_fs_usage *src,
				    struct bch_replicas_cpu *src_r)
{
	int src_idx, dst_idx;

	*dst = *src;

	for (src_idx = 0; src_idx < src_r->nr; src_idx++) {
		if (!src->replicas[src_idx])
			continue;

		dst_idx = __replicas_entry_idx(dst_r,
				cpu_replicas_entry(src_r, src_idx));
		BUG_ON(dst_idx < 0);

		dst->replicas[dst_idx] = src->replicas[src_idx];
	}
}

static void __replicas_table_update_pcpu(struct bch_fs_usage __percpu *dst_p,
					 struct bch_replicas_cpu *dst_r,
					 struct bch_fs_usage __percpu *src_p,
					 struct bch_replicas_cpu *src_r)
{
	unsigned src_nr = sizeof(struct bch_fs_usage) / sizeof(u64) + src_r->nr;
	struct bch_fs_usage *dst, *src = (void *)
		bch2_acc_percpu_u64s((u64 __percpu *) src_p, src_nr);

	preempt_disable();
	dst = this_cpu_ptr(dst_p);
	preempt_enable();

	__replicas_table_update(dst, dst_r, src, src_r);
}
/*
 * Resize filesystem accounting:
 */
static int replicas_table_update(struct bch_fs *c,
				 struct bch_replicas_cpu *new_r)
{
	struct bch_fs_usage __percpu *new_usage[JOURNAL_BUF_NR];
	struct bch_fs_usage_online *new_scratch = NULL;
	struct bch_fs_usage __percpu *new_gc = NULL;
	struct bch_fs_usage *new_base = NULL;
	unsigned i, bytes = sizeof(struct bch_fs_usage) +
		sizeof(u64) * new_r->nr;
	unsigned scratch_bytes = sizeof(struct bch_fs_usage_online) +
		sizeof(u64) * new_r->nr;
	int ret = 0;

	memset(new_usage, 0, sizeof(new_usage));

	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
		if (!(new_usage[i] = __alloc_percpu_gfp(bytes,
					sizeof(u64), GFP_KERNEL)))
			goto err;

	if (!(new_base = kzalloc(bytes, GFP_KERNEL)) ||
	    !(new_scratch = kmalloc(scratch_bytes, GFP_KERNEL)) ||
	    (c->usage_gc &&
	     !(new_gc = __alloc_percpu_gfp(bytes, sizeof(u64), GFP_KERNEL))))
		goto err;

	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
		if (c->usage[i])
			__replicas_table_update_pcpu(new_usage[i], new_r,
						     c->usage[i], &c->replicas);
	if (c->usage_base)
		__replicas_table_update(new_base, new_r,
					c->usage_base, &c->replicas);
	if (c->usage_gc)
		__replicas_table_update_pcpu(new_gc, new_r,
					     c->usage_gc, &c->replicas);

	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
		swap(c->usage[i], new_usage[i]);
	swap(c->usage_base,	new_base);
	swap(c->usage_scratch,	new_scratch);
	swap(c->usage_gc,	new_gc);
	swap(c->replicas,	*new_r);
out:
	free_percpu(new_gc);
	kfree(new_scratch);
	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
		free_percpu(new_usage[i]);
	kfree(new_base);
	return ret;
err:
	bch_err(c, "error updating replicas table: memory allocation failure");
	ret = -BCH_ERR_ENOMEM_replicas_table;
	goto out;
}
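/*
 * Journal writes can carry a snapshot of filesystem usage, so enough journal
 * space is reserved up front: one jset_entry_usage per global counter
 * (nr_inodes, key_version, and each persistent_reserved counter), plus one
 * jset_entry_data_usage per replicas entry. This is recomputed whenever the
 * replicas table grows.
 */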
static unsigned reserve_journal_replicas(struct bch_fs *c,
					 struct bch_replicas_cpu *r)
{
	struct bch_replicas_entry *e;
	unsigned journal_res_u64s = 0;

	/* nr_inodes: */
	journal_res_u64s +=
		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));

	/* key_version: */
	journal_res_u64s +=
		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));

	/* persistent_reserved: */
	journal_res_u64s +=
		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64)) *
		BCH_REPLICAS_MAX;

	for_each_cpu_replicas_entry(r, e)
		journal_res_u64s +=
			DIV_ROUND_UP(sizeof(struct jset_entry_data_usage) +
				     e->nr_devs, sizeof(u64));
	return journal_res_u64s;
}
noinline
static int bch2_mark_replicas_slowpath(struct bch_fs *c,
				struct bch_replicas_entry *new_entry)
{
	struct bch_replicas_cpu new_r, new_gc;
	int ret = 0;

	verify_replicas_entry(new_entry);

	memset(&new_r, 0, sizeof(new_r));
	memset(&new_gc, 0, sizeof(new_gc));

	mutex_lock(&c->sb_lock);

	if (c->replicas_gc.entries &&
	    !__replicas_has_entry(&c->replicas_gc, new_entry)) {
		new_gc = cpu_replicas_add_entry(&c->replicas_gc, new_entry);
		if (!new_gc.entries) {
			ret = -BCH_ERR_ENOMEM_cpu_replicas;
			goto err;
		}
	}

	if (!__replicas_has_entry(&c->replicas, new_entry)) {
		new_r = cpu_replicas_add_entry(&c->replicas, new_entry);
		if (!new_r.entries) {
			ret = -BCH_ERR_ENOMEM_cpu_replicas;
			goto err;
		}

		ret = bch2_cpu_replicas_to_sb_replicas(c, &new_r);
		if (ret)
			goto err;

		bch2_journal_entry_res_resize(&c->journal,
				&c->replicas_journal_res,
				reserve_journal_replicas(c, &new_r));
	}

	if (!new_r.entries &&
	    !new_gc.entries)
		goto out;

	/* allocations done, now commit: */

	if (new_r.entries)
		bch2_write_super(c);

	/* don't update in memory replicas until changes are persistent */
	percpu_down_write(&c->mark_lock);
	if (new_r.entries)
		ret = replicas_table_update(c, &new_r);
	if (new_gc.entries)
		swap(new_gc, c->replicas_gc);
	percpu_up_write(&c->mark_lock);
out:
	mutex_unlock(&c->sb_lock);

	kfree(new_r.entries);
	kfree(new_gc.entries);

	return ret;
err:
	bch_err_msg(c, ret, "adding replicas entry");
	goto out;
}
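/*
 * bch2_mark_replicas(), below, is the entry point for ensuring a replicas
 * entry is persisted: the fast path is just a lookup under mark_lock; the
 * slowpath above takes sb_lock, adds the entry to the superblock and writes
 * it out before updating the in-memory tables, so on-disk metadata never
 * references a combination of devices the superblock doesn't list.
 */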
int bch2_mark_replicas(struct bch_fs *c, struct bch_replicas_entry *r)
{
	return likely(bch2_replicas_marked(c, r))
		? 0 : bch2_mark_replicas_slowpath(c, r);
}
/* replicas delta list: */

int bch2_replicas_delta_list_mark(struct bch_fs *c,
				  struct replicas_delta_list *r)
{
	struct replicas_delta *d = r->d;
	struct replicas_delta *top = (void *) r->d + r->used;
	int ret = 0;

	for (d = r->d; !ret && d != top; d = replicas_delta_next(d))
		ret = bch2_mark_replicas(c, &d->r);
	return ret;
}

/*
 * Old replicas_gc mechanism: only used for journal replicas entries now, should
 * die at some point:
 */
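/*
 * Roughly, the protocol is: bch2_replicas_gc_start() snapshots into
 * c->replicas_gc every entry whose data type is *not* being gc'd; while gc
 * runs, bch2_mark_replicas() re-adds any entry still in use to the gc table
 * as well; bch2_replicas_gc_end() then installs the gc table as the live one,
 * dropping entries of the gc'd types that were never re-marked.
 */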
int bch2_replicas_gc_end(struct bch_fs *c, int ret)
{
	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);
	percpu_down_write(&c->mark_lock);

	ret =   ret ?:
		bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc) ?:
		replicas_table_update(c, &c->replicas_gc);

	kfree(c->replicas_gc.entries);
	c->replicas_gc.entries = NULL;

	percpu_up_write(&c->mark_lock);

	if (!ret)
		bch2_write_super(c);

	mutex_unlock(&c->sb_lock);

	return ret;
}
int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
{
	struct bch_replicas_entry *e;
	unsigned i = 0;

	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);
	BUG_ON(c->replicas_gc.entries);

	c->replicas_gc.nr		= 0;
	c->replicas_gc.entry_size	= 0;

	for_each_cpu_replicas_entry(&c->replicas, e)
		if (!((1 << e->data_type) & typemask)) {
			c->replicas_gc.nr++;
			c->replicas_gc.entry_size =
				max_t(unsigned, c->replicas_gc.entry_size,
				      replicas_entry_bytes(e));
		}

	c->replicas_gc.entries = kcalloc(c->replicas_gc.nr,
					 c->replicas_gc.entry_size,
					 GFP_KERNEL);
	if (!c->replicas_gc.entries) {
		mutex_unlock(&c->sb_lock);
		bch_err(c, "error allocating c->replicas_gc");
		return -BCH_ERR_ENOMEM_replicas_gc;
	}

	for_each_cpu_replicas_entry(&c->replicas, e)
		if (!((1 << e->data_type) & typemask))
			memcpy(cpu_replicas_entry(&c->replicas_gc, i++),
			       e, c->replicas_gc.entry_size);

	bch2_cpu_replicas_sort(&c->replicas_gc);
	mutex_unlock(&c->sb_lock);

	return 0;
}
/*
 * New much simpler mechanism for clearing out unneeded replicas entries - drop
 * replicas entries that have 0 sectors used.
 *
 * However, we don't track sector counts for journal usage, so this doesn't drop
 * any BCH_DATA_journal entries; the old bch2_replicas_gc_(start|end) mechanism
 * is retained for that.
 */
int bch2_replicas_gc2(struct bch_fs *c)
{
	struct bch_replicas_cpu new = { 0 };
	unsigned i, nr;
	int ret = 0;

	bch2_journal_meta(&c->journal);
retry:
	nr		= READ_ONCE(c->replicas.nr);
	new.entry_size	= READ_ONCE(c->replicas.entry_size);
	new.entries	= kcalloc(nr, new.entry_size, GFP_KERNEL);
	if (!new.entries) {
		bch_err(c, "error allocating c->replicas_gc");
		return -BCH_ERR_ENOMEM_replicas_gc;
	}

	mutex_lock(&c->sb_lock);
	percpu_down_write(&c->mark_lock);

	if (nr			!= c->replicas.nr ||
	    new.entry_size	!= c->replicas.entry_size) {
		percpu_up_write(&c->mark_lock);
		mutex_unlock(&c->sb_lock);
		kfree(new.entries);
		goto retry;
	}

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);

		if (e->data_type == BCH_DATA_journal ||
		    c->usage_base->replicas[i] ||
		    percpu_u64_get(&c->usage[0]->replicas[i]) ||
		    percpu_u64_get(&c->usage[1]->replicas[i]) ||
		    percpu_u64_get(&c->usage[2]->replicas[i]) ||
		    percpu_u64_get(&c->usage[3]->replicas[i]))
			memcpy(cpu_replicas_entry(&new, new.nr++),
			       e, new.entry_size);
	}

	bch2_cpu_replicas_sort(&new);

	ret =   bch2_cpu_replicas_to_sb_replicas(c, &new) ?:
		replicas_table_update(c, &new);

	kfree(new.entries);

	percpu_up_write(&c->mark_lock);

	if (!ret)
		bch2_write_super(c);

	mutex_unlock(&c->sb_lock);

	return ret;
}
int bch2_replicas_set_usage(struct bch_fs *c,
			    struct bch_replicas_entry *r,
			    u64 sectors)
{
	int ret, idx = bch2_replicas_entry_idx(c, r);

	if (idx < 0) {
		struct bch_replicas_cpu n;

		n = cpu_replicas_add_entry(&c->replicas, r);
		if (!n.entries)
			return -BCH_ERR_ENOMEM_cpu_replicas;

		ret = replicas_table_update(c, &n);
		if (ret)
			return ret;

		kfree(n.entries);

		idx = bch2_replicas_entry_idx(c, r);
		BUG_ON(idx < 0);
	}

	c->usage_base->replicas[idx] = sectors;

	return 0;
}
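/*
 * bch2_replicas_set_usage() overwrites the base usage counter for a single
 * entry directly; it's meant for reconstructing accounting (e.g. when
 * replaying usage entries from the journal), not for normal runtime updates.
 */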
/* Replicas tracking - superblock: */

static int
__bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r,
				   struct bch_replicas_cpu *cpu_r)
{
	struct bch_replicas_entry *e, *dst;
	unsigned nr = 0, entry_size = 0, idx = 0;

	for_each_replicas_entry(sb_r, e) {
		entry_size = max_t(unsigned, entry_size,
				   replicas_entry_bytes(e));
		nr++;
	}

	cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
	if (!cpu_r->entries)
		return -BCH_ERR_ENOMEM_cpu_replicas;

	cpu_r->nr		= nr;
	cpu_r->entry_size	= entry_size;

	for_each_replicas_entry(sb_r, e) {
		dst = cpu_replicas_entry(cpu_r, idx++);
		memcpy(dst, e, replicas_entry_bytes(e));
		bch2_replicas_entry_sort(dst);
	}

	return 0;
}
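/*
 * v0 superblock entries lack the nr_required field: when converting to the
 * in-memory format, entry_size is padded out for the larger struct and
 * nr_required defaults to 1.
 */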
static int
__bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
				      struct bch_replicas_cpu *cpu_r)
{
	struct bch_replicas_entry_v0 *e;
	unsigned nr = 0, entry_size = 0, idx = 0;

	for_each_replicas_entry(sb_r, e) {
		entry_size = max_t(unsigned, entry_size,
				   replicas_entry_bytes(e));
		nr++;
	}

	entry_size += sizeof(struct bch_replicas_entry) -
		sizeof(struct bch_replicas_entry_v0);

	cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
	if (!cpu_r->entries)
		return -BCH_ERR_ENOMEM_cpu_replicas;

	cpu_r->nr		= nr;
	cpu_r->entry_size	= entry_size;

	for_each_replicas_entry(sb_r, e) {
		struct bch_replicas_entry *dst =
			cpu_replicas_entry(cpu_r, idx++);

		dst->data_type	= e->data_type;
		dst->nr_devs	= e->nr_devs;
		dst->nr_required = 1;
		memcpy(dst->devs, e->devs, e->nr_devs);
		bch2_replicas_entry_sort(dst);
	}

	return 0;
}
int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
{
	struct bch_sb_field_replicas *sb_v1;
	struct bch_sb_field_replicas_v0 *sb_v0;
	struct bch_replicas_cpu new_r = { 0, 0, NULL };
	int ret = 0;

	if ((sb_v1 = bch2_sb_field_get(c->disk_sb.sb, replicas)))
		ret = __bch2_sb_replicas_to_cpu_replicas(sb_v1, &new_r);
	else if ((sb_v0 = bch2_sb_field_get(c->disk_sb.sb, replicas_v0)))
		ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_v0, &new_r);
	if (ret)
		return ret;

	bch2_cpu_replicas_sort(&new_r);

	percpu_down_write(&c->mark_lock);

	ret = replicas_table_update(c, &new_r);
	percpu_up_write(&c->mark_lock);

	kfree(new_r.entries);

	return 0;
}
static int bch2_cpu_replicas_to_sb_replicas_v0(struct bch_fs *c,
					       struct bch_replicas_cpu *r)
{
	struct bch_sb_field_replicas_v0 *sb_r;
	struct bch_replicas_entry_v0 *dst;
	struct bch_replicas_entry *src;
	size_t bytes;

	bytes = sizeof(struct bch_sb_field_replicas);

	for_each_cpu_replicas_entry(r, src)
		bytes += replicas_entry_bytes(src) - 1;

	sb_r = bch2_sb_field_resize(&c->disk_sb, replicas_v0,
			DIV_ROUND_UP(bytes, sizeof(u64)));
	if (!sb_r)
		return -BCH_ERR_ENOSPC_sb_replicas;

	bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas);
	sb_r = bch2_sb_field_get(c->disk_sb.sb, replicas_v0);

	memset(&sb_r->entries, 0,
	       vstruct_end(&sb_r->field) -
	       (void *) &sb_r->entries);

	dst = sb_r->entries;
	for_each_cpu_replicas_entry(r, src) {
		dst->data_type	= src->data_type;
		dst->nr_devs	= src->nr_devs;
		memcpy(dst->devs, src->devs, src->nr_devs);

		dst = replicas_entry_next(dst);

		BUG_ON((void *) dst > vstruct_end(&sb_r->field));
	}

	return 0;
}
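/*
 * Only one of the two superblock replicas fields is kept: the v0 format is
 * written whenever every entry has nr_required == 1 (the common case, and
 * readable by older versions); otherwise the current format is written and
 * the v0 field deleted.
 */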
static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
					    struct bch_replicas_cpu *r)
{
	struct bch_sb_field_replicas *sb_r;
	struct bch_replicas_entry *dst, *src;
	bool need_v1 = false;
	size_t bytes;

	bytes = sizeof(struct bch_sb_field_replicas);

	for_each_cpu_replicas_entry(r, src) {
		bytes += replicas_entry_bytes(src);
		if (src->nr_required != 1)
			need_v1 = true;
	}

	if (!need_v1)
		return bch2_cpu_replicas_to_sb_replicas_v0(c, r);

	sb_r = bch2_sb_field_resize(&c->disk_sb, replicas,
			DIV_ROUND_UP(bytes, sizeof(u64)));
	if (!sb_r)
		return -BCH_ERR_ENOSPC_sb_replicas;

	bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas_v0);
	sb_r = bch2_sb_field_get(c->disk_sb.sb, replicas);

	memset(&sb_r->entries, 0,
	       vstruct_end(&sb_r->field) -
	       (void *) &sb_r->entries);

	dst = sb_r->entries;
	for_each_cpu_replicas_entry(r, src) {
		memcpy(dst, src, replicas_entry_bytes(src));

		dst = replicas_entry_next(dst);

		BUG_ON((void *) dst > vstruct_end(&sb_r->field));
	}

	return 0;
}
static int bch2_cpu_replicas_validate(struct bch_replicas_cpu *cpu_r,
				      struct bch_sb *sb,
				      struct printbuf *err)
{
	unsigned i, j;

	sort_cmp_size(cpu_r->entries,
		      cpu_r->nr,
		      cpu_r->entry_size,
		      memcmp, NULL);

	for (i = 0; i < cpu_r->nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(cpu_r, i);

		if (e->data_type >= BCH_DATA_NR) {
			prt_printf(err, "invalid data type in entry ");
			bch2_replicas_entry_to_text(err, e);
			return -BCH_ERR_invalid_sb_replicas;
		}

		if (!e->nr_devs) {
			prt_printf(err, "no devices in entry ");
			bch2_replicas_entry_to_text(err, e);
			return -BCH_ERR_invalid_sb_replicas;
		}

		if (e->nr_required > 1 &&
		    e->nr_required >= e->nr_devs) {
			prt_printf(err, "bad nr_required in entry ");
			bch2_replicas_entry_to_text(err, e);
			return -BCH_ERR_invalid_sb_replicas;
		}

		for (j = 0; j < e->nr_devs; j++)
			if (!bch2_dev_exists(sb, e->devs[j])) {
				prt_printf(err, "invalid device %u in entry ", e->devs[j]);
				bch2_replicas_entry_to_text(err, e);
				return -BCH_ERR_invalid_sb_replicas;
			}

		if (i + 1 < cpu_r->nr) {
			struct bch_replicas_entry *n =
				cpu_replicas_entry(cpu_r, i + 1);

			BUG_ON(memcmp(e, n, cpu_r->entry_size) > 0);

			if (!memcmp(e, n, cpu_r->entry_size)) {
				prt_printf(err, "duplicate replicas entry ");
				bch2_replicas_entry_to_text(err, e);
				return -BCH_ERR_invalid_sb_replicas;
			}
		}
	}

	return 0;
}
static int bch2_sb_replicas_validate(struct bch_sb *sb, struct bch_sb_field *f,
				     struct printbuf *err)
{
	struct bch_sb_field_replicas *sb_r = field_to_type(f, replicas);
	struct bch_replicas_cpu cpu_r;
	int ret;

	ret = __bch2_sb_replicas_to_cpu_replicas(sb_r, &cpu_r);
	if (ret)
		return ret;

	ret = bch2_cpu_replicas_validate(&cpu_r, sb, err);
	kfree(cpu_r.entries);
	return ret;
}
static void bch2_sb_replicas_to_text(struct printbuf *out,
				     struct bch_sb *sb,
				     struct bch_sb_field *f)
{
	struct bch_sb_field_replicas *r = field_to_type(f, replicas);
	struct bch_replicas_entry *e;
	bool first = true;

	for_each_replicas_entry(r, e) {
		if (!first)
			prt_printf(out, " ");
		first = false;

		bch2_replicas_entry_to_text(out, e);
	}
	prt_newline(out);
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas = {
	.validate	= bch2_sb_replicas_validate,
	.to_text	= bch2_sb_replicas_to_text,
};
static int bch2_sb_replicas_v0_validate(struct bch_sb *sb, struct bch_sb_field *f,
					struct printbuf *err)
{
	struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
	struct bch_replicas_cpu cpu_r;
	int ret;

	ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_r, &cpu_r);
	if (ret)
		return ret;

	ret = bch2_cpu_replicas_validate(&cpu_r, sb, err);
	kfree(cpu_r.entries);
	return ret;
}

static void bch2_sb_replicas_v0_to_text(struct printbuf *out,
					struct bch_sb *sb,
					struct bch_sb_field *f)
{
	struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
	struct bch_replicas_entry_v0 *e;
	bool first = true;

	for_each_replicas_entry(sb_r, e) {
		if (!first)
			prt_printf(out, " ");
		first = false;

		bch2_replicas_entry_v0_to_text(out, e);
	}
	prt_newline(out);
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0 = {
	.validate	= bch2_sb_replicas_v0_validate,
	.to_text	= bch2_sb_replicas_v0_to_text,
};
/* Query replicas: */
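/*
 * bch2_have_enough_devs() answers: given this mask of online devices, is it
 * safe to use the filesystem? For each replicas entry, fewer than nr_required
 * devices online means data would be unavailable, fewer than nr_devs means it
 * would be degraded; either is tolerated only if the caller passed the
 * matching BCH_FORCE_IF_* flag.
 */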
bool bch2_have_enough_devs(struct bch_fs *c, struct bch_devs_mask devs,
			   unsigned flags, bool print)
{
	struct bch_replicas_entry *e;
	bool ret = true;

	percpu_down_read(&c->mark_lock);
	for_each_cpu_replicas_entry(&c->replicas, e) {
		unsigned i, nr_online = 0, nr_failed = 0, dflags = 0;
		bool metadata = e->data_type < BCH_DATA_user;

		if (e->data_type == BCH_DATA_cached)
			continue;

		for (i = 0; i < e->nr_devs; i++) {
			struct bch_dev *ca = bch_dev_bkey_exists(c, e->devs[i]);

			nr_online += test_bit(e->devs[i], devs.d);
			nr_failed += ca->mi.state == BCH_MEMBER_STATE_failed;
		}

		if (nr_failed == e->nr_devs)
			continue;

		if (nr_online < e->nr_required)
			dflags |= metadata
				? BCH_FORCE_IF_METADATA_LOST
				: BCH_FORCE_IF_DATA_LOST;

		if (nr_online < e->nr_devs)
			dflags |= metadata
				? BCH_FORCE_IF_METADATA_DEGRADED
				: BCH_FORCE_IF_DATA_DEGRADED;

		if (dflags & ~flags) {
			if (print) {
				struct printbuf buf = PRINTBUF;

				bch2_replicas_entry_to_text(&buf, e);
				bch_err(c, "insufficient devices online (%u) for replicas entry %s",
					nr_online, buf.buf);
				printbuf_exit(&buf);
			}
			ret = false;
			break;
		}
	}
	percpu_up_read(&c->mark_lock);

	return ret;
}
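/*
 * bch2_sb_dev_has_data() reports which data types a device holds, as a
 * bitmask of bch_data_type bits, by scanning whichever replicas field the
 * superblock has.
 */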
unsigned bch2_sb_dev_has_data(struct bch_sb *sb, unsigned dev)
{
	struct bch_sb_field_replicas *replicas;
	struct bch_sb_field_replicas_v0 *replicas_v0;
	unsigned i, data_has = 0;

	replicas = bch2_sb_field_get(sb, replicas);
	replicas_v0 = bch2_sb_field_get(sb, replicas_v0);

	if (replicas) {
		struct bch_replicas_entry *r;

		for_each_replicas_entry(replicas, r)
			for (i = 0; i < r->nr_devs; i++)
				if (r->devs[i] == dev)
					data_has |= 1 << r->data_type;
	} else if (replicas_v0) {
		struct bch_replicas_entry_v0 *r;

		for_each_replicas_entry_v0(replicas_v0, r)
			for (i = 0; i < r->nr_devs; i++)
				if (r->devs[i] == dev)
					data_has |= 1 << r->data_type;
	}

	return data_has;
}

unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned ret;

	mutex_lock(&c->sb_lock);
	ret = bch2_sb_dev_has_data(c->disk_sb.sb, ca->dev_idx);
	mutex_unlock(&c->sb_lock);

	return ret;
}
void bch2_fs_replicas_exit(struct bch_fs *c)
{
	unsigned i;

	kfree(c->usage_scratch);
	for (i = 0; i < ARRAY_SIZE(c->usage); i++)
		free_percpu(c->usage[i]);
	kfree(c->usage_base);
	kfree(c->replicas.entries);
	kfree(c->replicas_gc.entries);

	mempool_exit(&c->replicas_delta_pool);
}

int bch2_fs_replicas_init(struct bch_fs *c)
{
	bch2_journal_entry_res_resize(&c->journal,
			&c->replicas_journal_res,
			reserve_journal_replicas(c, &c->replicas));

	return mempool_init_kmalloc_pool(&c->replicas_delta_pool, 1,
					 REPLICAS_DELTA_LIST_MAX) ?:
		replicas_table_update(c, &c->replicas);
}