// SPDX-License-Identifier: GPL-2.0
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "bset.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "ec.h"
#include "error.h"
#include "inode.h"
#include "movinggc.h"
#include "recovery.h"
#include "reflink.h"
#include "replicas.h"
#include "subvolume.h"
#include "trace.h"

#include <linux/preempt.h>

static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
					      enum bch_data_type data_type,
					      s64 sectors)
{
	switch (data_type) {
	case BCH_DATA_btree:
		fs_usage->btree += sectors;
		break;
	case BCH_DATA_user:
	case BCH_DATA_parity:
		fs_usage->data += sectors;
		break;
	case BCH_DATA_cached:
		fs_usage->cached += sectors;
		break;
	default:
		break;
	}
}

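/*
 * Recompute the base filesystem usage totals from accumulated state: fold the
 * percpu accumulators into usage_base, then rebuild the reserved,
 * per-replicas-entry and hidden (superblock/journal) totals from it.
 */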
void bch2_fs_usage_initialize(struct bch_fs *c)
{
	struct bch_fs_usage *usage;
	struct bch_dev *ca;
	unsigned i;

	percpu_down_write(&c->mark_lock);
	usage = c->usage_base;

	for (i = 0; i < ARRAY_SIZE(c->usage); i++)
		bch2_fs_usage_acc_to_base(c, i);

	for (i = 0; i < BCH_REPLICAS_MAX; i++)
		usage->reserved += usage->persistent_reserved[i];

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);

		fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
	}

	for_each_member_device(ca, c, i) {
		struct bch_dev_usage dev = bch2_dev_usage_read(ca);

		usage->hidden += (dev.d[BCH_DATA_sb].buckets +
				  dev.d[BCH_DATA_journal].buckets) *
			ca->mi.bucket_size;
	}

	percpu_up_write(&c->mark_lock);
}

static inline struct bch_dev_usage *dev_usage_ptr(struct bch_dev *ca,
						  unsigned journal_seq,
						  bool gc)
{
	BUG_ON(!gc && !journal_seq);

	return this_cpu_ptr(gc
			    ? ca->usage_gc
			    : ca->usage[journal_seq & JOURNAL_BUF_MASK]);
}

struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;
	struct bch_dev_usage ret;
	unsigned seq, i, u64s = dev_usage_u64s();

	do {
		seq = read_seqcount_begin(&c->usage_lock);
		memcpy(&ret, ca->usage_base, u64s * sizeof(u64));
		for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
			acc_u64s_percpu((u64 *) &ret, (u64 __percpu *) ca->usage[i], u64s);
	} while (read_seqcount_retry(&c->usage_lock, seq));

	return ret;
}

static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
						unsigned journal_seq,
						bool gc)
{
	percpu_rwsem_assert_held(&c->mark_lock);
	BUG_ON(!gc && !journal_seq);

	return this_cpu_ptr(gc
			    ? c->usage_gc
			    : c->usage[journal_seq & JOURNAL_BUF_MASK]);
}

u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
{
	ssize_t offset = v - (u64 *) c->usage_base;
	unsigned i, seq;
	u64 ret;

	BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
	percpu_rwsem_assert_held(&c->mark_lock);

	do {
		seq = read_seqcount_begin(&c->usage_lock);
		ret = *v;

		for (i = 0; i < ARRAY_SIZE(c->usage); i++)
			ret += percpu_u64_get((u64 __percpu *) c->usage[i] + offset);
	} while (read_seqcount_retry(&c->usage_lock, seq));

	return ret;
}

struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
{
	struct bch_fs_usage_online *ret;
	unsigned seq, i, v, u64s = fs_usage_u64s(c) + 1;
retry:
	ret = kmalloc(u64s * sizeof(u64), GFP_NOFS);
	if (unlikely(!ret))
		return NULL;

	percpu_down_read(&c->mark_lock);

	v = fs_usage_u64s(c) + 1;
	if (unlikely(u64s != v)) {
		u64s = v;
		percpu_up_read(&c->mark_lock);
		kfree(ret);
		goto retry;
	}

	ret->online_reserved = percpu_u64_get(c->online_reserved);

	do {
		seq = read_seqcount_begin(&c->usage_lock);
		memcpy(&ret->u, c->usage_base, u64s * sizeof(u64));
		for (i = 0; i < ARRAY_SIZE(c->usage); i++)
			acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i], u64s);
	} while (read_seqcount_retry(&c->usage_lock, seq));

	return ret;
}

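/*
 * Fold one journal buffer's worth of percpu usage deltas (filesystem-wide and
 * per-device) into the base counters, then zero that accumulator.
 */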
void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
{
	struct bch_dev *ca;
	unsigned i, u64s = fs_usage_u64s(c);

	BUG_ON(idx >= ARRAY_SIZE(c->usage));

	preempt_disable();
	write_seqcount_begin(&c->usage_lock);

	acc_u64s_percpu((u64 *) c->usage_base,
			(u64 __percpu *) c->usage[idx], u64s);
	percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));

	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i, NULL) {
		u64s = dev_usage_u64s();

		acc_u64s_percpu((u64 *) ca->usage_base,
				(u64 __percpu *) ca->usage[idx], u64s);
		percpu_memset(ca->usage[idx], 0, u64s * sizeof(u64));
	}
	rcu_read_unlock();

	write_seqcount_end(&c->usage_lock);
	preempt_enable();
}

void bch2_fs_usage_to_text(struct printbuf *out,
			   struct bch_fs *c,
			   struct bch_fs_usage_online *fs_usage)
{
	unsigned i;

	pr_buf(out, "capacity:\t\t\t%llu\n", c->capacity);

	pr_buf(out, "hidden:\t\t\t\t%llu\n",
	       fs_usage->u.hidden);
	pr_buf(out, "data:\t\t\t\t%llu\n",
	       fs_usage->u.data);
	pr_buf(out, "cached:\t\t\t\t%llu\n",
	       fs_usage->u.cached);
	pr_buf(out, "reserved:\t\t\t%llu\n",
	       fs_usage->u.reserved);
	pr_buf(out, "nr_inodes:\t\t\t%llu\n",
	       fs_usage->u.nr_inodes);
	pr_buf(out, "online reserved:\t\t%llu\n",
	       fs_usage->online_reserved);

	for (i = 0;
	     i < ARRAY_SIZE(fs_usage->u.persistent_reserved);
	     i++) {
		pr_buf(out, "%u replicas:\n", i + 1);
		pr_buf(out, "\treserved:\t\t%llu\n",
		       fs_usage->u.persistent_reserved[i]);
	}

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);

		pr_buf(out, "\t");
		bch2_replicas_entry_to_text(out, e);
		pr_buf(out, ":\t%llu\n", fs_usage->u.replicas[i]);
	}
}

static u64 reserve_factor(u64 r)
{
	return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
}

u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
{
	return min(fs_usage->u.hidden +
		   fs_usage->u.btree +
		   fs_usage->u.data +
		   reserve_factor(fs_usage->u.reserved +
				  fs_usage->online_reserved),
		   c->capacity);
}

static struct bch_fs_usage_short
__bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;
	u64 data, reserved;

	ret.capacity = c->capacity -
		bch2_fs_usage_read_one(c, &c->usage_base->hidden);

	data		= bch2_fs_usage_read_one(c, &c->usage_base->data) +
		bch2_fs_usage_read_one(c, &c->usage_base->btree);
	reserved	= bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
		percpu_u64_get(c->online_reserved);

	ret.used	= min(ret.capacity, data + reserve_factor(reserved));
	ret.free	= ret.capacity - ret.used;

	ret.nr_inodes	= bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);

	return ret;
}

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;

	percpu_down_read(&c->mark_lock);
	ret = __bch2_fs_usage_read_short(c);
	percpu_up_read(&c->mark_lock);

	return ret;
}

static inline int is_unavailable_bucket(struct bch_alloc_v4 a)
{
	return a.dirty_sectors || a.stripe;
}

static inline int bucket_sectors_fragmented(struct bch_dev *ca,
					    struct bch_alloc_v4 a)
{
	return a.dirty_sectors
		? max(0, (int) ca->mi.bucket_size - (int) a.dirty_sectors)
		: 0;
}

static inline enum bch_data_type bucket_type(struct bch_alloc_v4 a)
{
	return a.cached_sectors && !a.dirty_sectors
		? BCH_DATA_cached
		: a.data_type;
}

static inline void account_bucket(struct bch_fs_usage *fs_usage,
				  struct bch_dev_usage *dev_usage,
				  enum bch_data_type type,
				  int nr, s64 size)
{
	if (type == BCH_DATA_sb || type == BCH_DATA_journal)
		fs_usage->hidden	+= size;

	dev_usage->d[type].buckets	+= nr;
}

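/*
 * Apply the difference between an old and a new bucket state to the
 * per-device usage counters: bucket counts by data type, unavailable buckets,
 * sector counts and fragmentation.
 */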
static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
				  struct bch_alloc_v4 old,
				  struct bch_alloc_v4 new,
				  u64 journal_seq, bool gc)
{
	struct bch_fs_usage *fs_usage;
	struct bch_dev_usage *u;

	preempt_disable();
	fs_usage = fs_usage_ptr(c, journal_seq, gc);
	u = dev_usage_ptr(ca, journal_seq, gc);

	if (bucket_type(old))
		account_bucket(fs_usage, u, bucket_type(old),
			       -1, -ca->mi.bucket_size);

	if (bucket_type(new))
		account_bucket(fs_usage, u, bucket_type(new),
			       1, ca->mi.bucket_size);

	u->buckets_unavailable +=
		is_unavailable_bucket(new) - is_unavailable_bucket(old);

	u->d[old.data_type].sectors -= old.dirty_sectors;
	u->d[new.data_type].sectors += new.dirty_sectors;
	u->d[BCH_DATA_cached].sectors +=
		(int) new.cached_sectors - (int) old.cached_sectors;

	u->d[old.data_type].fragmented -= bucket_sectors_fragmented(ca, old);
	u->d[new.data_type].fragmented += bucket_sectors_fragmented(ca, new);

	preempt_enable();
}

static void bch2_dev_usage_update_m(struct bch_fs *c, struct bch_dev *ca,
				    struct bucket old, struct bucket new,
				    u64 journal_seq, bool gc)
{
	struct bch_alloc_v4 old_a = {
		.gen		= old.gen,
		.data_type	= old.data_type,
		.dirty_sectors	= old.dirty_sectors,
		.cached_sectors	= old.cached_sectors,
		.stripe		= old.stripe,
	};
	struct bch_alloc_v4 new_a = {
		.gen		= new.gen,
		.data_type	= new.data_type,
		.dirty_sectors	= new.dirty_sectors,
		.cached_sectors	= new.cached_sectors,
		.stripe		= new.stripe,
	};

	bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);
}

static inline int __update_replicas(struct bch_fs *c,
				    struct bch_fs_usage *fs_usage,
				    struct bch_replicas_entry *r,
				    s64 sectors)
{
	int idx = bch2_replicas_entry_idx(c, r);

	if (idx < 0)
		return -1;

	fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
	fs_usage->replicas[idx]		+= sectors;
	return 0;
}

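/*
 * Account sectors against a replicas entry; if the entry isn't known yet, add
 * it (dropping and retaking mark_lock), emitting an fsck error if that was
 * unexpected.
 */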
static inline int update_replicas(struct bch_fs *c, struct bkey_s_c k,
				  struct bch_replicas_entry *r, s64 sectors,
				  unsigned journal_seq, bool gc)
{
	struct bch_fs_usage __percpu *fs_usage;
	int idx, ret = 0;
	struct printbuf buf = PRINTBUF;

	percpu_down_read(&c->mark_lock);
	buf.atomic++;

	idx = bch2_replicas_entry_idx(c, r);
	if (idx < 0 &&
	    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
	     fsck_err(c, "no replicas entry\n"
		      "while marking %s",
		      (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))) {
		percpu_up_read(&c->mark_lock);
		ret = bch2_mark_replicas(c, r);
		percpu_down_read(&c->mark_lock);

		if (ret)
			goto err;
		idx = bch2_replicas_entry_idx(c, r);
	}
	if (idx < 0) {
		ret = -1;
		goto err;
	}

	preempt_disable();
	fs_usage = fs_usage_ptr(c, journal_seq, gc);
	fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
	fs_usage->replicas[idx]		+= sectors;
	preempt_enable();
err:
fsck_err:
	percpu_up_read(&c->mark_lock);
	printbuf_exit(&buf);
	return ret;
}

static inline int update_cached_sectors(struct bch_fs *c,
					struct bkey_s_c k,
					unsigned dev, s64 sectors,
					unsigned journal_seq, bool gc)
{
	struct bch_replicas_padded r;

	bch2_replicas_entry_cached(&r.e, dev);

	return update_replicas(c, k, &r.e, sectors, journal_seq, gc);
}

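/*
 * Ensure the transaction's replicas delta list has room for 'more' bytes,
 * growing it (or falling back to the preallocated mempool) as needed.
 */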
static struct replicas_delta_list *
replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
{
	struct replicas_delta_list *d = trans->fs_usage_deltas;
	unsigned new_size = d ? (d->size + more) * 2 : 128;
	unsigned alloc_size = sizeof(*d) + new_size;

	WARN_ON_ONCE(alloc_size > REPLICAS_DELTA_LIST_MAX);

	if (!d || d->used + more > d->size) {
		d = krealloc(d, alloc_size, GFP_NOIO|__GFP_ZERO);

		BUG_ON(!d && alloc_size > REPLICAS_DELTA_LIST_MAX);

		if (!d) {
			d = mempool_alloc(&trans->c->replicas_delta_pool, GFP_NOIO);
			memset(d, 0, REPLICAS_DELTA_LIST_MAX);

			if (trans->fs_usage_deltas)
				memcpy(d, trans->fs_usage_deltas,
				       trans->fs_usage_deltas->size + sizeof(*d));

			new_size = REPLICAS_DELTA_LIST_MAX - sizeof(*d);
			kfree(trans->fs_usage_deltas);
		}

		d->size = new_size;
		trans->fs_usage_deltas = d;
	}
	return d;
}

static inline void update_replicas_list(struct btree_trans *trans,
					struct bch_replicas_entry *r,
					s64 sectors)
{
	struct replicas_delta_list *d;
	struct replicas_delta *n;
	unsigned b;

	if (!sectors)
		return;

	b = replicas_entry_bytes(r) + 8;
	d = replicas_deltas_realloc(trans, b);

	n = (void *) d->d + d->used;
	n->delta = sectors;
	memcpy((void *) n + offsetof(struct replicas_delta, r),
	       r, replicas_entry_bytes(r));
	bch2_replicas_entry_sort(&n->r);
	d->used += b;
}

static inline void update_cached_sectors_list(struct btree_trans *trans,
					      unsigned dev, s64 sectors)
{
	struct bch_replicas_padded r;

	bch2_replicas_entry_cached(&r.e, dev);

	update_replicas_list(trans, &r.e, sectors);
}

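/*
 * Trigger run when an alloc key is updated: keeps per-device usage, bucket
 * gens, journal-flush bookkeeping and the GC bucket mirror in sync with the
 * new bucket state.
 */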
int bch2_mark_alloc(struct btree_trans *trans,
		    struct bkey_s_c old, struct bkey_s_c new,
		    unsigned flags)
{
	bool gc = flags & BTREE_TRIGGER_GC;
	u64 journal_seq = trans->journal_res.seq;
	struct bch_fs *c = trans->c;
	struct bch_alloc_v4 old_a, new_a;
	struct bch_dev *ca = bch_dev_bkey_exists(c, new.k->p.inode);
	int ret = 0;

	if (bch2_trans_inconsistent_on(new.k->p.offset < ca->mi.first_bucket ||
				       new.k->p.offset >= ca->mi.nbuckets, trans,
				       "alloc key outside range of device's buckets"))
		return -EIO;

	/*
	 * alloc btree is read in by bch2_alloc_read, not gc:
	 */
	if ((flags & BTREE_TRIGGER_GC) &&
	    !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
		return 0;

	bch2_alloc_to_v4(old, &old_a);
	bch2_alloc_to_v4(new, &new_a);

	if ((flags & BTREE_TRIGGER_INSERT) &&
	    !old_a.data_type != !new_a.data_type &&
	    new.k->type == KEY_TYPE_alloc_v4) {
		struct bch_alloc_v4 *v = (struct bch_alloc_v4 *) new.v;

		BUG_ON(!journal_seq);

		/*
		 * If the btree updates referring to a bucket weren't flushed
		 * before the bucket became empty again, then we don't have
		 * to wait on a journal flush before we can reuse the bucket:
		 */
		new_a.journal_seq = !new_a.data_type &&
			(journal_seq == v->journal_seq ||
			 bch2_journal_noflush_seq(&c->journal, v->journal_seq))
			? 0 : journal_seq;
		v->journal_seq = new_a.journal_seq;
	}

	if (old_a.data_type && !new_a.data_type && new_a.journal_seq) {
		ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
				c->journal.flushed_seq_ondisk,
				new.k->p.inode, new.k->p.offset,
				new_a.journal_seq);
		if (ret) {
			bch2_fs_fatal_error(c,
				"error setting bucket_needs_journal_commit: %i", ret);
			return ret;
		}
	}

	if (!new_a.data_type &&
	    (!new_a.journal_seq || new_a.journal_seq < c->journal.flushed_seq_ondisk))
		closure_wake_up(&c->freelist_wait);

	if ((flags & BTREE_TRIGGER_INSERT) &&
	    BCH_ALLOC_V4_NEED_DISCARD(&new_a) &&
	    !new_a.journal_seq)
		bch2_do_discards(c);

	if (!old_a.data_type &&
	    new_a.data_type &&
	    should_invalidate_buckets(ca))
		bch2_do_invalidates(c);

	if (bucket_state(new_a) == BUCKET_need_gc_gens) {
		atomic_inc(&c->kick_gc);
		wake_up_process(c->gc_thread);
	}

	percpu_down_read(&c->mark_lock);
	if (!gc && new_a.gen != old_a.gen)
		*bucket_gen(ca, new.k->p.offset) = new_a.gen;

	bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);

	if (gc) {
		struct bucket *g = gc_bucket(ca, new.k->p.offset);

		bucket_lock(g);
		g->gen_valid		= 1;
		g->gen			= new_a.gen;
		g->data_type		= new_a.data_type;
		g->stripe		= new_a.stripe;
		g->stripe_redundancy	= new_a.stripe_redundancy;
		g->dirty_sectors	= new_a.dirty_sectors;
		g->cached_sectors	= new_a.cached_sectors;
		bucket_unlock(g);
	}
	percpu_up_read(&c->mark_lock);

	/*
	 * need to know if we're getting called from the invalidate path or
	 * not:
	 */

	if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
	    old_a.cached_sectors) {
		ret = update_cached_sectors(c, new, ca->dev_idx,
					    -old_a.cached_sectors,
					    journal_seq, gc);
		if (ret) {
			bch2_fs_fatal_error(c, "bch2_mark_alloc(): no replicas entry while updating cached sectors");
			return ret;
		}

		trace_invalidate(ca, bucket_to_sector(ca, new.k->p.offset),
				 old_a.cached_sectors);
	}

	return 0;
}

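/*
 * Mark a superblock or journal bucket directly in the GC bucket array; these
 * buckets aren't covered by keys in the btree.
 */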
int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
			      size_t b, enum bch_data_type data_type,
			      unsigned sectors, struct gc_pos pos,
			      unsigned flags)
{
	struct bucket old, new, *g;
	int ret = 0;

	BUG_ON(!(flags & BTREE_TRIGGER_GC));
	BUG_ON(data_type != BCH_DATA_sb &&
	       data_type != BCH_DATA_journal);

	/*
	 * Backup superblock might be past the end of our normal usable space:
	 */
	if (b >= ca->mi.nbuckets)
		return 0;

	percpu_down_read(&c->mark_lock);
	g = gc_bucket(ca, b);

	bucket_lock(g);
	old = *g;

	if (bch2_fs_inconsistent_on(g->data_type &&
			g->data_type != data_type, c,
			"different types of data in same bucket: %s, %s",
			bch2_data_types[g->data_type],
			bch2_data_types[data_type])) {
		ret = -EIO;
		goto err;
	}

	if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
			"bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > bucket size",
			ca->dev_idx, b, g->gen,
			bch2_data_types[g->data_type ?: data_type],
			g->dirty_sectors, sectors)) {
		ret = -EIO;
		goto err;
	}

	g->data_type = data_type;
	g->dirty_sectors += sectors;
	new = *g;
err:
	bucket_unlock(g);
	if (!ret)
		bch2_dev_usage_update_m(c, ca, old, new, 0, true);
	percpu_up_read(&c->mark_lock);
	return ret;
}

static s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
{
	EBUG_ON(sectors < 0);

	return crc_is_compressed(p.crc)
		? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
				   p.crc.uncompressed_size)
		: sectors;
}

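/*
 * Sanity checks run before accounting a pointer against a bucket: stale or
 * future gens, mismatched data types, sector count overflow. Returns a
 * positive value for a stale cached pointer (to be ignored), -EIO on
 * inconsistency.
 */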
static int check_bucket_ref(struct bch_fs *c,
			    struct bkey_s_c k,
			    const struct bch_extent_ptr *ptr,
			    s64 sectors, enum bch_data_type ptr_data_type,
			    u8 b_gen, u8 bucket_data_type,
			    u32 dirty_sectors, u32 cached_sectors)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
	size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
	u16 bucket_sectors = !ptr->cached
		? dirty_sectors
		: cached_sectors;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	if (gen_after(ptr->gen, b_gen)) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
			"while marking %s",
			ptr->dev, bucket_nr, b_gen,
			bch2_data_types[bucket_data_type ?: ptr_data_type],
			ptr->gen,
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		ret = -EIO;
		goto err;
	}

	if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
			"while marking %s",
			ptr->dev, bucket_nr, b_gen,
			bch2_data_types[bucket_data_type ?: ptr_data_type],
			ptr->gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		ret = -EIO;
		goto err;
	}

	if (b_gen != ptr->gen && !ptr->cached) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n"
			"while marking %s",
			ptr->dev, bucket_nr, b_gen,
			*bucket_gen(ca, bucket_nr),
			bch2_data_types[bucket_data_type ?: ptr_data_type],
			ptr->gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		ret = -EIO;
		goto err;
	}

	if (b_gen != ptr->gen) {
		ret = 1;
		goto err;
	}

	if (bucket_data_type && ptr_data_type &&
	    bucket_data_type != ptr_data_type) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
			"while marking %s",
			ptr->dev, bucket_nr, b_gen,
			bch2_data_types[bucket_data_type],
			bch2_data_types[ptr_data_type],
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		ret = -EIO;
		goto err;
	}

	if ((unsigned) (bucket_sectors + sectors) > U32_MAX) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U16_MAX\n"
			"while marking %s",
			ptr->dev, bucket_nr, b_gen,
			bch2_data_types[bucket_data_type ?: ptr_data_type],
			bucket_sectors, sectors,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		ret = -EIO;
		goto err;
	}
err:
	printbuf_exit(&buf);
	return ret;
}

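/*
 * GC-side marking of one bucket referenced by a stripe key: only parity
 * blocks contribute sectors here; data block sectors are accounted via the
 * extents that point into them.
 */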
static int mark_stripe_bucket(struct btree_trans *trans,
			      struct bkey_s_c k,
			      unsigned ptr_idx,
			      unsigned flags)
{
	struct bch_fs *c = trans->c;
	u64 journal_seq = trans->journal_res.seq;
	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
	unsigned nr_data = s->nr_blocks - s->nr_redundant;
	bool parity = ptr_idx >= nr_data;
	enum bch_data_type data_type = parity ? BCH_DATA_parity : 0;
	s64 sectors = parity ? le16_to_cpu(s->sectors) : 0;
	const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
	struct bucket old, new, *g;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	BUG_ON(!(flags & BTREE_TRIGGER_GC));

	/* * XXX doesn't handle deletion */

	percpu_down_read(&c->mark_lock);
	buf.atomic++;
	g = PTR_GC_BUCKET(ca, ptr);

	if (g->dirty_sectors ||
	    (g->stripe && g->stripe != k.k->p.offset)) {
		bch2_fs_inconsistent(c,
			      "bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
			      ptr->dev, PTR_BUCKET_NR(ca, ptr), g->gen,
			      (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		ret = -EINVAL;
		goto err;
	}

	bucket_lock(g);
	old = *g;

	ret = check_bucket_ref(c, k, ptr, sectors, data_type,
			       g->gen, g->data_type,
			       g->dirty_sectors, g->cached_sectors);
	if (ret)
		goto err;

	if (data_type)
		g->data_type = data_type;
	g->dirty_sectors += sectors;

	g->stripe		= k.k->p.offset;
	g->stripe_redundancy	= s->nr_redundant;
	new = *g;
err:
	bucket_unlock(g);
	if (!ret)
		bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);
	percpu_up_read(&c->mark_lock);
	printbuf_exit(&buf);
	return ret;
}

static int __mark_pointer(struct btree_trans *trans,
			  struct bkey_s_c k,
			  const struct bch_extent_ptr *ptr,
			  s64 sectors, enum bch_data_type ptr_data_type,
			  u8 bucket_gen, u8 *bucket_data_type,
			  u32 *dirty_sectors, u32 *cached_sectors)
{
	u32 *dst_sectors = !ptr->cached
		? dirty_sectors
		: cached_sectors;
	int ret = check_bucket_ref(trans->c, k, ptr, sectors, ptr_data_type,
				   bucket_gen, *bucket_data_type,
				   *dirty_sectors, *cached_sectors);

	if (ret)
		return ret;

	*dst_sectors += sectors;
	*bucket_data_type = *dirty_sectors || *cached_sectors
		? ptr_data_type : 0;
	return 0;
}

static int bch2_mark_pointer(struct btree_trans *trans,
			     struct bkey_s_c k,
			     struct extent_ptr_decoded p,
			     s64 sectors, enum bch_data_type data_type,
			     unsigned flags)
{
	u64 journal_seq = trans->journal_res.seq;
	struct bch_fs *c = trans->c;
	struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
	struct bucket old, new, *g;
	u8 bucket_data_type;
	int ret = 0;

	BUG_ON(!(flags & BTREE_TRIGGER_GC));

	percpu_down_read(&c->mark_lock);
	g = PTR_GC_BUCKET(ca, &p.ptr);
	bucket_lock(g);
	old = *g;

	bucket_data_type = g->data_type;
	ret = __mark_pointer(trans, k, &p.ptr, sectors,
			     data_type, g->gen,
			     &bucket_data_type,
			     &g->dirty_sectors,
			     &g->cached_sectors);
	if (!ret)
		g->data_type = bucket_data_type;

	new = *g;
	bucket_unlock(g);
	if (!ret)
		bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);
	percpu_up_read(&c->mark_lock);

	return ret;
}

static int bch2_mark_stripe_ptr(struct btree_trans *trans,
				struct bkey_s_c k,
				struct bch_extent_stripe_ptr p,
				enum bch_data_type data_type,
				s64 sectors,
				unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct bch_replicas_padded r;
	struct gc_stripe *m;

	BUG_ON(!(flags & BTREE_TRIGGER_GC));

	m = genradix_ptr_alloc(&c->gc_stripes, p.idx, GFP_KERNEL);
	if (!m) {
		bch_err(c, "error allocating memory for gc_stripes, idx %llu",
			(u64) p.idx);
		return -ENOMEM;
	}

	spin_lock(&c->ec_stripes_heap_lock);

	if (!m || !m->alive) {
		spin_unlock(&c->ec_stripes_heap_lock);
		bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
				    (u64) p.idx);
		bch2_inconsistent_error(c);
		return -EIO;
	}

	m->block_sectors[p.block] += sectors;

	r = m->r;
	spin_unlock(&c->ec_stripes_heap_lock);

	r.e.data_type = data_type;
	update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, true);

	return 0;
}

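/*
 * GC-side trigger for extents and btree pointers: accounts each pointer
 * against its bucket, cached sectors against the device, and dirty sectors
 * against the key's replicas entry.
 */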
int bch2_mark_extent(struct btree_trans *trans,
		     struct bkey_s_c old, struct bkey_s_c new,
		     unsigned flags)
{
	u64 journal_seq = trans->journal_res.seq;
	struct bch_fs *c = trans->c;
	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_replicas_padded r;
	enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
		? BCH_DATA_btree
		: BCH_DATA_user;
	s64 sectors = bkey_is_btree_ptr(k.k)
		? btree_sectors(c)
		: k.k->size;
	s64 dirty_sectors = 0;
	bool stale;
	int ret;

	BUG_ON(!(flags & BTREE_TRIGGER_GC));

	r.e.data_type	= data_type;
	r.e.nr_devs	= 0;
	r.e.nr_required	= 1;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		s64 disk_sectors = ptr_disk_sectors(sectors, p);

		if (flags & BTREE_TRIGGER_OVERWRITE)
			disk_sectors = -disk_sectors;

		ret = bch2_mark_pointer(trans, k, p, disk_sectors,
					data_type, flags);
		if (ret < 0)
			return ret;

		stale = ret > 0;

		if (p.ptr.cached) {
			if (!stale) {
				ret = update_cached_sectors(c, k, p.ptr.dev,
						disk_sectors, journal_seq, true);
				if (ret) {
					bch2_fs_fatal_error(c, "bch2_mark_extent(): no replicas entry while updating cached sectors");
					return ret;
				}
			}
		} else if (!p.has_ec) {
			dirty_sectors	       += disk_sectors;
			r.e.devs[r.e.nr_devs++]	= p.ptr.dev;
		} else {
			ret = bch2_mark_stripe_ptr(trans, k, p.ec, data_type,
						   disk_sectors, flags);
			if (ret)
				return ret;

			/*
			 * There may be other dirty pointers in this extent, but
			 * if so they're not required for mounting if we have an
			 * erasure coded pointer in this extent:
			 */
			r.e.nr_required = 0;
		}
	}

	if (r.e.nr_devs) {
		ret = update_replicas(c, k, &r.e, dirty_sectors, journal_seq, true);
		if (ret) {
			struct printbuf buf = PRINTBUF;

			bch2_bkey_val_to_text(&buf, c, k);
			bch2_fs_fatal_error(c, "no replicas entry for %s", buf.buf);
			printbuf_exit(&buf);
			return ret;
		}
	}

	return 0;
}

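/*
 * Trigger for stripe keys: maintains the in-memory stripes heap at runtime,
 * and during GC rebuilds gc_stripes and marks each bucket the stripe uses.
 */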
int bch2_mark_stripe(struct btree_trans *trans,
		     struct bkey_s_c old, struct bkey_s_c new,
		     unsigned flags)
{
	bool gc = flags & BTREE_TRIGGER_GC;
	u64 journal_seq = trans->journal_res.seq;
	struct bch_fs *c = trans->c;
	u64 idx = new.k->p.offset;
	const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(old).v : NULL;
	const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(new).v : NULL;
	unsigned i;
	int ret;

	BUG_ON(gc && old_s);

	if (!gc) {
		struct stripe *m = genradix_ptr(&c->stripes, idx);

		if (!m || (old_s && !m->alive)) {
			struct printbuf buf1 = PRINTBUF;
			struct printbuf buf2 = PRINTBUF;

			bch2_bkey_val_to_text(&buf1, c, old);
			bch2_bkey_val_to_text(&buf2, c, new);
			bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
					    "old %s\n"
					    "new %s", idx, buf1.buf, buf2.buf);
			printbuf_exit(&buf2);
			printbuf_exit(&buf1);
			bch2_inconsistent_error(c);
			return -1;
		}

		if (!new_s) {
			spin_lock(&c->ec_stripes_heap_lock);
			bch2_stripes_heap_del(c, m, idx);
			spin_unlock(&c->ec_stripes_heap_lock);

			memset(m, 0, sizeof(*m));
		} else {
			m->alive	= true;
			m->sectors	= le16_to_cpu(new_s->sectors);
			m->algorithm	= new_s->algorithm;
			m->nr_blocks	= new_s->nr_blocks;
			m->nr_redundant	= new_s->nr_redundant;
			m->blocks_nonempty = 0;

			for (i = 0; i < new_s->nr_blocks; i++)
				m->blocks_nonempty += !!stripe_blockcount_get(new_s, i);

			spin_lock(&c->ec_stripes_heap_lock);
			bch2_stripes_heap_update(c, m, idx);
			spin_unlock(&c->ec_stripes_heap_lock);
		}
	} else {
		struct gc_stripe *m =
			genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);

		if (!m) {
			bch_err(c, "error allocating memory for gc_stripes, idx %llu",
				idx);
			return -ENOMEM;
		}
		/*
		 * This will be wrong when we bring back runtime gc: we should
		 * be unmarking the old key and then marking the new key
		 */
		m->alive	= true;
		m->sectors	= le16_to_cpu(new_s->sectors);
		m->nr_blocks	= new_s->nr_blocks;
		m->nr_redundant	= new_s->nr_redundant;

		for (i = 0; i < new_s->nr_blocks; i++)
			m->ptrs[i] = new_s->ptrs[i];

		bch2_bkey_to_replicas(&m->r.e, new);

		/*
		 * gc recalculates this field from stripe ptr
		 * references:
		 */
		memset(m->block_sectors, 0, sizeof(m->block_sectors));

		for (i = 0; i < new_s->nr_blocks; i++) {
			ret = mark_stripe_bucket(trans, new, i, flags);
			if (ret)
				return ret;
		}

		ret = update_replicas(c, new, &m->r.e,
				      ((s64) m->sectors * m->nr_redundant),
				      journal_seq, gc);
		if (ret) {
			struct printbuf buf = PRINTBUF;

			bch2_bkey_val_to_text(&buf, c, new);
			bch2_fs_fatal_error(c, "no replicas entry for %s", buf.buf);
			printbuf_exit(&buf);
			return ret;
		}
	}

	return 0;
}

int bch2_mark_inode(struct btree_trans *trans,
		    struct bkey_s_c old, struct bkey_s_c new,
		    unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct bch_fs_usage __percpu *fs_usage;
	u64 journal_seq = trans->journal_res.seq;

	if (flags & BTREE_TRIGGER_INSERT) {
		struct bch_inode_v2 *v = (struct bch_inode_v2 *) new.v;

		BUG_ON(!journal_seq);
		BUG_ON(new.k->type != KEY_TYPE_inode_v2);

		v->bi_journal_seq = cpu_to_le64(journal_seq);
	}

	if (flags & BTREE_TRIGGER_GC) {
		percpu_down_read(&c->mark_lock);
		preempt_disable();

		fs_usage = fs_usage_ptr(c, journal_seq, flags & BTREE_TRIGGER_GC);
		fs_usage->nr_inodes += bkey_is_inode(new.k);
		fs_usage->nr_inodes -= bkey_is_inode(old.k);

		preempt_enable();
		percpu_up_read(&c->mark_lock);
	}
	return 0;
}

int bch2_mark_reservation(struct btree_trans *trans,
			  struct bkey_s_c old, struct bkey_s_c new,
			  unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
	struct bch_fs_usage __percpu *fs_usage;
	unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
	s64 sectors = (s64) k.k->size;

	BUG_ON(!(flags & BTREE_TRIGGER_GC));

	if (flags & BTREE_TRIGGER_OVERWRITE)
		sectors = -sectors;
	sectors *= replicas;

	percpu_down_read(&c->mark_lock);
	preempt_disable();

	fs_usage = fs_usage_ptr(c, trans->journal_res.seq, flags & BTREE_TRIGGER_GC);
	replicas = clamp_t(unsigned, replicas, 1,
			   ARRAY_SIZE(fs_usage->persistent_reserved));

	fs_usage->reserved				+= sectors;
	fs_usage->persistent_reserved[replicas - 1]	+= sectors;

	preempt_enable();
	percpu_up_read(&c->mark_lock);

	return 0;
}

static s64 __bch2_mark_reflink_p(struct btree_trans *trans,
				 struct bkey_s_c_reflink_p p,
				 u64 start, u64 end,
				 u64 *idx, unsigned flags, size_t r_idx)
{
	struct bch_fs *c = trans->c;
	struct reflink_gc *r;
	int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
	u64 next_idx = end;
	s64 ret = 0;
	struct printbuf buf = PRINTBUF;

	if (r_idx >= c->reflink_gc_nr)
		goto not_found;

	r = genradix_ptr(&c->reflink_gc_table, r_idx);
	next_idx = min(next_idx, r->offset - r->size);
	if (*idx < next_idx)
		goto not_found;

	BUG_ON((s64) r->refcount + add < 0);

	r->refcount += add;
	*idx = r->offset;
	return 0;
not_found:
	if (fsck_err(c, "pointer to missing indirect extent\n"
		     "%s\n"
		     "missing range %llu-%llu",
		     (bch2_bkey_val_to_text(&buf, c, p.s_c), buf.buf),
		     *idx, next_idx)) {
		struct bkey_i_error new;

		bkey_init(&new.k);
		new.k.type	= KEY_TYPE_error;
		new.k.p		= bkey_start_pos(p.k);
		new.k.p.offset += *idx - start;
		bch2_key_resize(&new.k, next_idx - *idx);
		ret = __bch2_btree_insert(trans, BTREE_ID_extents, &new.k_i);
	}

	*idx = next_idx;
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

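/*
 * GC-side trigger for reflink pointers: adjust the refcounts of the indirect
 * extents the pointer covers, using the reflink_gc table built during GC.
 */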
int bch2_mark_reflink_p(struct btree_trans *trans,
			struct bkey_s_c old, struct bkey_s_c new,
			unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
	struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
	struct reflink_gc *ref;
	size_t l, r, m;
	u64 idx = le64_to_cpu(p.v->idx), start = idx;
	u64 end = le64_to_cpu(p.v->idx) + p.k->size;
	int ret = 0;

	BUG_ON(!(flags & BTREE_TRIGGER_GC));

	if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix) {
		idx -= le32_to_cpu(p.v->front_pad);
		end += le32_to_cpu(p.v->back_pad);
	}

	l = 0;
	r = c->reflink_gc_nr;
	while (l < r) {
		m = l + (r - l) / 2;

		ref = genradix_ptr(&c->reflink_gc_table, m);
		if (ref->offset <= idx)
			l = m + 1;
		else
			r = m;
	}

	while (idx < end && !ret)
		ret = __bch2_mark_reflink_p(trans, p, start, end,
					    &idx, flags, l++);

	return ret;
}

static noinline __cold
void fs_usage_apply_warn(struct btree_trans *trans,
			 unsigned disk_res_sectors,
			 s64 should_not_have_added)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;
	struct printbuf buf = PRINTBUF;

	bch_err(c, "disk usage increased %lli more than %u sectors reserved",
		should_not_have_added, disk_res_sectors);

	trans_for_each_update(trans, i) {
		struct bkey_s_c old = { &i->old_k, i->old_v };

		pr_err("while inserting");
		printbuf_reset(&buf);
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(i->k));
		pr_err("%s", buf.buf);
		pr_err("overlapping with");
		printbuf_reset(&buf);
		bch2_bkey_val_to_text(&buf, c, old);
		pr_err("%s", buf.buf);
	}

	__WARN();
	printbuf_exit(&buf);
}

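/*
 * Apply a transaction's accumulated replicas/reservation deltas to the
 * filesystem usage counters at commit time; returns -1 if a replicas entry
 * still needs to be marked, so the caller can mark it and retry.
 */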
int bch2_trans_fs_usage_apply(struct btree_trans *trans,
			      struct replicas_delta_list *deltas)
{
	struct bch_fs *c = trans->c;
	static int warned_disk_usage = 0;
	bool warn = false;
	unsigned disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
	struct replicas_delta *d = deltas->d, *d2;
	struct replicas_delta *top = (void *) deltas->d + deltas->used;
	struct bch_fs_usage *dst;
	s64 added = 0, should_not_have_added;
	unsigned i;

	percpu_down_read(&c->mark_lock);
	preempt_disable();
	dst = fs_usage_ptr(c, trans->journal_res.seq, false);

	for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
		switch (d->r.data_type) {
		case BCH_DATA_btree:
		case BCH_DATA_user:
		case BCH_DATA_parity:
			added += d->delta;
		}

		if (__update_replicas(c, dst, &d->r, d->delta))
			goto need_mark;
	}

	dst->nr_inodes += deltas->nr_inodes;

	for (i = 0; i < BCH_REPLICAS_MAX; i++) {
		added				+= deltas->persistent_reserved[i];
		dst->reserved			+= deltas->persistent_reserved[i];
		dst->persistent_reserved[i]	+= deltas->persistent_reserved[i];
	}

	/*
	 * Not allowed to reduce sectors_available except by getting a
	 * reservation:
	 */
	should_not_have_added = added - (s64) disk_res_sectors;
	if (unlikely(should_not_have_added > 0)) {
		u64 old, new, v = atomic64_read(&c->sectors_available);

		do {
			old = v;
			new = max_t(s64, 0, old - should_not_have_added);
		} while ((v = atomic64_cmpxchg(&c->sectors_available,
					       old, new)) != old);

		added -= should_not_have_added;
		warn = true;
	}

	if (added > 0) {
		trans->disk_res->sectors -= added;
		this_cpu_sub(*c->online_reserved, added);
	}

	preempt_enable();
	percpu_up_read(&c->mark_lock);

	if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
		fs_usage_apply_warn(trans, disk_res_sectors, should_not_have_added);
	return 0;
need_mark:
	/* revert changes: */
	for (d2 = deltas->d; d2 != d; d2 = replicas_delta_next(d2))
		BUG_ON(__update_replicas(c, dst, &d2->r, -d2->delta));

	preempt_enable();
	percpu_up_read(&c->mark_lock);
	return -1;
}

/* trans_mark: */

static int bch2_trans_mark_pointer(struct btree_trans *trans,
			struct bkey_s_c k, struct extent_ptr_decoded p,
			s64 sectors, enum bch_data_type data_type)
{
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a;
	int ret;

	a = bch2_trans_start_alloc_update(trans, &iter, PTR_BUCKET_POS(trans->c, &p.ptr));
	if (IS_ERR(a))
		return PTR_ERR(a);

	ret = __mark_pointer(trans, k, &p.ptr, sectors, data_type,
			     a->v.gen, &a->v.data_type,
			     &a->v.dirty_sectors, &a->v.cached_sectors);
	if (ret)
		goto out;

	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
	if (ret)
		goto out;
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
			struct extent_ptr_decoded p,
			s64 sectors, enum bch_data_type data_type)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_i_stripe *s;
	struct bch_replicas_padded r;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_stripes, POS(0, p.ec.idx),
			     BTREE_ITER_INTENT|
			     BTREE_ITER_WITH_UPDATES);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (k.k->type != KEY_TYPE_stripe) {
		bch2_trans_inconsistent(trans,
			"pointer to nonexistent stripe %llu",
			(u64) p.ec.idx);
		ret = -EIO;
		goto err;
	}

	if (!bch2_ptr_matches_stripe(bkey_s_c_to_stripe(k).v, p)) {
		bch2_trans_inconsistent(trans,
			"stripe pointer doesn't match stripe %llu",
			(u64) p.ec.idx);
		ret = -EIO;
		goto err;
	}

	s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
	ret = PTR_ERR_OR_ZERO(s);
	if (ret)
		goto err;

	bkey_reassemble(&s->k_i, k);
	stripe_blockcount_set(&s->v, p.ec.block,
		stripe_blockcount_get(&s->v, p.ec.block) +
		sectors);

	ret = bch2_trans_update(trans, &iter, &s->k_i, 0);
	if (ret)
		goto err;

	bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
	r.e.data_type = data_type;
	update_replicas_list(trans, &r.e, sectors);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
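
/*
 * Transactional trigger for extents and btree pointers: walks each pointer,
 * marking the bucket it points into (and the stripe, for erasure coded
 * pointers), then accounts dirty sectors against the key's replicas entry.
 * On overwrite the same walk is done with negated sector counts.
 */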
int bch2_trans_mark_extent(struct btree_trans *trans,
			   struct bkey_s_c old, struct bkey_i *new,
			   unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
		? old
		: bkey_i_to_s_c(new);
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_replicas_padded r;
	enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
		? BCH_DATA_btree
		: BCH_DATA_user;
	s64 sectors = bkey_is_btree_ptr(k.k)
		? btree_sectors(c)
		: k.k->size;
	s64 dirty_sectors = 0;
	bool stale;
	int ret;

	r.e.data_type	= data_type;
	r.e.nr_devs	= 0;
	r.e.nr_required	= 1;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		s64 disk_sectors = ptr_disk_sectors(sectors, p);

		if (flags & BTREE_TRIGGER_OVERWRITE)
			disk_sectors = -disk_sectors;

		ret = bch2_trans_mark_pointer(trans, k, p,
					      disk_sectors, data_type);
		if (ret < 0)
			return ret;

		stale = ret > 0;

		if (p.ptr.cached) {
			if (!stale)
				update_cached_sectors_list(trans, p.ptr.dev,
							   disk_sectors);
		} else if (!p.has_ec) {
			dirty_sectors	       += disk_sectors;
			r.e.devs[r.e.nr_devs++]	= p.ptr.dev;
		} else {
			ret = bch2_trans_mark_stripe_ptr(trans, p,
							 disk_sectors, data_type);
			if (ret)
				return ret;

			r.e.nr_required = 0;
		}
	}

	if (r.e.nr_devs)
		update_replicas_list(trans, &r.e, dirty_sectors);

	return 0;
}
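
/*
 * Update the alloc key for one block of a stripe: sanity check the bucket's
 * existing stripe backpointer, then set or clear a->v.stripe and
 * a->v.stripe_redundancy, adjusting dirty_sectors for parity blocks.
 */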
static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
					 struct bkey_s_c_stripe s,
					 unsigned idx, bool deleting)
{
	struct bch_fs *c = trans->c;
	const struct bch_extent_ptr *ptr = &s.v->ptrs[idx];
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a;
	enum bch_data_type data_type = idx >= s.v->nr_blocks - s.v->nr_redundant
		? BCH_DATA_parity : 0;
	s64 sectors = data_type ? le16_to_cpu(s.v->sectors) : 0;
	int ret = 0;

	if (deleting)
		sectors = -sectors;

	a = bch2_trans_start_alloc_update(trans, &iter, PTR_BUCKET_POS(c, ptr));
	if (IS_ERR(a))
		return PTR_ERR(a);

	ret = check_bucket_ref(c, s.s_c, ptr, sectors, data_type,
			       a->v.gen, a->v.data_type,
			       a->v.dirty_sectors, a->v.cached_sectors);
	if (ret)
		goto err;

	if (!deleting) {
		if (bch2_trans_inconsistent_on(a->v.stripe ||
					       a->v.stripe_redundancy, trans,
				"bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)",
				iter.pos.inode, iter.pos.offset, a->v.gen,
				bch2_data_types[a->v.data_type],
				a->v.dirty_sectors,
				a->v.stripe, s.k->p.offset)) {
			ret = -EIO;
			goto err;
		}

		if (bch2_trans_inconsistent_on(data_type && a->v.dirty_sectors, trans,
				"bucket %llu:%llu gen %u data type %s dirty_sectors %u: data already in stripe bucket %llu",
				iter.pos.inode, iter.pos.offset, a->v.gen,
				bch2_data_types[a->v.data_type],
				a->v.dirty_sectors,
				s.k->p.offset)) {
			ret = -EIO;
			goto err;
		}

		a->v.stripe		= s.k->p.offset;
		a->v.stripe_redundancy	= s.v->nr_redundant;
	} else {
		if (bch2_trans_inconsistent_on(a->v.stripe != s.k->p.offset ||
					       a->v.stripe_redundancy != s.v->nr_redundant, trans,
				"bucket %llu:%llu gen %u: not marked as stripe when deleting stripe %llu (got %u)",
				iter.pos.inode, iter.pos.offset, a->v.gen,
				s.k->p.offset, a->v.stripe)) {
			ret = -EIO;
			goto err;
		}

		a->v.stripe		= 0;
		a->v.stripe_redundancy	= 0;
	}

	a->v.dirty_sectors += sectors;
	if (data_type)
		a->v.data_type = !deleting ? data_type : 0;

	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
	if (ret)
		goto err;
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
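
/*
 * Transactional trigger for stripe keys: account the stripe's parity sectors
 * against its replicas entry, and (re)mark only the buckets whose pointers
 * actually changed between the old and new stripe.
 */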
int bch2_trans_mark_stripe(struct btree_trans *trans,
			   struct bkey_s_c old, struct bkey_i *new,
			   unsigned flags)
{
	const struct bch_stripe *old_s = NULL;
	struct bch_stripe *new_s = NULL;
	struct bch_replicas_padded r;
	unsigned i, nr_blocks;
	int ret = 0;

	if (old.k->type == KEY_TYPE_stripe)
		old_s = bkey_s_c_to_stripe(old).v;
	if (new->k.type == KEY_TYPE_stripe)
		new_s = &bkey_i_to_stripe(new)->v;

	/*
	 * If the pointers aren't changing, we don't need to do anything:
	 */
	if (new_s && old_s &&
	    new_s->nr_blocks	== old_s->nr_blocks &&
	    new_s->nr_redundant	== old_s->nr_redundant &&
	    !memcmp(old_s->ptrs, new_s->ptrs,
		    new_s->nr_blocks * sizeof(struct bch_extent_ptr)))
		return 0;

	BUG_ON(new_s && old_s &&
	       (new_s->nr_blocks	!= old_s->nr_blocks ||
		new_s->nr_redundant	!= old_s->nr_redundant));

	nr_blocks = new_s ? new_s->nr_blocks : old_s->nr_blocks;

	if (new_s) {
		s64 sectors = le16_to_cpu(new_s->sectors);

		bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(new));
		update_replicas_list(trans, &r.e, sectors * new_s->nr_redundant);
	}

	if (old_s) {
		s64 sectors = -((s64) le16_to_cpu(old_s->sectors));

		bch2_bkey_to_replicas(&r.e, old);
		update_replicas_list(trans, &r.e, sectors * old_s->nr_redundant);
	}

	for (i = 0; i < nr_blocks; i++) {
		if (new_s && old_s &&
		    !memcmp(&new_s->ptrs[i],
			    &old_s->ptrs[i],
			    sizeof(new_s->ptrs[i])))
			continue;

		if (new_s) {
			ret = bch2_trans_mark_stripe_bucket(trans,
					bkey_i_to_s_c_stripe(new), i, false);
			if (ret)
				break;
		}

		if (old_s) {
			ret = bch2_trans_mark_stripe_bucket(trans,
					bkey_s_c_to_stripe(old), i, true);
			if (ret)
				break;
		}
	}

	return ret;
}
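
/*
 * Inodes only affect the filesystem's inode count; the delta is buffered in
 * the transaction's replicas_delta_list and applied when the transaction's
 * usage deltas are applied.
 */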
int bch2_trans_mark_inode(struct btree_trans *trans,
			  struct bkey_s_c old,
			  struct bkey_i *new,
			  unsigned flags)
{
	int nr = bkey_is_inode(&new->k) - bkey_is_inode(old.k);

	if (nr) {
		struct replicas_delta_list *d =
			replicas_deltas_realloc(trans, 0);
		d->nr_inodes += nr;
	}

	return 0;
}
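
/*
 * Reservations carry no pointers; just account the reserved sectors against
 * the persistent_reserved slot matching the key's replication level.
 */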
int bch2_trans_mark_reservation(struct btree_trans *trans,
				struct bkey_s_c old,
				struct bkey_i *new,
				unsigned flags)
{
	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
		? old
		: bkey_i_to_s_c(new);
	unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
	s64 sectors = (s64) k.k->size;
	struct replicas_delta_list *d;

	if (flags & BTREE_TRIGGER_OVERWRITE)
		sectors = -sectors;
	sectors *= replicas;

	d = replicas_deltas_realloc(trans, 0);

	replicas = clamp_t(unsigned, replicas, 1,
			   ARRAY_SIZE(d->persistent_reserved));

	d->persistent_reserved[replicas - 1] += sectors;
	return 0;
}
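
/*
 * Adjust the refcount on one indirect extent referenced by a reflink pointer,
 * growing the pointer's front/back padding on insert so that a later
 * overwrite walks the full range of indirect extents it originally covered.
 * Advances *idx past the indirect extent that was processed.
 */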
static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
			struct bkey_s_c_reflink_p p,
			u64 *idx, unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_i *n;
	__le64 *refcount;
	int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
	struct printbuf buf = PRINTBUF;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_reflink, POS(0, *idx),
			     BTREE_ITER_INTENT|
			     BTREE_ITER_WITH_UPDATES);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
	ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		goto err;

	bkey_reassemble(n, k);

	refcount = bkey_refcount(n);
	if (!refcount) {
		bch2_bkey_val_to_text(&buf, c, p.s_c);
		bch2_trans_inconsistent(trans,
			"nonexistent indirect extent at %llu while marking\n  %s",
			*idx, buf.buf);
		ret = -EIO;
		goto err;
	}

	if (!*refcount && (flags & BTREE_TRIGGER_OVERWRITE)) {
		bch2_bkey_val_to_text(&buf, c, p.s_c);
		bch2_trans_inconsistent(trans,
			"indirect extent refcount underflow at %llu while marking\n  %s",
			*idx, buf.buf);
		ret = -EIO;
		goto err;
	}

	if (flags & BTREE_TRIGGER_INSERT) {
		struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
		u64 pad;

		pad = max_t(s64, le32_to_cpu(v->front_pad),
			    le64_to_cpu(v->idx) - bkey_start_offset(k.k));
		BUG_ON(pad > U32_MAX);
		v->front_pad = cpu_to_le32(pad);

		pad = max_t(s64, le32_to_cpu(v->back_pad),
			    k.k->p.offset - p.k->size - le64_to_cpu(v->idx));
		BUG_ON(pad > U32_MAX);
		v->back_pad = cpu_to_le32(pad);
	}

	le64_add_cpu(refcount, add);

	bch2_btree_iter_set_pos_to_extent_start(&iter);
	ret = bch2_trans_update(trans, &iter, n, 0);
	if (ret)
		goto err;

	*idx = k.k->p.offset;
err:
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}
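
/*
 * Transactional trigger for reflink pointers: walk every indirect extent in
 * the range the pointer (plus its padding) covers and update each refcount.
 */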
int bch2_trans_mark_reflink_p(struct btree_trans *trans,
			      struct bkey_s_c old,
			      struct bkey_i *new,
			      unsigned flags)
{
	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
		? old
		: bkey_i_to_s_c(new);
	struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
	u64 idx, end_idx;
	int ret = 0;

	if (flags & BTREE_TRIGGER_INSERT) {
		struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;

		v->front_pad = v->back_pad = 0;
	}

	idx	= le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad);
	end_idx	= le64_to_cpu(p.v->idx) + p.k->size +
		le32_to_cpu(p.v->back_pad);

	while (idx < end_idx && !ret)
		ret = __bch2_trans_mark_reflink_p(trans, p, &idx, flags);

	return ret;
}
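
/*
 * Mark a bucket as holding superblock or journal data in the alloc btree,
 * complaining if the bucket already holds a different data type.
 */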
static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
					     struct bch_dev *ca, size_t b,
					     enum bch_data_type type,
					     unsigned sectors)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a;
	int ret = 0;

	/*
	 * Backup superblock might be past the end of our normal usable space:
	 */
	if (b >= ca->mi.nbuckets)
		return 0;

	a = bch2_trans_start_alloc_update(trans, &iter, POS(ca->dev_idx, b));
	if (IS_ERR(a))
		return PTR_ERR(a);

	if (a->v.data_type && a->v.data_type != type) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
			"while marking %s",
			iter.pos.inode, iter.pos.offset, a->v.gen,
			bch2_data_types[a->v.data_type],
			bch2_data_types[type],
			bch2_data_types[type]);
		ret = -EIO;
		goto out;
	}

	a->v.data_type		= type;
	a->v.dirty_sectors	= sectors;

	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
	if (ret)
		goto out;
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
				    struct bch_dev *ca, size_t b,
				    enum bch_data_type type,
				    unsigned sectors)
{
	return __bch2_trans_do(trans, NULL, NULL, 0,
			__bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
}

static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
					    struct bch_dev *ca,
					    u64 start, u64 end,
					    enum bch_data_type type,
					    u64 *bucket, unsigned *bucket_sectors)
{
	do {
		u64 b = sector_to_bucket(ca, start);
		unsigned sectors =
			min_t(u64, bucket_to_sector(ca, b + 1), end) - start;

		if (b != *bucket && *bucket_sectors) {
			int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
								  type, *bucket_sectors);
			if (ret)
				return ret;

			*bucket_sectors = 0;
		}

		*bucket		= b;
		*bucket_sectors	+= sectors;
		start += sectors;
	} while (start < end);

	return 0;
}
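
/*
 * Mark the buckets covered by each of a device's superblocks and by its
 * journal, so that this metadata space shows up in the alloc btree.
 */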
static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
				    struct bch_dev *ca)
{
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	u64 bucket = 0;
	unsigned i, bucket_sectors = 0;
	int ret;

	for (i = 0; i < layout->nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout->sb_offset[i]);

		if (offset == BCH_SB_SECTOR) {
			ret = bch2_trans_mark_metadata_sectors(trans, ca,
						0, BCH_SB_SECTOR,
						BCH_DATA_sb, &bucket, &bucket_sectors);
			if (ret)
				return ret;
		}

		ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
				      offset + (1 << layout->sb_max_size_bits),
				      BCH_DATA_sb, &bucket, &bucket_sectors);
		if (ret)
			return ret;
	}

	if (bucket_sectors) {
		ret = bch2_trans_mark_metadata_bucket(trans, ca,
				bucket, BCH_DATA_sb, bucket_sectors);
		if (ret)
			return ret;
	}

	for (i = 0; i < ca->journal.nr; i++) {
		ret = bch2_trans_mark_metadata_bucket(trans, ca,
				ca->journal.buckets[i],
				BCH_DATA_journal, ca->mi.bucket_size);
		if (ret)
			return ret;
	}

	return 0;
}

int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
{
	return bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW,
			     __bch2_trans_mark_dev_sb(&trans, ca));
}

/* Disk reservations: */

#define SECTORS_CACHE	1024
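
/*
 * Fast path: each CPU caches up to SECTORS_CACHE sectors taken from
 * c->sectors_available, so most reservations only touch a percpu counter.
 * When the cache is exhausted we refill from the atomic counter, and if that
 * is also short we recompute sectors_available under sectors_available_lock.
 *
 * Typical caller pattern (sketch only - see actual callers for details):
 *
 *	struct disk_reservation res = { 0 };
 *	int ret = bch2_disk_reservation_add(c, &res, sectors, 0);
 *	if (ret)
 *		return ret;
 *	... do the write, charging it to @res ...
 *	bch2_disk_reservation_put(c, &res);
 */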
int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
				u64 sectors, int flags)
{
	struct bch_fs_pcpu *pcpu;
	u64 old, v, get;
	s64 sectors_available;
	int ret;

	percpu_down_read(&c->mark_lock);
	preempt_disable();
	pcpu = this_cpu_ptr(c->pcpu);

	if (sectors <= pcpu->sectors_available)
		goto out;

	v = atomic64_read(&c->sectors_available);
	do {
		old = v;
		get = min((u64) sectors + SECTORS_CACHE, old);

		if (get < sectors) {
			preempt_enable();
			goto recalculate;
		}
	} while ((v = atomic64_cmpxchg(&c->sectors_available,
				       old, old - get)) != old);

	pcpu->sectors_available		+= get;

out:
	pcpu->sectors_available		-= sectors;
	this_cpu_add(*c->online_reserved, sectors);
	res->sectors			+= sectors;

	preempt_enable();
	percpu_up_read(&c->mark_lock);
	return 0;

recalculate:
	mutex_lock(&c->sectors_available_lock);

	percpu_u64_set(&c->pcpu->sectors_available, 0);
	sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);

	if (sectors <= sectors_available ||
	    (flags & BCH_DISK_RESERVATION_NOFAIL)) {
		atomic64_set(&c->sectors_available,
			     max_t(s64, 0, sectors_available - sectors));
		this_cpu_add(*c->online_reserved, sectors);
		res->sectors			+= sectors;
		ret = 0;
	} else {
		atomic64_set(&c->sectors_available, sectors_available);
		ret = -ENOSPC;
	}

	mutex_unlock(&c->sectors_available_lock);
	percpu_up_read(&c->mark_lock);

	return ret;
}

/* Startup/shutdown: */

static void bucket_gens_free_rcu(struct rcu_head *rcu)
{
	struct bucket_gens *buckets =
		container_of(rcu, struct bucket_gens, rcu);

	kvpfree(buckets, sizeof(*buckets) + buckets->nbuckets);
}
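
/*
 * Resize the in-memory bucket gens array (and optional buckets_nouse bitmap)
 * when a device's bucket count changes; the old gens array is freed via RCU
 * once readers are done with it.
 */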
int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
	struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
	unsigned long *buckets_nouse = NULL;
	bool resize = ca->bucket_gens != NULL;
	int ret = -ENOMEM;

	if (!(bucket_gens	= kvpmalloc(sizeof(struct bucket_gens) + nbuckets,
					    GFP_KERNEL|__GFP_ZERO)) ||
	    (c->opts.buckets_nouse &&
	     !(buckets_nouse	= kvpmalloc(BITS_TO_LONGS(nbuckets) *
					    sizeof(unsigned long),
					    GFP_KERNEL|__GFP_ZERO))))
		goto err;

	bucket_gens->first_bucket = ca->mi.first_bucket;
	bucket_gens->nbuckets	= nbuckets;

	bch2_copygc_stop(c);

	if (resize) {
		down_write(&c->gc_lock);
		down_write(&ca->bucket_lock);
		percpu_down_write(&c->mark_lock);
	}

	old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);

	if (resize) {
		size_t n = min(bucket_gens->nbuckets, old_bucket_gens->nbuckets);

		memcpy(bucket_gens->b,
		       old_bucket_gens->b,
		       n);
		if (buckets_nouse)
			memcpy(buckets_nouse,
			       ca->buckets_nouse,
			       BITS_TO_LONGS(n) * sizeof(unsigned long));
	}

	rcu_assign_pointer(ca->bucket_gens, bucket_gens);
	bucket_gens	= old_bucket_gens;

	swap(ca->buckets_nouse, buckets_nouse);

	nbuckets = ca->mi.nbuckets;

	if (resize) {
		percpu_up_write(&c->mark_lock);
		up_write(&ca->bucket_lock);
		up_write(&c->gc_lock);
	}

	ret = 0;
err:
	kvpfree(buckets_nouse,
		BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
	if (bucket_gens)
		call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);

	return ret;
}

void bch2_dev_buckets_free(struct bch_dev *ca)
{
	unsigned i;

	kvpfree(ca->buckets_nouse,
		BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
	kvpfree(rcu_dereference_protected(ca->bucket_gens, 1),
		sizeof(struct bucket_gens) + ca->mi.nbuckets);

	for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
		free_percpu(ca->usage[i]);
	kfree(ca->usage_base);
}

int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
	if (!ca->usage_base)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
		ca->usage[i] = alloc_percpu(struct bch_dev_usage);
		if (!ca->usage[i])
			return -ENOMEM;
	}

	return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
}