// SPDX-License-Identifier: GPL-2.0
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 *
 * Bucket states:
 * - free bucket: mark == 0
 *   The bucket contains no data and will not be read
 *
 * - allocator bucket: owned_by_allocator == 1
 *   The bucket is on a free list, or it is an open bucket
 *
 * - cached bucket: owned_by_allocator == 0 &&
 *		    dirty_sectors == 0 &&
 *		    cached_sectors > 0
 *   The bucket contains data but may be safely discarded as there are
 *   enough replicas of the data on other cache devices, or it has been
 *   written back to the backing device
 *
 * - dirty bucket: owned_by_allocator == 0 &&
 *		   dirty_sectors > 0
 *   The bucket contains data that we must not discard (either only copy,
 *   or one of the 'main copies' for data requiring multiple replicas)
 *
 * - metadata bucket: owned_by_allocator == 0 && is_metadata == 1
 *   This is a btree node, journal or gen/prio bucket
 *
 * Lifecycle:
 *
 * bucket invalidated => bucket on freelist => open bucket =>
 *     [dirty bucket =>] cached bucket => bucket invalidated => ...
 *
 * Note that cache promotion can skip the dirty bucket step, as data
 * is copied from a deeper tier to a shallower tier, onto a cached
 * bucket.
 * Note also that a cached bucket can spontaneously become dirty --
 * see below.
 *
 * Only a traversal of the key space can determine whether a bucket is
 * truly dirty or cached.
 *
 * Transitions:
 *
 * - free            => allocator: bucket was invalidated
 * - cached           => allocator: bucket was invalidated
 *
 * - allocator => dirty:    open bucket was filled up
 * - allocator => cached:   open bucket was filled up
 * - allocator => metadata: metadata was allocated
 *
 * - dirty  => cached: dirty sectors were copied to a deeper tier
 * - dirty  => free:   dirty sectors were overwritten or moved (copy gc)
 * - cached => free:   cached sectors were overwritten
 *
 * - metadata => free: metadata was freed
 *
 * Oddities:
 * - cached => dirty: a device was removed so formerly replicated data
 *		      is no longer sufficiently replicated
 * - free => cached: cannot happen
 * - free => dirty: cannot happen
 * - free => metadata: cannot happen
 */
# include "bcachefs.h"
2018-10-06 00:46:55 -04:00
# include "alloc_background.h"
2018-11-05 02:31:48 -05:00
# include "bset.h"
2017-03-16 22:18:50 -08:00
# include "btree_gc.h"
2018-11-05 02:31:48 -05:00
# include "btree_update.h"
2017-03-16 22:18:50 -08:00
# include "buckets.h"
2018-11-01 15:13:19 -04:00
# include "ec.h"
2017-03-16 22:18:50 -08:00
# include "error.h"
# include "movinggc.h"
2019-01-21 15:32:13 -05:00
# include "replicas.h"
2017-03-16 22:18:50 -08:00
# include "trace.h"
# include <linux/preempt.h>
/*
* Clear journal_seq_valid for buckets for which it ' s not needed , to prevent
* wraparound :
*/
void bch2_bucket_seq_cleanup ( struct bch_fs * c )
{
2018-07-21 22:57:20 -04:00
u64 journal_seq = atomic64_read ( & c - > journal . seq ) ;
2017-03-16 22:18:50 -08:00
u16 last_seq_ondisk = c - > journal . last_seq_ondisk ;
struct bch_dev * ca ;
struct bucket_array * buckets ;
struct bucket * g ;
struct bucket_mark m ;
unsigned i ;
2018-07-21 22:57:20 -04:00
if ( journal_seq - c - > last_bucket_seq_cleanup <
( 1U < < ( BUCKET_JOURNAL_SEQ_BITS - 2 ) ) )
return ;
c - > last_bucket_seq_cleanup = journal_seq ;
2017-03-16 22:18:50 -08:00
for_each_member_device ( ca , c , i ) {
down_read ( & ca - > bucket_lock ) ;
buckets = bucket_array ( ca ) ;
for_each_bucket ( g , buckets ) {
bucket_cmpxchg ( g , m , ( {
if ( ! m . journal_seq_valid | |
bucket_needs_journal_commit ( m , last_seq_ondisk ) )
break ;
m . journal_seq_valid = 0 ;
} ) ) ;
}
up_read ( & ca - > bucket_lock ) ;
}
}
void bch2_fs_usage_initialize(struct bch_fs *c)
{
	struct bch_fs_usage *usage;
	unsigned i;

	percpu_down_write(&c->mark_lock);
	usage = c->usage_base;

	for (i = 0; i < ARRAY_SIZE(c->usage); i++)
		bch2_fs_usage_acc_to_base(c, i);

	for (i = 0; i < BCH_REPLICAS_MAX; i++)
		usage->reserved += usage->persistent_reserved[i];

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);

		switch (e->data_type) {
		case BCH_DATA_BTREE:
			usage->btree	+= usage->replicas[i];
			break;
		case BCH_DATA_USER:
			usage->data	+= usage->replicas[i];
			break;
		case BCH_DATA_CACHED:
			usage->cached	+= usage->replicas[i];
			break;
		}
	}

	percpu_up_write(&c->mark_lock);
}
void bch2_fs_usage_scratch_put(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
{
	if (fs_usage == c->usage_scratch)
		mutex_unlock(&c->usage_scratch_lock);
	else
		kfree(fs_usage);
}

struct bch_fs_usage_online *bch2_fs_usage_scratch_get(struct bch_fs *c)
{
	struct bch_fs_usage_online *ret;
	unsigned bytes = sizeof(struct bch_fs_usage_online) + sizeof(u64) *
		READ_ONCE(c->replicas.nr);

	ret = kzalloc(bytes, GFP_NOWAIT|__GFP_NOWARN);
	if (ret)
		return ret;

	if (mutex_trylock(&c->usage_scratch_lock))
		goto out_pool;

	ret = kzalloc(bytes, GFP_NOFS);
	if (ret)
		return ret;

	mutex_lock(&c->usage_scratch_lock);
out_pool:
	ret = c->usage_scratch;
	memset(ret, 0, bytes);
	return ret;
}
struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *c, struct bch_dev *ca)
{
	struct bch_dev_usage ret;

	memset(&ret, 0, sizeof(ret));
	acc_u64s_percpu((u64 *) &ret,
			(u64 __percpu *) ca->usage[0],
			sizeof(ret) / sizeof(u64));

	return ret;
}

static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
						unsigned journal_seq,
						bool gc)
{
	return this_cpu_ptr(gc
			    ? c->usage_gc
			    : c->usage[journal_seq & 1]);
}

u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
{
	ssize_t offset = v - (u64 *) c->usage_base;
	unsigned seq;
	u64 ret;

	BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
	percpu_rwsem_assert_held(&c->mark_lock);

	do {
		seq = read_seqcount_begin(&c->usage_lock);
		ret = *v +
			percpu_u64_get((u64 __percpu *) c->usage[0] + offset) +
			percpu_u64_get((u64 __percpu *) c->usage[1] + offset);
	} while (read_seqcount_retry(&c->usage_lock, seq));

	return ret;
}
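
/*
 * Note: the seqcount retry loops here and in bch2_fs_usage_read() below give
 * a consistent snapshot: a counter is read as its value in usage_base plus
 * whatever has accumulated in both percpu copies, retrying if
 * bch2_fs_usage_acc_to_base() folds a percpu copy into the base concurrently.
 */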
struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
{
	struct bch_fs_usage_online *ret;
	unsigned seq, i, u64s;

	percpu_down_read(&c->mark_lock);

	ret = kmalloc(sizeof(struct bch_fs_usage_online) +
		      sizeof(u64) + c->replicas.nr, GFP_NOFS);
	if (unlikely(!ret)) {
		percpu_up_read(&c->mark_lock);
		return NULL;
	}

	ret->online_reserved = percpu_u64_get(c->online_reserved);

	u64s = fs_usage_u64s(c);
	do {
		seq = read_seqcount_begin(&c->usage_lock);
		memcpy(&ret->u, c->usage_base, u64s * sizeof(u64));
		for (i = 0; i < ARRAY_SIZE(c->usage); i++)
			acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i], u64s);
	} while (read_seqcount_retry(&c->usage_lock, seq));

	return ret;
}

void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
{
	unsigned u64s = fs_usage_u64s(c);

	BUG_ON(idx >= ARRAY_SIZE(c->usage));

	preempt_disable();
	write_seqcount_begin(&c->usage_lock);

	acc_u64s_percpu((u64 *) c->usage_base,
			(u64 __percpu *) c->usage[idx], u64s);
	percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));

	write_seqcount_end(&c->usage_lock);
	preempt_enable();
}
void bch2_fs_usage_to_text(struct printbuf *out,
			   struct bch_fs *c,
			   struct bch_fs_usage_online *fs_usage)
{
	unsigned i;

	pr_buf(out, "capacity:\t\t\t%llu\n", c->capacity);

	pr_buf(out, "hidden:\t\t\t\t%llu\n",
	       fs_usage->u.hidden);
	pr_buf(out, "data:\t\t\t\t%llu\n",
	       fs_usage->u.data);
	pr_buf(out, "cached:\t\t\t\t%llu\n",
	       fs_usage->u.cached);
	pr_buf(out, "reserved:\t\t\t%llu\n",
	       fs_usage->u.reserved);
	pr_buf(out, "nr_inodes:\t\t\t%llu\n",
	       fs_usage->u.nr_inodes);
	pr_buf(out, "online reserved:\t\t%llu\n",
	       fs_usage->online_reserved);

	for (i = 0;
	     i < ARRAY_SIZE(fs_usage->u.persistent_reserved);
	     i++) {
		pr_buf(out, "%u replicas:\n", i + 1);
		pr_buf(out, "\treserved:\t\t%llu\n",
		       fs_usage->u.persistent_reserved[i]);
	}

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);

		pr_buf(out, "\t");
		bch2_replicas_entry_to_text(out, e);
		pr_buf(out, ":\t%llu\n", fs_usage->u.replicas[i]);
	}
}
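
/*
 * Illustrative note on the factor helpers below: with RESERVE_FACTOR == 6,
 * reserve_factor() adds roughly 1/64 (~1.6%) on top of the accounted
 * reservation and avail_factor() is its approximate inverse, e.g.
 * reserve_factor(6400) == 6500 and avail_factor(6500) == 6400.
 */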
#define RESERVE_FACTOR	6

static u64 reserve_factor(u64 r)
{
	return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
}

static u64 avail_factor(u64 r)
{
	return (r << RESERVE_FACTOR) / ((1 << RESERVE_FACTOR) + 1);
}
u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
{
	return min(fs_usage->u.hidden +
		   fs_usage->u.btree +
		   fs_usage->u.data +
		   reserve_factor(fs_usage->u.reserved +
				  fs_usage->online_reserved),
		   c->capacity);
}

static struct bch_fs_usage_short
__bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;
	u64 data, reserved;

	ret.capacity = c->capacity -
		bch2_fs_usage_read_one(c, &c->usage_base->hidden);

	data		= bch2_fs_usage_read_one(c, &c->usage_base->data) +
		bch2_fs_usage_read_one(c, &c->usage_base->btree);
	reserved	= bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
		percpu_u64_get(c->online_reserved);

	ret.used	= min(ret.capacity, data + reserve_factor(reserved));
	ret.free	= ret.capacity - ret.used;

	ret.nr_inodes	= bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);

	return ret;
}

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;

	percpu_down_read(&c->mark_lock);
	ret = __bch2_fs_usage_read_short(c);
	percpu_up_read(&c->mark_lock);

	return ret;
}
static inline int is_unavailable_bucket(struct bucket_mark m)
{
	return !is_available_bucket(m);
}

static inline int is_fragmented_bucket(struct bucket_mark m,
				       struct bch_dev *ca)
{
	if (!m.owned_by_allocator &&
	    m.data_type == BCH_DATA_USER &&
	    bucket_sectors_used(m))
		return max_t(int, 0, (int) ca->mi.bucket_size -
			     bucket_sectors_used(m));
	return 0;
}

static inline enum bch_data_type bucket_type(struct bucket_mark m)
{
	return m.cached_sectors && !m.dirty_sectors
		? BCH_DATA_CACHED
		: m.data_type;
}

static bool bucket_became_unavailable(struct bucket_mark old,
				      struct bucket_mark new)
{
	return is_available_bucket(old) &&
	       !is_available_bucket(new);
}
int bch2_fs_usage_apply(struct bch_fs *c,
			struct bch_fs_usage_online *src,
			struct disk_reservation *disk_res,
			unsigned journal_seq)
{
	struct bch_fs_usage *dst = fs_usage_ptr(c, journal_seq, false);
	s64 added = src->u.data + src->u.reserved;
	s64 should_not_have_added;
	int ret = 0;

	percpu_rwsem_assert_held(&c->mark_lock);

	/*
	 * Not allowed to reduce sectors_available except by getting a
	 * reservation:
	 */
	should_not_have_added = added - (s64) (disk_res ? disk_res->sectors : 0);
	if (WARN_ONCE(should_not_have_added > 0,
		      "disk usage increased by %lli more than reservation of %llu",
		      added, disk_res ? disk_res->sectors : 0)) {
		atomic64_sub(should_not_have_added, &c->sectors_available);
		added -= should_not_have_added;
		ret = -1;
	}

	if (added > 0) {
		disk_res->sectors	-= added;
		src->online_reserved	-= added;
	}

	this_cpu_add(*c->online_reserved, src->online_reserved);

	preempt_disable();
	acc_u64s((u64 *) dst, (u64 *) &src->u, fs_usage_u64s(c));
	preempt_enable();

	return ret;
}
static inline void account_bucket(struct bch_fs_usage *fs_usage,
				  struct bch_dev_usage *dev_usage,
				  enum bch_data_type type,
				  int nr, s64 size)
{
	if (type == BCH_DATA_SB || type == BCH_DATA_JOURNAL)
		fs_usage->hidden	+= size;

	dev_usage->buckets[type]	+= nr;
}

static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
				  struct bch_fs_usage *fs_usage,
				  struct bucket_mark old, struct bucket_mark new,
				  bool gc)
{
	struct bch_dev_usage *dev_usage;

	percpu_rwsem_assert_held(&c->mark_lock);

	preempt_disable();
	dev_usage = this_cpu_ptr(ca->usage[gc]);

	if (bucket_type(old))
		account_bucket(fs_usage, dev_usage, bucket_type(old),
			       -1, -ca->mi.bucket_size);

	if (bucket_type(new))
		account_bucket(fs_usage, dev_usage, bucket_type(new),
			       1, ca->mi.bucket_size);

	dev_usage->buckets_ec += (int) new.stripe - (int) old.stripe;
	dev_usage->buckets_unavailable +=
		is_unavailable_bucket(new) - is_unavailable_bucket(old);

	dev_usage->sectors[old.data_type] -= old.dirty_sectors;
	dev_usage->sectors[new.data_type] += new.dirty_sectors;
	dev_usage->sectors[BCH_DATA_CACHED] +=
		(int) new.cached_sectors - (int) old.cached_sectors;
	dev_usage->sectors_fragmented +=
		is_fragmented_bucket(new, ca) - is_fragmented_bucket(old, ca);
	preempt_enable();

	if (!is_available_bucket(old) && is_available_bucket(new))
		bch2_wake_allocator(ca);
}
void bch2_dev_usage_from_buckets(struct bch_fs *c)
{
	struct bch_dev *ca;
	struct bucket_mark old = { .v.counter = 0 };
	struct bucket_array *buckets;
	struct bucket *g;
	unsigned i;
	int cpu;

	c->usage_base->hidden = 0;

	for_each_member_device(ca, c, i) {
		for_each_possible_cpu(cpu)
			memset(per_cpu_ptr(ca->usage[0], cpu), 0,
			       sizeof(*ca->usage[0]));

		buckets = bucket_array(ca);

		for_each_bucket(g, buckets)
			bch2_dev_usage_update(c, ca, c->usage_base,
					      old, g->mark, false);
	}
}
static inline int update_replicas(struct bch_fs *c,
				  struct bch_fs_usage *fs_usage,
				  struct bch_replicas_entry *r,
				  s64 sectors)
{
	int idx = bch2_replicas_entry_idx(c, r);

	if (idx < 0)
		return -1;

	if (!fs_usage)
		return 0;

	switch (r->data_type) {
	case BCH_DATA_BTREE:
		fs_usage->btree		+= sectors;
		break;
	case BCH_DATA_USER:
		fs_usage->data		+= sectors;
		break;
	case BCH_DATA_CACHED:
		fs_usage->cached	+= sectors;
		break;
	}
	fs_usage->replicas[idx]		+= sectors;
	return 0;
}

static inline void update_cached_sectors(struct bch_fs *c,
					 struct bch_fs_usage *fs_usage,
					 unsigned dev, s64 sectors)
{
	struct bch_replicas_padded r;

	bch2_replicas_entry_cached(&r.e, dev);

	update_replicas(c, fs_usage, &r.e, sectors);
}
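
/*
 * Note: cached data is accounted via a single-device "cached" replicas entry
 * built by bch2_replicas_entry_cached(), so update_cached_sectors() feeds the
 * same replicas accounting as dirty data rather than a separate counter.
 */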
static struct replicas_delta_list *
replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
{
	struct replicas_delta_list *d = trans->fs_usage_deltas;
	unsigned new_size = d ? (d->size + more) * 2 : 128;

	if (!d || d->used + more > d->size) {
		d = krealloc(d, sizeof(*d) + new_size, GFP_NOIO|__GFP_ZERO);
		BUG_ON(!d);

		d->size = new_size;
		trans->fs_usage_deltas = d;
	}
	return d;
}

static inline void update_replicas_list(struct btree_trans *trans,
					struct bch_replicas_entry *r,
					s64 sectors)
{
	struct replicas_delta_list *d;
	struct replicas_delta *n;
	unsigned b;

	if (!sectors)
		return;

	b = replicas_entry_bytes(r) + 8;
	d = replicas_deltas_realloc(trans, b);

	n = (void *) d->d + d->used;
	n->delta = sectors;
	memcpy((void *) n + offsetof(struct replicas_delta, r),
	       r, replicas_entry_bytes(r));
	d->used += b;
}

static inline void update_cached_sectors_list(struct btree_trans *trans,
					      unsigned dev, s64 sectors)
{
	struct bch_replicas_padded r;

	bch2_replicas_entry_cached(&r.e, dev);

	update_replicas_list(trans, &r.e, sectors);
}
static inline struct replicas_delta *
replicas_delta_next(struct replicas_delta *d)
{
	return (void *) d + replicas_entry_bytes(&d->r) + 8;
}

int bch2_replicas_delta_list_apply(struct bch_fs *c,
				   struct bch_fs_usage *fs_usage,
				   struct replicas_delta_list *r)
{
	struct replicas_delta *d = r->d;
	struct replicas_delta *top = (void *) r->d + r->used;
	unsigned i;

	for (d = r->d; d != top; d = replicas_delta_next(d))
		if (update_replicas(c, fs_usage, &d->r, d->delta)) {
			top = d;
			goto unwind;
		}

	if (!fs_usage)
		return 0;

	fs_usage->nr_inodes += r->nr_inodes;

	for (i = 0; i < BCH_REPLICAS_MAX; i++) {
		fs_usage->reserved += r->persistent_reserved[i];
		fs_usage->persistent_reserved[i] +=
			r->persistent_reserved[i];
	}

	return 0;
unwind:
	for (d = r->d; d != top; d = replicas_delta_next(d))
		update_replicas(c, fs_usage, &d->r, -d->delta);
	return -1;
}
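
/*
 * do_mark_fn() runs a mark function against the in-memory copies of the
 * bucket/usage counters: the normal copy when the update didn't originate
 * from gc, and the gc copy when the update is a gc update or when gc is
 * running and has already visited this position - so gc's recomputed
 * counters stay in sync with updates that race with it.
 */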
#define do_mark_fn(fn, c, pos, flags, ...)				\
({									\
	int gc, ret = 0;						\
									\
	percpu_rwsem_assert_held(&c->mark_lock);			\
									\
	for (gc = 0; gc < 2 && !ret; gc++)				\
		if (!gc == !(flags & BTREE_TRIGGER_GC) ||		\
		    (gc && gc_visited(c, pos)))				\
			ret = fn(c, __VA_ARGS__, gc);			\
									\
	ret;								\
})
static int __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
				    size_t b, struct bucket_mark *ret,
				    bool gc)
{
	struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
	struct bucket *g = __bucket(ca, b, gc);
	struct bucket_mark old, new;

	old = bucket_cmpxchg(g, new, ({
		BUG_ON(!is_available_bucket(new));

		new.owned_by_allocator	= true;
		new.data_type		= 0;
		new.cached_sectors	= 0;
		new.dirty_sectors	= 0;
		new.gen++;
	}));

	bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);

	if (old.cached_sectors)
		update_cached_sectors(c, fs_usage, ca->dev_idx,
				      -((s64) old.cached_sectors));

	if (!gc)
		*ret = old;
	return 0;
}

void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
			    size_t b, struct bucket_mark *old)
{
	do_mark_fn(__bch2_invalidate_bucket, c, gc_phase(GC_PHASE_START), 0,
		   ca, b, old);

	if (!old->owned_by_allocator && old->cached_sectors)
		trace_invalidate(ca, bucket_to_sector(ca, b),
				 old->cached_sectors);
}
static int __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
				    size_t b, bool owned_by_allocator,
				    bool gc)
{
	struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
	struct bucket *g = __bucket(ca, b, gc);
	struct bucket_mark old, new;

	old = bucket_cmpxchg(g, new, ({
		new.owned_by_allocator	= owned_by_allocator;
	}));

	bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);

	BUG_ON(!gc &&
	       !owned_by_allocator && !old.owned_by_allocator);

	return 0;
}

void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
			    size_t b, bool owned_by_allocator,
			    struct gc_pos pos, unsigned flags)
{
	preempt_disable();

	do_mark_fn(__bch2_mark_alloc_bucket, c, pos, flags,
		   ca, b, owned_by_allocator);

	preempt_enable();
}
static int bch2_mark_alloc(struct bch_fs *c, struct bkey_s_c k,
			   struct bch_fs_usage *fs_usage,
			   u64 journal_seq, unsigned flags)
{
	bool gc = flags & BTREE_TRIGGER_GC;
	struct bkey_alloc_unpacked u;
	struct bch_dev *ca;
	struct bucket *g;
	struct bucket_mark old, m;

	/*
	 * alloc btree is read in by bch2_alloc_read, not gc:
	 */
	if ((flags & BTREE_TRIGGER_GC) &&
	    !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
		return 0;

	ca = bch_dev_bkey_exists(c, k.k->p.inode);

	if (k.k->p.offset >= ca->mi.nbuckets)
		return 0;

	g = __bucket(ca, k.k->p.offset, gc);
	u = bch2_alloc_unpack(k);

	old = bucket_cmpxchg(g, m, ({
		m.gen			= u.gen;
		m.data_type		= u.data_type;
		m.dirty_sectors		= u.dirty_sectors;
		m.cached_sectors	= u.cached_sectors;

		if (journal_seq) {
			m.journal_seq_valid	= 1;
			m.journal_seq		= journal_seq;
		}
	}));

	if (!(flags & BTREE_TRIGGER_ALLOC_READ))
		bch2_dev_usage_update(c, ca, fs_usage, old, m, gc);

	g->io_time[READ]	= u.read_time;
	g->io_time[WRITE]	= u.write_time;
	g->oldest_gen		= u.oldest_gen;
	g->gen_valid		= 1;

	/*
	 * need to know if we're getting called from the invalidate path or
	 * not:
	 */

	if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
	    old.cached_sectors) {
		update_cached_sectors(c, fs_usage, ca->dev_idx,
				      -old.cached_sectors);
		trace_invalidate(ca, bucket_to_sector(ca, k.k->p.offset),
				 old.cached_sectors);
	}

	return 0;
}
#define checked_add(a, b)					\
({								\
	unsigned _res = (unsigned) (a) + (b);			\
	bool overflow = _res > U16_MAX;				\
	if (overflow)						\
		_res = U16_MAX;					\
	(a) = _res;						\
	overflow;						\
})
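
/*
 * Illustrative usage: checked_add(new.dirty_sectors, sectors) saturates the
 * 16-bit sector count at U16_MAX and evaluates to true if the addition would
 * have overflowed, which callers below report as a filesystem inconsistency.
 */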
static int __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
				       size_t b, enum bch_data_type data_type,
				       unsigned sectors, bool gc)
{
	struct bucket *g = __bucket(ca, b, gc);
	struct bucket_mark old, new;
	bool overflow;

	BUG_ON(data_type != BCH_DATA_SB &&
	       data_type != BCH_DATA_JOURNAL);

	old = bucket_cmpxchg(g, new, ({
		new.data_type	= data_type;
		overflow	= checked_add(new.dirty_sectors, sectors);
	}));

	bch2_fs_inconsistent_on(old.data_type &&
				old.data_type != data_type, c,
		"different types of data in same bucket: %s, %s",
		bch2_data_types[old.data_type],
		bch2_data_types[data_type]);

	bch2_fs_inconsistent_on(overflow, c,
		"bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > U16_MAX",
		ca->dev_idx, b, new.gen,
		bch2_data_types[old.data_type ?: data_type],
		old.dirty_sectors, sectors);

	if (c)
		bch2_dev_usage_update(c, ca, fs_usage_ptr(c, 0, gc),
				      old, new, gc);

	return 0;
}

void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
			       size_t b, enum bch_data_type type,
			       unsigned sectors, struct gc_pos pos,
			       unsigned flags)
{
	BUG_ON(type != BCH_DATA_SB &&
	       type != BCH_DATA_JOURNAL);

	preempt_disable();

	if (likely(c)) {
		do_mark_fn(__bch2_mark_metadata_bucket, c, pos, flags,
			   ca, b, type, sectors);
	} else {
		__bch2_mark_metadata_bucket(c, ca, b, type, sectors, 0);
	}

	preempt_enable();
}
static s64 disk_sectors_scaled(unsigned n, unsigned d, unsigned sectors)
{
	return DIV_ROUND_UP(sectors * n, d);
}

static s64 __ptr_disk_sectors_delta(unsigned old_size,
				    unsigned offset, s64 delta,
				    unsigned flags,
				    unsigned n, unsigned d)
{
	BUG_ON(!n || !d);

	if (flags & BTREE_TRIGGER_OVERWRITE_SPLIT) {
		BUG_ON(offset + -delta > old_size);

		return -disk_sectors_scaled(n, d, old_size) +
			disk_sectors_scaled(n, d, offset) +
			disk_sectors_scaled(n, d, old_size - offset + delta);
	} else if (flags & BTREE_TRIGGER_OVERWRITE) {
		BUG_ON(offset + -delta > old_size);

		return -disk_sectors_scaled(n, d, old_size) +
			disk_sectors_scaled(n, d, old_size + delta);
	} else {
		return  disk_sectors_scaled(n, d, delta);
	}
}

static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
				  unsigned offset, s64 delta,
				  unsigned flags)
{
	return __ptr_disk_sectors_delta(p.crc.live_size,
					offset, delta, flags,
					p.crc.compressed_size,
					p.crc.uncompressed_size);
}
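
/*
 * Illustrative example: for an uncompressed pointer (compressed_size ==
 * uncompressed_size) the delta passes through unchanged; for a 2:1 compressed
 * pointer (compressed_size 64, uncompressed_size 128), overwriting all 128
 * live sectors (offset 0, delta -128, BTREE_TRIGGER_OVERWRITE) yields
 * -disk_sectors_scaled(64, 128, 128) + disk_sectors_scaled(64, 128, 0) == -64
 * disk sectors.
 */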
static void bucket_set_stripe(struct bch_fs *c,
			      const struct bch_stripe *v,
			      struct bch_fs_usage *fs_usage,
			      u64 journal_seq,
			      unsigned flags)
{
	bool enabled = !(flags & BTREE_TRIGGER_OVERWRITE);
	bool gc = flags & BTREE_TRIGGER_GC;
	unsigned i;

	for (i = 0; i < v->nr_blocks; i++) {
		const struct bch_extent_ptr *ptr = v->ptrs + i;
		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
		struct bucket *g = PTR_BUCKET(ca, ptr, gc);
		struct bucket_mark new, old;

		old = bucket_cmpxchg(g, new, ({
			new.stripe			= enabled;
			if (journal_seq) {
				new.journal_seq_valid	= 1;
				new.journal_seq		= journal_seq;
			}
		}));

		bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);

		/*
		 * XXX write repair code for these, flag stripe as possibly bad
		 */
		if (old.gen != ptr->gen)
			bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
				      "stripe with stale pointer");
#if 0
		/*
		 * We'd like to check for these, but these checks don't work
		 * yet:
		 */
		if (old.stripe && enabled)
			bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
				      "multiple stripes using same bucket");

		if (!old.stripe && !enabled)
			bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
				      "deleting stripe but bucket not marked as stripe bucket");
#endif
	}
}
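
/*
 * __mark_pointer() below validates an extent pointer against its bucket's
 * generation and data type and bumps the dirty or cached sector count:
 * returns 0 if the counters were updated, 1 for a stale cached pointer
 * (nothing to account), or -EIO when an inconsistency is found and reported
 * via fsck_err.
 */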
static int __mark_pointer(struct bch_fs *c, struct bkey_s_c k,
			  struct extent_ptr_decoded p,
			  s64 sectors, enum bch_data_type ptr_data_type,
			  u8 bucket_gen, u8 *bucket_data_type,
			  u16 *dirty_sectors, u16 *cached_sectors)
{
	u16 *dst_sectors = !p.ptr.cached
		? dirty_sectors
		: cached_sectors;
	u16 orig_sectors = *dst_sectors;
	char buf[200];

	if (gen_after(p.ptr.gen, bucket_gen)) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
			bucket_gen,
			bch2_data_types[*bucket_data_type ?: ptr_data_type],
			p.ptr.gen,
			(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
		return -EIO;
	}

	if (gen_cmp(bucket_gen, p.ptr.gen) >= 96U) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
			bucket_gen,
			bch2_data_types[*bucket_data_type ?: ptr_data_type],
			p.ptr.gen,
			(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
		return -EIO;
	}

	if (bucket_gen != p.ptr.gen && !p.ptr.cached) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u data type %s: stale dirty ptr (gen %u)\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
			bucket_gen,
			bch2_data_types[*bucket_data_type ?: ptr_data_type],
			p.ptr.gen,
			(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
		return -EIO;
	}

	if (bucket_gen != p.ptr.gen)
		return 1;

	if (*bucket_data_type && *bucket_data_type != ptr_data_type) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
			bucket_gen,
			bch2_data_types[*bucket_data_type],
			bch2_data_types[ptr_data_type],
			(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
		return -EIO;
	}

	if (checked_add(*dst_sectors, sectors)) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U16_MAX\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
			bucket_gen,
			bch2_data_types[*bucket_data_type ?: ptr_data_type],
			orig_sectors, sectors,
			(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
		return -EIO;
	}

	*bucket_data_type = *dirty_sectors || *cached_sectors
		? ptr_data_type : 0;

	return 0;
}
static int bch2_mark_pointer(struct bch_fs *c, struct bkey_s_c k,
			     struct extent_ptr_decoded p,
			     s64 sectors, enum bch_data_type data_type,
			     struct bch_fs_usage *fs_usage,
			     u64 journal_seq, unsigned flags)
{
	bool gc = flags & BTREE_TRIGGER_GC;
	struct bucket_mark old, new;
	struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
	struct bucket *g = PTR_BUCKET(ca, &p.ptr, gc);
	u8 bucket_data_type;
	u64 v;
	int ret;

	v = atomic64_read(&g->_mark.v);
	do {
		new.v.counter = old.v.counter = v;
		bucket_data_type = new.data_type;

		ret = __mark_pointer(c, k, p, sectors, data_type, new.gen,
				     &bucket_data_type,
				     &new.dirty_sectors,
				     &new.cached_sectors);
		if (ret)
			return ret;

		new.data_type = bucket_data_type;

		if (journal_seq) {
			new.journal_seq_valid = 1;
			new.journal_seq = journal_seq;
		}

		if (flags & BTREE_TRIGGER_NOATOMIC) {
			g->_mark = new;
			break;
		}
	} while ((v = atomic64_cmpxchg(&g->_mark.v,
			      old.v.counter,
			      new.v.counter)) != old.v.counter);

	bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);

	BUG_ON(!gc && bucket_became_unavailable(old, new));

	return 0;
}
static int bch2_mark_stripe_ptr(struct bch_fs *c,
				struct bch_extent_stripe_ptr p,
				enum bch_data_type data_type,
				struct bch_fs_usage *fs_usage,
				s64 sectors, unsigned flags,
				struct bch_replicas_padded *r,
				unsigned *nr_data,
				unsigned *nr_parity)
{
	bool gc = flags & BTREE_TRIGGER_GC;
	struct stripe *m;
	unsigned old, new;
	int blocks_nonempty_delta;

	m = genradix_ptr(&c->stripes[gc], p.idx);

	spin_lock(&c->ec_stripes_heap_lock);

	if (!m || !m->alive) {
		spin_unlock(&c->ec_stripes_heap_lock);
		bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
				    (u64) p.idx);
		return -EIO;
	}

	BUG_ON(m->r.e.data_type != data_type);

	*nr_data	= m->nr_blocks - m->nr_redundant;
	*nr_parity	= m->nr_redundant;
	*r		= m->r;

	old = m->block_sectors[p.block];
	m->block_sectors[p.block] += sectors;
	new = m->block_sectors[p.block];

	blocks_nonempty_delta = (int) !!new - (int) !!old;

	if (blocks_nonempty_delta) {
		m->blocks_nonempty += blocks_nonempty_delta;

		if (!gc)
			bch2_stripes_heap_update(c, m, p.idx);
	}

	m->dirty = true;

	spin_unlock(&c->ec_stripes_heap_lock);

	return 0;
}
static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
			    unsigned offset, s64 sectors,
			    enum bch_data_type data_type,
			    struct bch_fs_usage *fs_usage,
			    unsigned journal_seq, unsigned flags)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_replicas_padded r;
	s64 dirty_sectors = 0;
	bool stale;
	int ret;

	r.e.data_type	= data_type;
	r.e.nr_devs	= 0;
	r.e.nr_required	= 1;

	BUG_ON(!sectors);

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		s64 disk_sectors = data_type == BCH_DATA_BTREE
			? sectors
			: ptr_disk_sectors_delta(p, offset, sectors, flags);

		ret = bch2_mark_pointer(c, k, p, disk_sectors, data_type,
					fs_usage, journal_seq, flags);
		if (ret < 0)
			return ret;

		stale = ret > 0;

		if (p.ptr.cached) {
			if (!stale)
				update_cached_sectors(c, fs_usage, p.ptr.dev,
						      disk_sectors);
		} else if (!p.has_ec) {
			dirty_sectors	       += disk_sectors;
			r.e.devs[r.e.nr_devs++]	= p.ptr.dev;
		} else {
			struct bch_replicas_padded ec_r;
			unsigned nr_data, nr_parity;
			s64 parity_sectors;

			ret = bch2_mark_stripe_ptr(c, p.ec, data_type,
					fs_usage, disk_sectors, flags,
					&ec_r, &nr_data, &nr_parity);
			if (ret)
				return ret;

			parity_sectors =
				__ptr_disk_sectors_delta(p.crc.live_size,
					offset, sectors, flags,
					p.crc.compressed_size * nr_parity,
					p.crc.uncompressed_size * nr_data);

			update_replicas(c, fs_usage, &ec_r.e,
					disk_sectors + parity_sectors);

			/*
			 * There may be other dirty pointers in this extent, but
			 * if so they're not required for mounting if we have an
			 * erasure coded pointer in this extent:
			 */
			r.e.nr_required = 0;
		}
	}

	if (r.e.nr_devs)
		update_replicas(c, fs_usage, &r.e, dirty_sectors);

	return 0;
}
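
/*
 * Note: for erasure coded pointers, bch2_mark_extent() charges the stripe's
 * replicas entry with the pointer's disk sectors plus a proportional share of
 * parity, computed by __ptr_disk_sectors_delta() with the numerator and
 * denominator scaled by nr_parity and nr_data respectively.
 */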
static int bch2_mark_stripe(struct bch_fs *c, struct bkey_s_c k,
			    struct bch_fs_usage *fs_usage,
			    u64 journal_seq, unsigned flags)
{
	bool gc = flags & BTREE_TRIGGER_GC;
	struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
	size_t idx = s.k->p.offset;
	struct stripe *m = genradix_ptr(&c->stripes[gc], idx);
	unsigned i;

	spin_lock(&c->ec_stripes_heap_lock);

	if (!m || ((flags & BTREE_TRIGGER_OVERWRITE) && !m->alive)) {
		spin_unlock(&c->ec_stripes_heap_lock);
		bch_err_ratelimited(c, "error marking nonexistent stripe %zu",
				    idx);
		return -1;
	}

	if (!(flags & BTREE_TRIGGER_OVERWRITE)) {
		m->sectors	= le16_to_cpu(s.v->sectors);
		m->algorithm	= s.v->algorithm;
		m->nr_blocks	= s.v->nr_blocks;
		m->nr_redundant	= s.v->nr_redundant;

		bch2_bkey_to_replicas(&m->r.e, k);

		/*
		 * XXX: account for stripes somehow here
		 */
#if 0
		update_replicas(c, fs_usage, &m->r.e, stripe_sectors);
#endif

		/* gc recalculates these fields: */
		if (!(flags & BTREE_TRIGGER_GC)) {
			for (i = 0; i < s.v->nr_blocks; i++) {
				m->block_sectors[i] =
					stripe_blockcount_get(s.v, i);
				m->blocks_nonempty += !!m->block_sectors[i];
			}
		}

		if (!gc)
			bch2_stripes_heap_update(c, m, idx);
		m->alive	= true;
	} else {
		if (!gc)
			bch2_stripes_heap_del(c, m, idx);
		memset(m, 0, sizeof(*m));
	}

	spin_unlock(&c->ec_stripes_heap_lock);

	bucket_set_stripe(c, s.v, fs_usage, 0, flags);
	return 0;
}
static int bch2_mark_key_locked(struct bch_fs *c,
		   struct bkey_s_c k,
		   unsigned offset, s64 sectors,
		   struct bch_fs_usage *fs_usage,
		   u64 journal_seq, unsigned flags)
{
	int ret = 0;

	preempt_disable();

	if (!fs_usage || (flags & BTREE_TRIGGER_GC))
		fs_usage = fs_usage_ptr(c, journal_seq,
					flags & BTREE_TRIGGER_GC);

	switch (k.k->type) {
	case KEY_TYPE_alloc:
		ret = bch2_mark_alloc(c, k, fs_usage, journal_seq, flags);
		break;
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
			?  c->opts.btree_node_size
			: -c->opts.btree_node_size;

		ret = bch2_mark_extent(c, k, offset, sectors, BCH_DATA_BTREE,
				fs_usage, journal_seq, flags);
		break;
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		ret = bch2_mark_extent(c, k, offset, sectors, BCH_DATA_USER,
				fs_usage, journal_seq, flags);
		break;
	case KEY_TYPE_stripe:
		ret = bch2_mark_stripe(c, k, fs_usage, journal_seq, flags);
		break;
	case KEY_TYPE_inode:
		if (!(flags & BTREE_TRIGGER_OVERWRITE))
			fs_usage->nr_inodes++;
		else
			fs_usage->nr_inodes--;
		break;
	case KEY_TYPE_reservation: {
		unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;

		sectors *= replicas;
		replicas = clamp_t(unsigned, replicas, 1,
				   ARRAY_SIZE(fs_usage->persistent_reserved));

		fs_usage->reserved				+= sectors;
		fs_usage->persistent_reserved[replicas - 1]	+= sectors;
		break;
	}
	}

	preempt_enable();

	return ret;
}

int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
		  unsigned offset, s64 sectors,
		  struct bch_fs_usage *fs_usage,
		  u64 journal_seq, unsigned flags)
{
	int ret;

	percpu_down_read(&c->mark_lock);
	ret = bch2_mark_key_locked(c, k, offset, sectors,
				   fs_usage, journal_seq, flags);
	percpu_up_read(&c->mark_lock);

	return ret;
}
inline int bch2_mark_overwrite(struct btree_trans *trans,
			       struct btree_iter *iter,
			       struct bkey_s_c old,
			       struct bkey_i *new,
			       struct bch_fs_usage *fs_usage,
			       unsigned flags,
			       bool is_extents)
{
	struct bch_fs		*c = trans->c;
	unsigned		offset = 0;
	s64			sectors = -((s64) old.k->size);

	flags |= BTREE_TRIGGER_OVERWRITE;

	if (is_extents
	    ? bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0
	    : bkey_cmp(new->k.p, old.k->p))
		return 0;

	if (is_extents) {
		switch (bch2_extent_overlap(&new->k, old.k)) {
		case BCH_EXTENT_OVERLAP_ALL:
			offset = 0;
			sectors = -((s64) old.k->size);
			break;
		case BCH_EXTENT_OVERLAP_BACK:
			offset = bkey_start_offset(&new->k) -
				bkey_start_offset(old.k);
			sectors = bkey_start_offset(&new->k) -
				old.k->p.offset;
			break;
		case BCH_EXTENT_OVERLAP_FRONT:
			offset = 0;
			sectors = bkey_start_offset(old.k) -
				new->k.p.offset;
			break;
		case BCH_EXTENT_OVERLAP_MIDDLE:
			offset = bkey_start_offset(&new->k) -
				bkey_start_offset(old.k);
			sectors = -((s64) new->k.size);
			flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
			break;
		}

		BUG_ON(sectors >= 0);
	}

	return bch2_mark_key_locked(c, old, offset, sectors, fs_usage,
				    trans->journal_res.seq, flags) ?: 1;
}
int bch2_mark_update(struct btree_trans *trans,
		     struct btree_iter *iter,
		     struct bkey_i *insert,
		     struct bch_fs_usage *fs_usage,
		     unsigned flags)
{
	struct bch_fs		*c = trans->c;
	struct btree		*b = iter->l[0].b;
	struct btree_node_iter	node_iter = iter->l[0].iter;
	struct bkey_packed	*_k;
	int ret = 0;

	if (unlikely(flags & BTREE_TRIGGER_NORUN))
		return 0;

	if (!btree_node_type_needs_gc(iter->btree_id))
		return 0;

	bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
		0, insert->k.size,
		fs_usage, trans->journal_res.seq,
		BTREE_TRIGGER_INSERT|flags);

	if (unlikely(flags & BTREE_TRIGGER_NOOVERWRITES))
		return 0;

	/*
	 * For non extents, we only mark the new key, not the key being
	 * overwritten - unless we're actually deleting:
	 */
	if ((iter->btree_id == BTREE_ID_ALLOC ||
	     iter->btree_id == BTREE_ID_EC) &&
	    !bkey_deleted(&insert->k))
		return 0;

	while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
		struct bkey		unpacked;
		struct bkey_s_c		k = bkey_disassemble(b, _k, &unpacked);

		ret = bch2_mark_overwrite(trans, iter, k, insert,
					  fs_usage, flags,
					  btree_node_type_is_extents(iter->btree_id));
		if (ret <= 0)
			break;

		bch2_btree_node_iter_advance(&node_iter, b);
	}

	return ret;
}
void bch2_trans_fs_usage_apply(struct btree_trans *trans,
			       struct bch_fs_usage_online *fs_usage)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;
	static int warned_disk_usage = 0;
	u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
	char buf[200];

	if (!bch2_fs_usage_apply(c, fs_usage, trans->disk_res,
				 trans->journal_res.seq) ||
	    warned_disk_usage ||
	    xchg(&warned_disk_usage, 1))
		return;

	bch_err(c, "disk usage increased more than %llu sectors reserved",
		disk_res_sectors);

	trans_for_each_update(trans, i) {
		struct btree_iter	*iter = i->iter;
		struct btree		*b = iter->l[0].b;
		struct btree_node_iter	node_iter = iter->l[0].iter;
		struct bkey_packed	*_k;

		pr_err("while inserting");
		bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
		pr_err("%s", buf);
		pr_err("overlapping with");

		node_iter = iter->l[0].iter;
		while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
			struct bkey		unpacked;
			struct bkey_s_c		k;

			k = bkey_disassemble(b, _k, &unpacked);

			if (btree_node_is_extents(b)
			    ? bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) <= 0
			    : bkey_cmp(i->k->k.p, k.k->p))
				break;

			bch2_bkey_val_to_text(&PBUF(buf), c, k);
			pr_err("%s", buf);

			bch2_btree_node_iter_advance(&node_iter, b);
		}
	}
}
/* trans_mark: */

static int trans_get_key(struct btree_trans *trans,
			 enum btree_id btree_id, struct bpos pos,
			 struct btree_iter **iter,
			 struct bkey_s_c *k)
{
	struct btree_insert_entry *i;
	int ret;

	trans_for_each_update(trans, i)
		if (i->iter->btree_id == btree_id &&
		    (btree_node_type_is_extents(btree_id)
		     ? bkey_cmp(pos, bkey_start_pos(&i->k->k)) >= 0 &&
		       bkey_cmp(pos, i->k->k.p) < 0
		     : !bkey_cmp(pos, i->iter->pos))) {
			*iter	= i->iter;
			*k	= bkey_i_to_s_c(i->k);
			return 1;
		}

	*iter = bch2_trans_get_iter(trans, btree_id, pos,
				    BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
	if (IS_ERR(*iter))
		return PTR_ERR(*iter);

	*k = bch2_btree_iter_peek_slot(*iter);
	ret = bkey_err(*k);
	if (ret)
		bch2_trans_iter_put(trans, *iter);
	return ret;
}
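
/*
 * trans_get_key() above returns 1 and borrows the iterator/key from the
 * transaction's own pending updates when pos is covered by one of them,
 * 0 when the key was read through a fresh btree iterator, or a negative
 * error code.
 */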
static int bch2_trans_mark_pointer(struct btree_trans *trans,
			struct bkey_s_c k, struct extent_ptr_decoded p,
			s64 sectors, enum bch_data_type data_type)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
	struct btree_iter *iter;
	struct bkey_s_c k_a;
	struct bkey_alloc_unpacked u;
	struct bkey_i_alloc *a;
	int ret;

	ret = trans_get_key(trans, BTREE_ID_ALLOC,
			    POS(p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr)),
			    &iter, &k_a);
	if (ret < 0)
		return ret;

	if (k_a.k->type != KEY_TYPE_alloc ||
	    (!ret && unlikely(!test_bit(BCH_FS_ALLOC_WRITTEN, &c->flags)))) {
		/*
		 * During journal replay, and if gc repairs alloc info at
		 * runtime, the alloc info in the btree might not be up to date
		 * yet - so, trust the in memory mark - unless we're already
		 * updating that key:
		 */
		struct bucket *g;
		struct bucket_mark m;

		percpu_down_read(&c->mark_lock);
		g	= bucket(ca, iter->pos.offset);
		m	= READ_ONCE(g->mark);
		u	= alloc_mem_to_key(g, m);
		percpu_up_read(&c->mark_lock);
	} else {
		u = bch2_alloc_unpack(k_a);
	}

	ret = __mark_pointer(c, k, p, sectors, data_type, u.gen, &u.data_type,
			     &u.dirty_sectors, &u.cached_sectors);
	if (ret)
		goto out;

	a = bch2_trans_kmalloc(trans, BKEY_ALLOC_U64s_MAX * 8);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	bkey_alloc_init(&a->k_i);
	a->k.p = iter->pos;
	bch2_alloc_pack(a, u);
	bch2_trans_update(trans, iter, &a->k_i, 0);
out:
	bch2_trans_iter_put(trans, iter);
	return ret;
}
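/*
 * For an erasure coded pointer: bump the per-block sector count in the stripe
 * key it points into, queue the updated stripe key, and report the stripe's
 * replicas entry and data/parity geometry back to the caller.
 */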
static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
			struct bch_extent_stripe_ptr p,
			s64 sectors, enum bch_data_type data_type,
			struct bch_replicas_padded *r,
			unsigned *nr_data,
			unsigned *nr_parity)
{
	struct bch_fs *c = trans->c;
	struct btree_iter *iter;
	struct bkey_s_c k;
	struct bkey_i_stripe *s;
	int ret = 0;

	ret = trans_get_key(trans, BTREE_ID_EC, POS(0, p.idx), &iter, &k);
	if (ret < 0)
		return ret;

	if (k.k->type != KEY_TYPE_stripe) {
		bch2_fs_inconsistent(c,
			"pointer to nonexistent stripe %llu",
			(u64) p.idx);
		ret = -EIO;
		goto out;
	}

	s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
	ret = PTR_ERR_OR_ZERO(s);
	if (ret)
		goto out;

	bkey_reassemble(&s->k_i, k);

	stripe_blockcount_set(&s->v, p.block,
		stripe_blockcount_get(&s->v, p.block) +
		sectors);

	*nr_data	= s->v.nr_blocks - s->v.nr_redundant;
	*nr_parity	= s->v.nr_redundant;
	bch2_bkey_to_replicas(&r->e, bkey_i_to_s_c(&s->k_i));
	bch2_trans_update(trans, iter, &s->k_i, 0);
out:
	bch2_trans_iter_put(trans, iter);
	return ret;
}
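/*
 * Walk every pointer in an extent (or btree pointer): cached pointers only
 * adjust cached sector counts, erasure coded pointers also update their
 * stripe, and everything else accumulates into a single replicas entry that
 * is charged with the dirty sectors at the end.
 */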
static int bch2_trans_mark_extent(struct btree_trans *trans,
			struct bkey_s_c k, unsigned offset,
			s64 sectors, unsigned flags,
			enum bch_data_type data_type)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_replicas_padded r;
	s64 dirty_sectors = 0;
	bool stale;
	int ret;

	r.e.data_type	= data_type;
	r.e.nr_devs	= 0;
	r.e.nr_required	= 1;

	BUG_ON(!sectors);

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		s64 disk_sectors = data_type == BCH_DATA_BTREE
			? sectors
			: ptr_disk_sectors_delta(p, offset, sectors, flags);

		ret = bch2_trans_mark_pointer(trans, k, p, disk_sectors,
					      data_type);
		if (ret < 0)
			return ret;

		stale = ret > 0;

		if (p.ptr.cached) {
			if (!stale)
				update_cached_sectors_list(trans, p.ptr.dev,
							   disk_sectors);
		} else if (!p.has_ec) {
			dirty_sectors	       += disk_sectors;
			r.e.devs[r.e.nr_devs++]	= p.ptr.dev;
		} else {
			struct bch_replicas_padded ec_r;
			unsigned nr_data, nr_parity;
			s64 parity_sectors;

			ret = bch2_trans_mark_stripe_ptr(trans, p.ec,
					disk_sectors, data_type,
					&ec_r, &nr_data, &nr_parity);
			if (ret)
				return ret;

			parity_sectors =
				__ptr_disk_sectors_delta(p.crc.live_size,
					offset, sectors, flags,
					p.crc.compressed_size * nr_parity,
					p.crc.uncompressed_size * nr_data);

			update_replicas_list(trans, &ec_r.e,
					     disk_sectors + parity_sectors);

			r.e.nr_required = 0;
		}
	}

	if (r.e.nr_devs)
		update_replicas_list(trans, &r.e, dirty_sectors);

	return 0;
}
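/*
 * Adjust the refcount of the indirect extent at @idx by +1 (insert) or -1
 * (overwrite), deleting it when the refcount hits zero. Returns the number of
 * sectors handled, so the caller can advance and repeat for the next indirect
 * extent in the range.
 */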
static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
			struct bkey_s_c_reflink_p p,
			u64 idx, unsigned sectors,
			unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct btree_iter *iter;
	struct bkey_s_c k;
	struct bkey_i_reflink_v *r_v;
	s64 ret;

	ret = trans_get_key(trans, BTREE_ID_REFLINK,
			    POS(0, idx), &iter, &k);
	if (ret < 0)
		return ret;

	if (k.k->type != KEY_TYPE_reflink_v) {
		bch2_fs_inconsistent(c,
			"%llu:%llu len %u points to nonexistent indirect extent %llu",
			p.k->p.inode, p.k->p.offset, p.k->size, idx);
		ret = -EIO;
		goto err;
	}

	if ((flags & BTREE_TRIGGER_OVERWRITE) &&
	    (bkey_start_offset(k.k) < idx ||
	     k.k->p.offset > idx + sectors))
		goto out;

	sectors = k.k->p.offset - idx;

	r_v = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
	ret = PTR_ERR_OR_ZERO(r_v);
	if (ret)
		goto err;

	bkey_reassemble(&r_v->k_i, k);

	le64_add_cpu(&r_v->v.refcount,
		     !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1);

	if (!r_v->v.refcount) {
		r_v->k.type = KEY_TYPE_deleted;
		set_bkey_val_u64s(&r_v->k, 0);
	}

	bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
	BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);

	bch2_trans_update(trans, iter, &r_v->k_i, 0);
out:
	ret = sectors;
err:
	bch2_trans_iter_put(trans, iter);
	return ret;
}
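/*
 * A reflink pointer may span several indirect extents: walk the range it
 * covers, updating each indirect extent's refcount in turn.
 */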
static int bch2_trans_mark_reflink_p(struct btree_trans *trans,
			struct bkey_s_c_reflink_p p, unsigned offset,
			s64 sectors, unsigned flags)
{
	u64 idx = le64_to_cpu(p.v->idx) + offset;
	s64 ret = 0;

	sectors = abs(sectors);
	BUG_ON(offset + sectors > p.k->size);

	while (sectors) {
		ret = __bch2_trans_mark_reflink_p(trans, p, idx, sectors, flags);
		if (ret < 0)
			break;

		idx += ret;
		sectors = max_t(s64, 0LL, sectors - ret);
		ret = 0;
	}

	return ret;
}
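/*
 * Transactional trigger dispatch: apply the accounting updates implied by
 * inserting or overwriting @k, by key type. Btree pointers are always charged
 * a full btree node's worth of sectors.
 */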
int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c k,
			unsigned offset, s64 sectors, unsigned flags)
{
	struct replicas_delta_list *d;
	struct bch_fs *c = trans->c;

	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
			?  c->opts.btree_node_size
			: -c->opts.btree_node_size;

		return bch2_trans_mark_extent(trans, k, offset, sectors,
					      flags, BCH_DATA_BTREE);
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		return bch2_trans_mark_extent(trans, k, offset, sectors,
					      flags, BCH_DATA_USER);
	case KEY_TYPE_inode:
		d = replicas_deltas_realloc(trans, 0);

		if (!(flags & BTREE_TRIGGER_OVERWRITE))
			d->nr_inodes++;
		else
			d->nr_inodes--;
		return 0;
	case KEY_TYPE_reservation: {
		unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;

		d = replicas_deltas_realloc(trans, 0);

		sectors *= replicas;
		replicas = clamp_t(unsigned, replicas, 1,
				   ARRAY_SIZE(d->persistent_reserved));

		d->persistent_reserved[replicas - 1] += sectors;
		return 0;
	}
	case KEY_TYPE_reflink_p:
		return bch2_trans_mark_reflink_p(trans,
					bkey_s_c_to_reflink_p(k),
					offset, sectors, flags);
	default:
		return 0;
	}
}
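/*
 * Run transactional triggers for an update: mark the key being inserted, then
 * walk the keys it overlaps in the leaf node and mark each overwritten (or
 * partially overwritten) range with the appropriate negative sector delta.
 */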
int bch2_trans_mark_update(struct btree_trans *trans,
			   struct btree_iter *iter,
			   struct bkey_i *insert,
			   unsigned flags)
{
	struct btree *b = iter->l[0].b;
	struct btree_node_iter node_iter = iter->l[0].iter;
	struct bkey_packed *_k;
	int ret;

	if (unlikely(flags & BTREE_TRIGGER_NORUN))
		return 0;

	if (!btree_node_type_needs_gc(iter->btree_id))
		return 0;

	ret = bch2_trans_mark_key(trans, bkey_i_to_s_c(insert),
				  0, insert->k.size, BTREE_TRIGGER_INSERT);
	if (ret)
		return ret;

	if (unlikely(flags & BTREE_TRIGGER_NOOVERWRITES))
		return 0;

	while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
		struct bkey		unpacked;
		struct bkey_s_c		k;
		unsigned		offset = 0;
		s64			sectors = 0;
		unsigned		flags = BTREE_TRIGGER_OVERWRITE;

		k = bkey_disassemble(b, _k, &unpacked);

		if (btree_node_is_extents(b)
		    ? bkey_cmp(insert->k.p, bkey_start_pos(k.k)) <= 0
		    : bkey_cmp(insert->k.p, k.k->p))
			break;

		if (btree_node_is_extents(b)) {
			switch (bch2_extent_overlap(&insert->k, k.k)) {
			case BCH_EXTENT_OVERLAP_ALL:
				offset = 0;
				sectors = -((s64) k.k->size);
				break;
			case BCH_EXTENT_OVERLAP_BACK:
				offset = bkey_start_offset(&insert->k) -
					bkey_start_offset(k.k);
				sectors = bkey_start_offset(&insert->k) -
					k.k->p.offset;
				break;
			case BCH_EXTENT_OVERLAP_FRONT:
				offset = 0;
				sectors = bkey_start_offset(k.k) -
					insert->k.p.offset;
				break;
			case BCH_EXTENT_OVERLAP_MIDDLE:
				offset = bkey_start_offset(&insert->k) -
					bkey_start_offset(k.k);
				sectors = -((s64) insert->k.size);
				flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
				break;
			}

			BUG_ON(sectors >= 0);
		}

		ret = bch2_trans_mark_key(trans, k, offset, sectors, flags);
		if (ret)
			return ret;

		bch2_btree_node_iter_advance(&node_iter, b);
	}

	return 0;
}
/* Disk reservations: */

#define SECTORS_CACHE	1024
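/*
 * Disk reservations are taken from a per-cpu cache of available sectors when
 * possible; the cache is refilled from the filesystem-wide atomic counter in
 * batches of SECTORS_CACHE. Only when that counter is also exhausted do we
 * fall back to recomputing free space under sectors_available_lock.
 */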
int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
			      unsigned sectors, int flags)
{
	struct bch_fs_pcpu *pcpu;
	u64 old, v, get;
	s64 sectors_available;
	int ret;

	percpu_down_read(&c->mark_lock);
	preempt_disable();
	pcpu = this_cpu_ptr(c->pcpu);

	if (sectors <= pcpu->sectors_available)
		goto out;

	v = atomic64_read(&c->sectors_available);
	do {
		old = v;
		get = min((u64) sectors + SECTORS_CACHE, old);

		if (get < sectors) {
			preempt_enable();
			goto recalculate;
		}
	} while ((v = atomic64_cmpxchg(&c->sectors_available,
				       old, old - get)) != old);

	pcpu->sectors_available		+= get;

out:
	pcpu->sectors_available		-= sectors;
	this_cpu_add(*c->online_reserved, sectors);
	res->sectors			+= sectors;

	preempt_enable();
	percpu_up_read(&c->mark_lock);
	return 0;

recalculate:
	mutex_lock(&c->sectors_available_lock);

	percpu_u64_set(&c->pcpu->sectors_available, 0);
	sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);

	if (sectors <= sectors_available ||
	    (flags & BCH_DISK_RESERVATION_NOFAIL)) {
		atomic64_set(&c->sectors_available,
			     max_t(s64, 0, sectors_available - sectors));
		this_cpu_add(*c->online_reserved, sectors);
		res->sectors			+= sectors;
		ret = 0;
	} else {
		atomic64_set(&c->sectors_available, sectors_available);
		ret = -ENOSPC;
	}

	mutex_unlock(&c->sectors_available_lock);
	percpu_up_read(&c->mark_lock);

	return ret;
}
/* Startup/shutdown: */

static void buckets_free_rcu(struct rcu_head *rcu)
{
	struct bucket_array *buckets =
		container_of(rcu, struct bucket_array, rcu);

	kvpfree(buckets,
		sizeof(struct bucket_array) +
		buckets->nbuckets * sizeof(struct bucket));
}
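/*
 * Resizing the bucket arrays: allocate the new arrays, fifos and heaps up
 * front, then (if this is a live resize) swap them in under gc_lock,
 * bucket_lock and mark_lock, and finally free the old bucket array with
 * call_rcu(), since RCU readers may still be using it.
 */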
int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
	struct bucket_array *buckets = NULL, *old_buckets = NULL;
	unsigned long *buckets_nouse = NULL;
	alloc_fifo	free[RESERVE_NR];
	alloc_fifo	free_inc;
	alloc_heap	alloc_heap;
	copygc_heap	copygc_heap;
	size_t btree_reserve	= DIV_ROUND_UP(BTREE_NODE_RESERVE,
			     ca->mi.bucket_size / c->opts.btree_node_size);
	/* XXX: these should be tunable */
	size_t reserve_none	= max_t(size_t, 1, nbuckets >> 9);
	size_t copygc_reserve	= max_t(size_t, 2, nbuckets >> 7);
	size_t free_inc_nr	= max(max_t(size_t, 1, nbuckets >> 12),
				      btree_reserve * 2);
	bool resize = ca->buckets[0] != NULL,
	     start_copygc = ca->copygc_thread != NULL;
	int ret = -ENOMEM;
	unsigned i;

	memset(&free,		0, sizeof(free));
	memset(&free_inc,	0, sizeof(free_inc));
	memset(&alloc_heap,	0, sizeof(alloc_heap));
	memset(&copygc_heap,	0, sizeof(copygc_heap));

	if (!(buckets		= kvpmalloc(sizeof(struct bucket_array) +
					    nbuckets * sizeof(struct bucket),
					    GFP_KERNEL|__GFP_ZERO)) ||
	    !(buckets_nouse	= kvpmalloc(BITS_TO_LONGS(nbuckets) *
					    sizeof(unsigned long),
					    GFP_KERNEL|__GFP_ZERO)) ||
	    !init_fifo(&free[RESERVE_BTREE], btree_reserve, GFP_KERNEL) ||
	    !init_fifo(&free[RESERVE_MOVINGGC],
		       copygc_reserve, GFP_KERNEL) ||
	    !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
	    !init_fifo(&free_inc,	free_inc_nr, GFP_KERNEL) ||
	    !init_heap(&alloc_heap,	ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL) ||
	    !init_heap(&copygc_heap,	copygc_reserve, GFP_KERNEL))
		goto err;

	buckets->first_bucket	= ca->mi.first_bucket;
	buckets->nbuckets	= nbuckets;

	bch2_copygc_stop(ca);

	if (resize) {
		down_write(&c->gc_lock);
		down_write(&ca->bucket_lock);
		percpu_down_write(&c->mark_lock);
	}

	old_buckets = bucket_array(ca);

	if (resize) {
		size_t n = min(buckets->nbuckets, old_buckets->nbuckets);

		memcpy(buckets->b,
		       old_buckets->b,
		       n * sizeof(struct bucket));
		memcpy(buckets_nouse,
		       ca->buckets_nouse,
		       BITS_TO_LONGS(n) * sizeof(unsigned long));
	}

	rcu_assign_pointer(ca->buckets[0], buckets);
	buckets = old_buckets;

	swap(ca->buckets_nouse, buckets_nouse);

	if (resize)
		percpu_up_write(&c->mark_lock);

	spin_lock(&c->freelist_lock);
	for (i = 0; i < RESERVE_NR; i++) {
		fifo_move(&free[i], &ca->free[i]);
		swap(ca->free[i], free[i]);
	}
	fifo_move(&free_inc, &ca->free_inc);
	swap(ca->free_inc, free_inc);
	spin_unlock(&c->freelist_lock);

	/* with gc lock held, alloc_heap can't be in use: */
	swap(ca->alloc_heap, alloc_heap);

	/* and we shut down copygc: */
	swap(ca->copygc_heap, copygc_heap);

	nbuckets = ca->mi.nbuckets;

	if (resize) {
		up_write(&ca->bucket_lock);
		up_write(&c->gc_lock);
	}

	if (start_copygc &&
	    bch2_copygc_start(c, ca))
		bch_err(ca, "error restarting copygc thread");

	ret = 0;
err:
	free_heap(&copygc_heap);
	free_heap(&alloc_heap);
	free_fifo(&free_inc);
	for (i = 0; i < RESERVE_NR; i++)
		free_fifo(&free[i]);
	kvpfree(buckets_nouse,
		BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
	if (buckets)
		call_rcu(&old_buckets->rcu, buckets_free_rcu);

	return ret;
}
void bch2_dev_buckets_free(struct bch_dev *ca)
{
	unsigned i;

	free_heap(&ca->copygc_heap);
	free_heap(&ca->alloc_heap);
	free_fifo(&ca->free_inc);
	for (i = 0; i < RESERVE_NR; i++)
		free_fifo(&ca->free[i]);
	kvpfree(ca->buckets_nouse,
		BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
	kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
		sizeof(struct bucket_array) +
		ca->mi.nbuckets * sizeof(struct bucket));

	free_percpu(ca->usage[0]);
}
int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
{
	if (!(ca->usage[0] = alloc_percpu(struct bch_dev_usage)))
		return -ENOMEM;

	return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
}