// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright (C) 2014 Datera Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_locking.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "debug.h"
#include "ec.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "journal_io.h"
#include "keylist.h"
#include "move.h"
#include "replicas.h"
#include "super-io.h"
#include "trace.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>

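/*
 * c->gc_pos records how far mark and sweep has gotten; it's written under
 * gc_pos_lock (a seqcount) so that code doing index updates can tell whether
 * GC has already walked past the position being updated.
 */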
static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
{
	preempt_disable();
	write_seqcount_begin(&c->gc_pos_lock);
	c->gc_pos = new_pos;
	write_seqcount_end(&c->gc_pos_lock);
	preempt_enable();
}

static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
{
	BUG_ON(gc_pos_cmp(new_pos, c->gc_pos) <= 0);
	__gc_pos_set(c, new_pos);
}

/* range_checks - for validating min/max pos of each btree node: */

struct range_checks {
	struct range_level {
		struct bpos	min;
		struct bpos	max;
	}			l[BTREE_MAX_DEPTH];
	unsigned		depth;
};

static void btree_node_range_checks_init(struct range_checks *r, unsigned depth)
{
	unsigned i;

	for (i = 0; i < BTREE_MAX_DEPTH; i++)
		r->l[i].min = r->l[i].max = POS_MIN;
	r->depth = depth;
}

static void btree_node_range_checks(struct bch_fs *c, struct btree *b,
				    struct range_checks *r)
{
	struct range_level *l = &r->l[b->level];

	struct bpos expected_min = bkey_cmp(l->min, l->max)
		? btree_type_successor(b->btree_id, l->max)
		: l->max;

	bch2_fs_inconsistent_on(bkey_cmp(b->data->min_key, expected_min), c,
		"btree node has incorrect min key: %llu:%llu != %llu:%llu",
		b->data->min_key.inode,
		b->data->min_key.offset,
		expected_min.inode,
		expected_min.offset);

	l->max = b->data->max_key;

	if (b->level > r->depth) {
		l = &r->l[b->level - 1];

		bch2_fs_inconsistent_on(bkey_cmp(b->data->min_key, l->min), c,
			"btree node min doesn't match min of child nodes: %llu:%llu != %llu:%llu",
			b->data->min_key.inode,
			b->data->min_key.offset,
			l->min.inode,
			l->min.offset);

		bch2_fs_inconsistent_on(bkey_cmp(b->data->max_key, l->max), c,
			"btree node max doesn't match max of child nodes: %llu:%llu != %llu:%llu",
			b->data->max_key.inode,
			b->data->max_key.offset,
			l->max.inode,
			l->max.offset);

		if (bkey_cmp(b->data->max_key, POS_MAX))
			l->min = l->max =
				btree_type_successor(b->btree_id,
						     b->data->max_key);
	}
}

/* marking of btree keys/nodes: */

static void ptr_gen_recalc_oldest(struct bch_fs *c,
				  const struct bch_extent_ptr *ptr,
				  u8 *max_stale)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
	size_t b = PTR_BUCKET_NR(ca, ptr);

	if (gen_after(ca->oldest_gens[b], ptr->gen))
		ca->oldest_gens[b] = ptr->gen;

	*max_stale = max(*max_stale, ptr_stale(ca, ptr));
}

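/*
 * Walk all the pointers in a key, updating each bucket's oldest_gens entry
 * and returning the maximum pointer staleness seen:
 */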
static u8 ptr_gens_recalc_oldest(struct bch_fs *c,
				 enum bkey_type type,
				 struct bkey_s_c k)
{
	const struct bch_extent_ptr *ptr;
	u8 max_stale = 0;

	switch (type) {
	case BKEY_TYPE_BTREE:
	case BKEY_TYPE_EXTENTS:
		switch (k.k->type) {
		case BCH_EXTENT:
		case BCH_EXTENT_CACHED: {
			struct bkey_s_c_extent e = bkey_s_c_to_extent(k);

			extent_for_each_ptr(e, ptr)
				ptr_gen_recalc_oldest(c, ptr, &max_stale);
			break;
		}
		}
		break;
	case BKEY_TYPE_EC:
		switch (k.k->type) {
		case BCH_STRIPE: {
			struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);

			for (ptr = s.v->ptrs;
			     ptr < s.v->ptrs + s.v->nr_blocks;
			     ptr++)
				ptr_gen_recalc_oldest(c, ptr, &max_stale);
		}
		}
	default:
		break;
	}

	return max_stale;
}

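/*
 * Initial GC/fsck: verify a pointer's bucket gen, repairing missing gens and
 * gens from the future; fixing a future gen also sets BCH_FS_FIXED_GENS so
 * that mark and sweep is restarted:
 */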
static int ptr_gen_check(struct bch_fs *c,
			 enum bkey_type type,
			 const struct bch_extent_ptr *ptr)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
	size_t b = PTR_BUCKET_NR(ca, ptr);
	struct bucket *g = PTR_BUCKET(ca, ptr);
	int ret = 0;

	if (mustfix_fsck_err_on(!g->mark.gen_valid, c,
			"found ptr with missing gen in alloc btree,\n"
			"type %u gen %u",
			type, ptr->gen)) {
		g->_mark.gen = ptr->gen;
		g->_mark.gen_valid = 1;
		set_bit(b, ca->buckets_dirty);
	}

	if (mustfix_fsck_err_on(gen_cmp(ptr->gen, g->mark.gen) > 0, c,
			"%u ptr gen in the future: %u > %u",
			type, ptr->gen, g->mark.gen)) {
		g->_mark.gen = ptr->gen;
		g->_mark.gen_valid = 1;
		set_bit(b, ca->buckets_dirty);
		set_bit(BCH_FS_FIXED_GENS, &c->flags);
	}

fsck_err:
	return ret;
}

static int ptr_gens_check(struct bch_fs *c, enum bkey_type type,
			  struct bkey_s_c k)
{
	const struct bch_extent_ptr *ptr;
	int ret = 0;

	switch (type) {
	case BKEY_TYPE_BTREE:
	case BKEY_TYPE_EXTENTS:
		switch (k.k->type) {
		case BCH_EXTENT:
		case BCH_EXTENT_CACHED: {
			struct bkey_s_c_extent e = bkey_s_c_to_extent(k);

			extent_for_each_ptr(e, ptr) {
				ret = ptr_gen_check(c, type, ptr);
				if (ret)
					return ret;
			}
			break;
		}
		}
		break;
	case BKEY_TYPE_EC:
		switch (k.k->type) {
		case BCH_STRIPE: {
			struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);

			for (ptr = s.v->ptrs;
			     ptr < s.v->ptrs + s.v->nr_blocks;
			     ptr++) {
				ret = ptr_gen_check(c, type, ptr);
				if (ret)
					return ret;
			}
		}
		}
		break;
	default:
		break;
	}

	return ret;
}

/*
 * For runtime mark and sweep:
 */
static int bch2_gc_mark_key(struct bch_fs *c, enum bkey_type type,
			    struct bkey_s_c k, bool initial)
{
	struct gc_pos pos = { 0 };
	unsigned flags =
		BCH_BUCKET_MARK_GC|
		(initial ? BCH_BUCKET_MARK_NOATOMIC : 0);
	int ret = 0;

	if (initial) {
		BUG_ON(journal_seq_verify(c) &&
		       k.k->version.lo > journal_cur_seq(&c->journal));

		if (k.k->version.lo > atomic64_read(&c->key_version))
			atomic64_set(&c->key_version, k.k->version.lo);

		if (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
		    fsck_err_on(!bch2_bkey_replicas_marked(c, type, k,
							   false), c,
				"superblock not marked as containing replicas (type %u)",
				type)) {
			ret = bch2_mark_bkey_replicas(c, type, k);
			if (ret)
				return ret;
		}

		ret = ptr_gens_check(c, type, k);
		if (ret)
			return ret;
	}

	bch2_mark_key(c, type, k, true, k.k->size, pos, NULL, 0, flags);
	ret = ptr_gens_recalc_oldest(c, type, k);
fsck_err:
	return ret;
}

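/*
 * Mark every key in a btree node, returning the maximum pointer staleness
 * seen (or a negative error code):
 */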
static int btree_gc_mark_node(struct bch_fs *c, struct btree *b,
			      bool initial)
{
	enum bkey_type type = btree_node_type(b);
	struct btree_node_iter iter;
	struct bkey unpacked;
	struct bkey_s_c k;
	u8 stale = 0;
	int ret;

	if (!bkey_type_needs_gc(type))
		return 0;

	for_each_btree_node_key_unpack(b, k, &iter,
				       &unpacked) {
		bch2_bkey_debugcheck(c, b, k);

		ret = bch2_gc_mark_key(c, type, k, initial);
		if (ret < 0)
			return ret;

		stale = max_t(u8, stale, ret);
	}

	return stale;
}

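/*
 * Walk one btree, marking every key; at runtime, nodes with too many stale
 * pointers are also rewritten here:
 */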
static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
			 bool initial)
{
	struct btree_iter iter;
	struct btree *b;
	struct range_checks r;
	unsigned depth = bkey_type_needs_gc(btree_id) ? 0 : 1;
	unsigned max_stale;
	int ret = 0;

	gc_pos_set(c, gc_pos_btree(btree_id, POS_MIN, 0));

	/*
	 * if expensive_debug_checks is on, run range_checks on all leaf nodes:
	 *
	 * and on startup, we have to read every btree node (XXX: only if it was
	 * an unclean shutdown)
	 */
	if (initial || expensive_debug_checks(c))
		depth = 0;

	btree_node_range_checks_init(&r, depth);

	__for_each_btree_node(&iter, c, btree_id, POS_MIN,
			      0, depth, BTREE_ITER_PREFETCH, b) {
		btree_node_range_checks(c, b, &r);

		bch2_verify_btree_nr_keys(b);

		max_stale = btree_gc_mark_node(c, b, initial);

		gc_pos_set(c, gc_pos_btree_node(b));

		if (!initial) {
			if (max_stale > 64)
				bch2_btree_node_rewrite(c, &iter,
						b->data->keys.seq,
						BTREE_INSERT_USE_RESERVE|
						BTREE_INSERT_NOWAIT|
						BTREE_INSERT_GC_LOCK_HELD);
			else if (!btree_gc_rewrite_disabled(c) &&
				 (btree_gc_always_rewrite(c) || max_stale > 16))
				bch2_btree_node_rewrite(c, &iter,
						b->data->keys.seq,
						BTREE_INSERT_NOWAIT|
						BTREE_INSERT_GC_LOCK_HELD);
		}

		bch2_btree_iter_cond_resched(&iter);
	}
	ret = bch2_btree_iter_unlock(&iter);
	if (ret)
		return ret;

	mutex_lock(&c->btree_root_lock);

	b = c->btree_roots[btree_id].b;
	if (!btree_node_fake(b))
		bch2_gc_mark_key(c, BKEY_TYPE_BTREE,
				 bkey_i_to_s_c(&b->key), initial);
	gc_pos_set(c, gc_pos_btree_root(b->btree_id));

	mutex_unlock(&c->btree_root_lock);
	return 0;
}

static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r)
{
	return  (int) btree_id_to_gc_phase(l) -
		(int) btree_id_to_gc_phase(r);
}

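/*
 * Walk all btrees in GC phase order; during initial GC, keys still sitting in
 * the journal are marked as well:
 */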
static int bch2_gc_btrees(struct bch_fs *c, struct list_head *journal,
			  bool initial)
{
	enum btree_id ids[BTREE_ID_NR];
	unsigned i;

	for (i = 0; i < BTREE_ID_NR; i++)
		ids[i] = i;
	bubble_sort(ids, BTREE_ID_NR, btree_id_gc_phase_cmp);

	for (i = 0; i < BTREE_ID_NR; i++) {
		enum btree_id id = ids[i];
		enum bkey_type type = bkey_type(0, id);
		int ret = bch2_gc_btree(c, id, initial);

		if (ret)
			return ret;

		if (journal && bkey_type_needs_gc(type)) {
			struct bkey_i *k, *n;
			struct jset_entry *j;
			struct journal_replay *r;
			int ret;

			list_for_each_entry(r, journal, list)
				for_each_jset_key(k, n, j, &r->j) {
					if (type == bkey_type(j->level, j->btree_id)) {
						ret = bch2_gc_mark_key(c, type,
							bkey_i_to_s_c(k), initial);
						if (ret < 0)
							return ret;
					}
				}
		}
	}

	return 0;
}

static void mark_metadata_sectors(struct bch_fs *c, struct bch_dev *ca,
				  u64 start, u64 end,
				  enum bch_data_type type,
				  unsigned flags)
{
	u64 b = sector_to_bucket(ca, start);

	do {
		unsigned sectors =
			min_t(u64, bucket_to_sector(ca, b + 1), end) - start;

		bch2_mark_metadata_bucket(c, ca, b, type, sectors,
					  gc_phase(GC_PHASE_SB), flags);
		b++;
		start += sectors;
	} while (start < end);
}

void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
			      unsigned flags)
{
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	unsigned i;
	u64 b;

	/*
	 * This conditional is kind of gross, but we may be called from the
	 * device add path, before the new device has actually been added to
	 * the running filesystem:
	 */
	if (c) {
		lockdep_assert_held(&c->sb_lock);
		percpu_down_read(&c->usage_lock);
	} else {
		preempt_disable();
	}

	for (i = 0; i < layout->nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout->sb_offset[i]);

		if (offset == BCH_SB_SECTOR)
			mark_metadata_sectors(c, ca, 0, BCH_SB_SECTOR,
					      BCH_DATA_SB, flags);

		mark_metadata_sectors(c, ca, offset,
				      offset + (1 << layout->sb_max_size_bits),
				      BCH_DATA_SB, flags);
	}

	for (i = 0; i < ca->journal.nr; i++) {
		b = ca->journal.buckets[i];
		bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_JOURNAL,
					  ca->mi.bucket_size,
					  gc_phase(GC_PHASE_SB), flags);
	}

	if (c) {
		percpu_up_read(&c->usage_lock);
	} else {
		preempt_enable();
	}
}

static void bch2_mark_superblocks(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;

	mutex_lock(&c->sb_lock);
	gc_pos_set(c, gc_phase(GC_PHASE_SB));

	for_each_online_member(ca, c, i)
		bch2_mark_dev_superblock(c, ca, BCH_BUCKET_MARK_GC);
	mutex_unlock(&c->sb_lock);
}

/* Also see bch2_pending_btree_node_free_insert_done() */
static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
{
	struct gc_pos pos = { 0 };
	struct btree_update *as;
	struct pending_btree_node_free *d;

	mutex_lock(&c->btree_interior_update_lock);
	gc_pos_set(c, gc_phase(GC_PHASE_PENDING_DELETE));

	for_each_pending_btree_node_free(c, as, d)
		if (d->index_update_done)
			bch2_mark_key(c, BKEY_TYPE_BTREE,
				      bkey_i_to_s_c(&d->key),
				      true, 0,
				      pos, NULL, 0,
				      BCH_BUCKET_MARK_GC);

	mutex_unlock(&c->btree_interior_update_lock);
}

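/*
 * Mark buckets currently owned by the allocator (free_inc, the freelists and
 * open_buckets) in GC's copy of the bucket marks, so GC doesn't see them as
 * unused:
 */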
static void bch2_mark_allocator_buckets(struct bch_fs *c)
{
	struct bch_dev *ca;
	struct open_bucket *ob;
	size_t i, j, iter;
	unsigned ci;

	percpu_down_read(&c->usage_lock);

	spin_lock(&c->freelist_lock);
	gc_pos_set(c, gc_pos_alloc(c, NULL));

	for_each_member_device(ca, c, ci) {
		fifo_for_each_entry(i, &ca->free_inc, iter)
			bch2_mark_alloc_bucket(c, ca, i, true,
					       gc_pos_alloc(c, NULL),
					       BCH_BUCKET_MARK_GC);

		for (j = 0; j < RESERVE_NR; j++)
			fifo_for_each_entry(i, &ca->free[j], iter)
				bch2_mark_alloc_bucket(c, ca, i, true,
						       gc_pos_alloc(c, NULL),
						       BCH_BUCKET_MARK_GC);
	}

	spin_unlock(&c->freelist_lock);

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid) {
			gc_pos_set(c, gc_pos_alloc(c, ob));
			ca = bch_dev_bkey_exists(c, ob->ptr.dev);
			bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr), true,
					       gc_pos_alloc(c, ob),
					       BCH_BUCKET_MARK_GC);
		}
		spin_unlock(&ob->lock);
	}

	percpu_up_read(&c->usage_lock);
}

static void bch2_gc_free(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;

	genradix_free(&c->stripes[1]);

	for_each_member_device(ca, c, i) {
		kvpfree(rcu_dereference_protected(ca->buckets[1], 1),
			sizeof(struct bucket_array) +
			ca->mi.nbuckets * sizeof(struct bucket));
		ca->buckets[1] = NULL;

		free_percpu(ca->usage[1]);
		ca->usage[1] = NULL;
	}

	free_percpu(c->usage[1]);
	c->usage[1] = NULL;
}

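/* Copy GC's version of the stripe, bucket and usage state over without comparing: */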
static void bch2_gc_done_nocheck(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;
	int cpu;

	{
		struct genradix_iter dst_iter = genradix_iter_init(&c->stripes[0], 0);
		struct genradix_iter src_iter = genradix_iter_init(&c->stripes[1], 0);
		struct stripe *dst, *src;

		c->ec_stripes_heap.used = 0;

		while ((dst = genradix_iter_peek(&dst_iter, &c->stripes[0])) &&
		       (src = genradix_iter_peek(&src_iter, &c->stripes[1]))) {
			*dst = *src;

			if (dst->alive)
				bch2_stripes_heap_insert(c, dst, dst_iter.pos);

			genradix_iter_advance(&dst_iter, &c->stripes[0]);
			genradix_iter_advance(&src_iter, &c->stripes[1]);
		}
	}

	for_each_member_device(ca, c, i) {
		struct bucket_array *src = __bucket_array(ca, 1);

		memcpy(__bucket_array(ca, 0), src,
		       sizeof(struct bucket_array) +
		       sizeof(struct bucket) * src->nbuckets);
	};

	for_each_member_device(ca, c, i) {
		struct bch_dev_usage *p;

		for_each_possible_cpu(cpu) {
			p = per_cpu_ptr(ca->usage[0], cpu);
			memset(p, 0, sizeof(*p));
		}

		preempt_disable();
		*this_cpu_ptr(ca->usage[0]) = __bch2_dev_usage_read(ca, 1);
		preempt_enable();
	}

	{
		struct bch_fs_usage src = __bch2_fs_usage_read(c, 1);
		struct bch_fs_usage *p;

		for_each_possible_cpu(cpu) {
			p = per_cpu_ptr(c->usage[0], cpu);
			memset(p, 0, offsetof(typeof(*p), online_reserved));
		}

		preempt_disable();
		memcpy(this_cpu_ptr(c->usage[0]),
		       &src,
		       offsetof(typeof(*p), online_reserved));
		preempt_enable();
	}
}

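/*
 * Compare what GC computed against the existing stripe, bucket and usage
 * counters, logging and fixing any mismatches:
 */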
static void bch2_gc_done(struct bch_fs *c, bool initial)
{
	struct bch_dev *ca;
	unsigned i;
	int cpu;

#define copy_field(_f, _msg, ...)					\
	if (dst._f != src._f) {						\
		bch_err(c, _msg ": got %llu, should be %llu, fixing"	\
			, ##__VA_ARGS__, dst._f, src._f);		\
		dst._f = src._f;					\
	}
#define copy_stripe_field(_f, _msg, ...)				\
	if (dst->_f != src->_f) {					\
		bch_err_ratelimited(c, "stripe %zu has wrong " _msg	\
			": got %u, should be %u, fixing",		\
			dst_iter.pos, ##__VA_ARGS__,			\
			dst->_f, src->_f);				\
		dst->_f = src->_f;					\
	}
#define copy_bucket_field(_f)						\
	if (dst->b[b].mark._f != src->b[b].mark._f) {			\
		bch_err_ratelimited(c, "dev %u bucket %zu has wrong " #_f\
			": got %u, should be %u, fixing",		\
			i, b, dst->b[b].mark._f, src->b[b].mark._f);	\
		dst->b[b]._mark._f = src->b[b].mark._f;			\
	}
#define copy_dev_field(_f, _msg, ...)					\
	copy_field(_f, "dev %u has wrong " _msg, i, ##__VA_ARGS__)
#define copy_fs_field(_f, _msg, ...)					\
	copy_field(_f, "fs has wrong " _msg, ##__VA_ARGS__)

	percpu_down_write(&c->usage_lock);

	if (initial) {
		bch2_gc_done_nocheck(c);
		goto out;
	}

	{
		struct genradix_iter dst_iter = genradix_iter_init(&c->stripes[0], 0);
		struct genradix_iter src_iter = genradix_iter_init(&c->stripes[1], 0);
		struct stripe *dst, *src;
		unsigned i;

		c->ec_stripes_heap.used = 0;

		while ((dst = genradix_iter_peek(&dst_iter, &c->stripes[0])) &&
		       (src = genradix_iter_peek(&src_iter, &c->stripes[1]))) {
			copy_stripe_field(alive,	"alive");
			copy_stripe_field(sectors,	"sectors");
			copy_stripe_field(algorithm,	"algorithm");
			copy_stripe_field(nr_blocks,	"nr_blocks");
			copy_stripe_field(nr_redundant,	"nr_redundant");
			copy_stripe_field(blocks_nonempty.counter,
					  "blocks_nonempty");
			for (i = 0; i < ARRAY_SIZE(dst->block_sectors); i++)
				copy_stripe_field(block_sectors[i].counter,
						  "block_sectors[%u]", i);

			if (dst->alive)
				bch2_stripes_heap_insert(c, dst, dst_iter.pos);

			genradix_iter_advance(&dst_iter, &c->stripes[0]);
			genradix_iter_advance(&src_iter, &c->stripes[1]);
		}
	}

	for_each_member_device(ca, c, i) {
		struct bucket_array *dst = __bucket_array(ca, 0);
		struct bucket_array *src = __bucket_array(ca, 1);
		size_t b;

		if (initial) {
			memcpy(dst, src,
			       sizeof(struct bucket_array) +
			       sizeof(struct bucket) * dst->nbuckets);
		}

		for (b = 0; b < src->nbuckets; b++) {
			copy_bucket_field(gen);
			copy_bucket_field(data_type);
			copy_bucket_field(owned_by_allocator);
			copy_bucket_field(stripe);
			copy_bucket_field(dirty_sectors);
			copy_bucket_field(cached_sectors);
		}
	};

	for_each_member_device(ca, c, i) {
		struct bch_dev_usage dst = __bch2_dev_usage_read(ca, 0);
		struct bch_dev_usage src = __bch2_dev_usage_read(ca, 1);
		struct bch_dev_usage *p;
		unsigned b;

		for (b = 0; b < BCH_DATA_NR; b++)
			copy_dev_field(buckets[b],
				       "buckets[%s]", bch2_data_types[b]);
		copy_dev_field(buckets_alloc, "buckets_alloc");
		copy_dev_field(buckets_ec, "buckets_ec");

		for (b = 0; b < BCH_DATA_NR; b++)
			copy_dev_field(sectors[b],
				       "sectors[%s]", bch2_data_types[b]);
		copy_dev_field(sectors_fragmented,
			       "sectors_fragmented");

		for_each_possible_cpu(cpu) {
			p = per_cpu_ptr(ca->usage[0], cpu);
			memset(p, 0, sizeof(*p));
		}

		preempt_disable();
		p = this_cpu_ptr(ca->usage[0]);
		*p = dst;
		preempt_enable();
	}

	{
		struct bch_fs_usage dst = __bch2_fs_usage_read(c, 0);
		struct bch_fs_usage src = __bch2_fs_usage_read(c, 1);
		struct bch_fs_usage *p;
		unsigned r, b;

		for (r = 0; r < BCH_REPLICAS_MAX; r++) {
			for (b = 0; b < BCH_DATA_NR; b++)
				copy_fs_field(replicas[r].data[b],
					      "replicas[%i].data[%s]",
					      r, bch2_data_types[b]);
			copy_fs_field(replicas[r].ec_data,
				      "replicas[%i].ec_data", r);
			copy_fs_field(replicas[r].persistent_reserved,
				      "replicas[%i].persistent_reserved", r);
		}

		for (b = 0; b < BCH_DATA_NR; b++)
			copy_fs_field(buckets[b],
				      "buckets[%s]", bch2_data_types[b]);

		for_each_possible_cpu(cpu) {
			p = per_cpu_ptr(c->usage[0], cpu);
			memset(p, 0, offsetof(typeof(*p), online_reserved));
		}

		preempt_disable();
		p = this_cpu_ptr(c->usage[0]);
		memcpy(p, &dst, offsetof(typeof(*p), online_reserved));
		preempt_enable();
	}
out:
	percpu_up_write(&c->usage_lock);

#undef copy_fs_field
#undef copy_dev_field
#undef copy_bucket_field
#undef copy_stripe_field
#undef copy_field
}

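/*
 * Allocate the second set of bucket arrays and usage counters that GC marks
 * into, copying over just the bucket gens:
 */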
static int bch2_gc_start(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;

	/*
	 * indicate to stripe code that we need to allocate for the gc stripes
	 * radix tree, too
	 */
	gc_pos_set(c, gc_phase(GC_PHASE_START));

	BUG_ON(c->usage[1]);

	c->usage[1] = alloc_percpu(struct bch_fs_usage);
	if (!c->usage[1])
		return -ENOMEM;

	for_each_member_device(ca, c, i) {
		BUG_ON(ca->buckets[1]);
		BUG_ON(ca->usage[1]);

		ca->buckets[1] = kvpmalloc(sizeof(struct bucket_array) +
				ca->mi.nbuckets * sizeof(struct bucket),
				GFP_KERNEL|__GFP_ZERO);
		if (!ca->buckets[1]) {
			percpu_ref_put(&ca->ref);
			return -ENOMEM;
		}

		ca->usage[1] = alloc_percpu(struct bch_dev_usage);
		if (!ca->usage[1]) {
			percpu_ref_put(&ca->ref);
			return -ENOMEM;
		}
	}

	percpu_down_write(&c->usage_lock);

	for_each_member_device(ca, c, i) {
		struct bucket_array *dst = __bucket_array(ca, 1);
		struct bucket_array *src = __bucket_array(ca, 0);
		size_t b;

		dst->first_bucket	= src->first_bucket;
		dst->nbuckets		= src->nbuckets;

		for (b = 0; b < src->nbuckets; b++)
			dst->b[b]._mark.gen = src->b[b].mark.gen;
	};

	percpu_up_write(&c->usage_lock);

	return bch2_ec_mem_alloc(c, true);
}

/**
 * bch2_gc - walk _all_ references to buckets, and recompute them:
 *
 * Order matters here:
 *  - Concurrent GC relies on the fact that we have a total ordering for
 *    everything that GC walks - see gc_will_visit_node(),
 *    gc_will_visit_root()
 *
 *  - also, references move around in the course of index updates and
 *    various other crap: everything needs to agree on the ordering
 *    references are allowed to move around in - e.g., we're allowed to
 *    start with a reference owned by an open_bucket (the allocator) and
 *    move it to the btree, but not the reverse.
 *
 *    This is necessary to ensure that gc doesn't miss references that
 *    move around - if references move backwards in the ordering GC
 *    uses, GC could skip past them
 */
int bch2_gc(struct bch_fs *c, struct list_head *journal, bool initial)
{
	struct bch_dev *ca;
	u64 start_time = local_clock();
	unsigned i, iter = 0;
	int ret;

	trace_gc_start(c);

	down_write(&c->gc_lock);
again:
	ret = bch2_gc_start(c);
	if (ret)
		goto out;

	bch2_mark_superblocks(c);

	ret = bch2_gc_btrees(c, journal, initial);
	if (ret)
		goto out;

	bch2_mark_pending_btree_node_frees(c);
	bch2_mark_allocator_buckets(c);

	c->gc_count++;
out:
	if (!ret && test_bit(BCH_FS_FIXED_GENS, &c->flags)) {
		/*
		 * XXX: make sure gens we fixed got saved
		 */
		if (iter++ <= 2) {
			bch_info(c, "Fixed gens, restarting mark and sweep:");
			clear_bit(BCH_FS_FIXED_GENS, &c->flags);
			goto again;
		}

		bch_info(c, "Unable to fix bucket gens, looping");
		ret = -EINVAL;
	}

	if (!ret)
		bch2_gc_done(c, initial);

	/* Indicates that gc is no longer in progress: */
	__gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));

	bch2_gc_free(c);
	up_write(&c->gc_lock);

	if (!ret && initial)
		set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);

	trace_gc_end(c);
	bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);

	/*
	 * Wake up allocator in case it was waiting for buckets
	 * because of not being able to inc gens
	 */
	for_each_member_device(ca, c, i)
		bch2_wake_allocator(ca);

	/*
	 * At startup, allocations can happen directly instead of via the
	 * allocator thread - issue wakeup in case they blocked on gc_lock:
	 */
	closure_wake_up(&c->freelist_wait);

	return ret;
}

/* Btree coalescing */

static void recalc_packed_keys(struct btree *b)
{
	struct bset *i = btree_bset_first(b);
	struct bkey_packed *k;

	memset(&b->nr, 0, sizeof(b->nr));

	BUG_ON(b->nsets != 1);

	vstruct_for_each(i, k)
		btree_keys_account_key_add(&b->nr, 0, k);
}

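/*
 * Attempt to merge a window of adjacent sibling nodes into one fewer node,
 * repacking their keys into a common format:
 */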
static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
				struct btree *old_nodes[GC_MERGE_NODES])
{
	struct btree *parent = btree_node_parent(iter, old_nodes[0]);
	unsigned i, nr_old_nodes, nr_new_nodes, u64s = 0;
	unsigned blocks = btree_blocks(c) * 2 / 3;
	struct btree *new_nodes[GC_MERGE_NODES];
	struct btree_update *as;
	struct keylist keylist;
	struct bkey_format_state format_state;
	struct bkey_format new_format;

	memset(new_nodes, 0, sizeof(new_nodes));
	bch2_keylist_init(&keylist, NULL);

	/* Count keys that are not deleted */
	for (i = 0; i < GC_MERGE_NODES && old_nodes[i]; i++)
		u64s += old_nodes[i]->nr.live_u64s;

	nr_old_nodes = nr_new_nodes = i;

	/* Check if all keys in @old_nodes could fit in one fewer node */
	if (nr_old_nodes <= 1 ||
	    __vstruct_blocks(struct btree_node, c->block_bits,
			     DIV_ROUND_UP(u64s, nr_old_nodes - 1)) > blocks)
		return;

	/* Find a format that all keys in @old_nodes can pack into */
	bch2_bkey_format_init(&format_state);

	for (i = 0; i < nr_old_nodes; i++)
		__bch2_btree_calc_format(&format_state, old_nodes[i]);

	new_format = bch2_bkey_format_done(&format_state);

	/* Check if repacking would make any nodes too big to fit */
	for (i = 0; i < nr_old_nodes; i++)
		if (!bch2_btree_node_format_fits(c, old_nodes[i], &new_format)) {
			trace_btree_gc_coalesce_fail(c,
					BTREE_GC_COALESCE_FAIL_FORMAT_FITS);
			return;
		}

	if (bch2_keylist_realloc(&keylist, NULL, 0,
			(BKEY_U64s + BKEY_EXTENT_U64s_MAX) * nr_old_nodes)) {
		trace_btree_gc_coalesce_fail(c,
				BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC);
		return;
	}

	as = bch2_btree_update_start(c, iter->btree_id,
			btree_update_reserve_required(c, parent) + nr_old_nodes,
			BTREE_INSERT_NOFAIL|
			BTREE_INSERT_USE_RESERVE,
			NULL);
	if (IS_ERR(as)) {
		trace_btree_gc_coalesce_fail(c,
				BTREE_GC_COALESCE_FAIL_RESERVE_GET);
		bch2_keylist_free(&keylist, NULL);
		return;
	}

	trace_btree_gc_coalesce(c, old_nodes[0]);

	for (i = 0; i < nr_old_nodes; i++)
		bch2_btree_interior_update_will_free_node(as, old_nodes[i]);

	/* Repack everything with @new_format and sort down to one bset */
	for (i = 0; i < nr_old_nodes; i++)
		new_nodes[i] =
			__bch2_btree_node_alloc_replacement(as, old_nodes[i],
							    new_format);

	/*
	 * Conceptually we concatenate the nodes together and slice them
	 * up at different boundaries.
	 */
	for (i = nr_new_nodes - 1; i > 0; --i) {
		struct btree *n1 = new_nodes[i];
		struct btree *n2 = new_nodes[i - 1];
		struct bset *s1 = btree_bset_first(n1);
		struct bset *s2 = btree_bset_first(n2);
		struct bkey_packed *k, *last = NULL;

		/* Calculate how many keys from @n2 we could fit inside @n1 */
		u64s = 0;

		for (k = s2->start;
		     k < vstruct_last(s2) &&
		     vstruct_blocks_plus(n1->data, c->block_bits,
					 u64s + k->u64s) <= blocks;
		     k = bkey_next(k)) {
			last = k;
			u64s += k->u64s;
		}

		if (u64s == le16_to_cpu(s2->u64s)) {
			/* n2 fits entirely in n1 */
			n1->key.k.p = n1->data->max_key = n2->data->max_key;

			memcpy_u64s(vstruct_last(s1),
				    s2->start,
				    le16_to_cpu(s2->u64s));
			le16_add_cpu(&s1->u64s, le16_to_cpu(s2->u64s));

			set_btree_bset_end(n1, n1->set);

			six_unlock_write(&n2->lock);
			bch2_btree_node_free_never_inserted(c, n2);
			six_unlock_intent(&n2->lock);

			memmove(new_nodes + i - 1,
				new_nodes + i,
				sizeof(new_nodes[0]) * (nr_new_nodes - i));
			new_nodes[--nr_new_nodes] = NULL;
		} else if (u64s) {
			/* move part of n2 into n1 */
			n1->key.k.p = n1->data->max_key =
				bkey_unpack_pos(n1, last);

			n2->data->min_key =
				btree_type_successor(iter->btree_id,
						     n1->data->max_key);

			memcpy_u64s(vstruct_last(s1),
				    s2->start, u64s);
			le16_add_cpu(&s1->u64s, u64s);

			memmove(s2->start,
				vstruct_idx(s2, u64s),
				(le16_to_cpu(s2->u64s) - u64s) * sizeof(u64));
			s2->u64s = cpu_to_le16(le16_to_cpu(s2->u64s) - u64s);

			set_btree_bset_end(n1, n1->set);
			set_btree_bset_end(n2, n2->set);
		}
	}

	for (i = 0; i < nr_new_nodes; i++) {
		struct btree *n = new_nodes[i];

		recalc_packed_keys(n);
		btree_node_reset_sib_u64s(n);

		bch2_btree_build_aux_trees(n);
		six_unlock_write(&n->lock);

		bch2_btree_node_write(c, n, SIX_LOCK_intent);
	}

	/*
	 * The keys for the old nodes get deleted. We don't want to insert keys
	 * that compare equal to the keys for the new nodes we'll also be
	 * inserting - we can't because keys on a keylist must be strictly
	 * greater than the previous keys, and we also don't need to since the
	 * key for the new node will serve the same purpose (overwriting the
	 * key for the old node).
	 */
	for (i = 0; i < nr_old_nodes; i++) {
		struct bkey_i delete;
		unsigned j;

		for (j = 0; j < nr_new_nodes; j++)
			if (!bkey_cmp(old_nodes[i]->key.k.p,
				      new_nodes[j]->key.k.p))
				goto next;

		bkey_init(&delete.k);
		delete.k.p = old_nodes[i]->key.k.p;
		bch2_keylist_add_in_order(&keylist, &delete);
next:
		i = i;
	}

	/*
	 * Keys for the new nodes get inserted: bch2_btree_insert_keys() only
	 * does the lookup once and thus expects the keys to be in sorted order
	 * so we have to make sure the new keys are correctly ordered with
	 * respect to the deleted keys added in the previous loop
	 */
	for (i = 0; i < nr_new_nodes; i++)
		bch2_keylist_add_in_order(&keylist, &new_nodes[i]->key);

	/* Insert the newly coalesced nodes */
	bch2_btree_insert_node(as, parent, iter, &keylist, 0);

	BUG_ON(!bch2_keylist_empty(&keylist));

	BUG_ON(iter->l[old_nodes[0]->level].b != old_nodes[0]);

	bch2_btree_iter_node_replace(iter, new_nodes[0]);

	for (i = 0; i < nr_new_nodes; i++)
		bch2_open_buckets_put(c, &new_nodes[i]->ob);

	/* Free the old nodes and update our sliding window */
	for (i = 0; i < nr_old_nodes; i++) {
		bch2_btree_node_free_inmem(c, old_nodes[i], iter);

		/*
		 * the index update might have triggered a split, in which case
		 * the nodes we coalesced - the new nodes we just created -
		 * might not be sibling nodes anymore - don't add them to the
		 * sliding window (except the first):
		 */
		if (!i) {
			old_nodes[i] = new_nodes[i];
		} else {
			old_nodes[i] = NULL;
			if (new_nodes[i])
				six_unlock_intent(&new_nodes[i]->lock);
		}
	}

	bch2_btree_update_done(as);
	bch2_keylist_free(&keylist, NULL);
}

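/*
 * Walk one btree, maintaining a sliding window of locked sibling nodes and
 * trying to coalesce them as we go:
 */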
static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
{
	struct btree_iter iter;
	struct btree *b;
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	unsigned i;

	/* Sliding window of adjacent btree nodes */
	struct btree *merge[GC_MERGE_NODES];
	u32 lock_seq[GC_MERGE_NODES];

	/*
	 * XXX: We don't have a good way of positively matching on sibling nodes
	 * that have the same parent - this code works by handling the cases
	 * where they might not have the same parent, and is thus fragile. Ugh.
	 *
	 * Perhaps redo this to use multiple linked iterators?
	 */
	memset(merge, 0, sizeof(merge));

	__for_each_btree_node(&iter, c, btree_id, POS_MIN,
			      BTREE_MAX_DEPTH, 0,
			      BTREE_ITER_PREFETCH, b) {
		memmove(merge + 1, merge,
			sizeof(merge) - sizeof(merge[0]));
		memmove(lock_seq + 1, lock_seq,
			sizeof(lock_seq) - sizeof(lock_seq[0]));

		merge[0] = b;

		for (i = 1; i < GC_MERGE_NODES; i++) {
			if (!merge[i] ||
			    !six_relock_intent(&merge[i]->lock, lock_seq[i]))
				break;

			if (merge[i]->level != merge[0]->level) {
				six_unlock_intent(&merge[i]->lock);
				break;
			}
		}
		memset(merge + i, 0, (GC_MERGE_NODES - i) * sizeof(merge[0]));

		bch2_coalesce_nodes(c, &iter, merge);

		for (i = 1; i < GC_MERGE_NODES && merge[i]; i++) {
			lock_seq[i] = merge[i]->lock.state.seq;
			six_unlock_intent(&merge[i]->lock);
		}

		lock_seq[0] = merge[0]->lock.state.seq;

		if (kthread && kthread_should_stop()) {
			bch2_btree_iter_unlock(&iter);
			return -ESHUTDOWN;
		}

		bch2_btree_iter_cond_resched(&iter);

		/*
		 * If the parent node wasn't relocked, it might have been split
		 * and the nodes in our sliding window might not have the same
		 * parent anymore - blow away the sliding window:
		 */
		if (btree_iter_node(&iter, iter.level + 1) &&
		    !btree_node_intent_locked(&iter, iter.level + 1))
			memset(merge + 1, 0,
			       (GC_MERGE_NODES - 1) * sizeof(merge[0]));
	}
	return bch2_btree_iter_unlock(&iter);
}

/**
 * bch2_coalesce - coalesce adjacent nodes with low occupancy
 */
void bch2_coalesce(struct bch_fs *c)
{
	enum btree_id id;

	down_read(&c->gc_lock);
	trace_gc_coalesce_start(c);

	for (id = 0; id < BTREE_ID_NR; id++) {
		int ret = c->btree_roots[id].b
			? bch2_coalesce_btree(c, id)
			: 0;

		if (ret) {
			if (ret != -ESHUTDOWN)
				bch_err(c, "btree coalescing failed: %d", ret);
			up_read(&c->gc_lock);
			return;
		}
	}

	trace_gc_coalesce_end(c);
	up_read(&c->gc_lock);
}

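/*
 * GC thread: runs a runtime mark and sweep pass periodically (based on the
 * amount of IO done, via the write io_clock) or when kicked:
 */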
static int bch2_gc_thread(void *arg)
{
	struct bch_fs *c = arg;
	struct io_clock *clock = &c->io_clock[WRITE];
	unsigned long last = atomic_long_read(&clock->now);
	unsigned last_kick = atomic_read(&c->kick_gc);
	int ret;

	set_freezable();

	while (1) {
		while (1) {
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop()) {
				__set_current_state(TASK_RUNNING);
				return 0;
			}

			if (atomic_read(&c->kick_gc) != last_kick)
				break;

			if (c->btree_gc_periodic) {
				unsigned long next = last + c->capacity / 16;

				if (atomic_long_read(&clock->now) >= next)
					break;

				bch2_io_clock_schedule_timeout(clock, next);
			} else {
				schedule();
			}

			try_to_freeze();
		}
		__set_current_state(TASK_RUNNING);

		last = atomic_long_read(&clock->now);
		last_kick = atomic_read(&c->kick_gc);

		ret = bch2_gc(c, NULL, false);
		if (ret)
			bch_err(c, "btree gc failed: %i", ret);

		debug_check_no_locks_held();
	}

	return 0;
}

void bch2_gc_thread_stop(struct bch_fs *c)
{
	struct task_struct *p;

	p = c->gc_thread;
	c->gc_thread = NULL;

	if (p) {
		kthread_stop(p);
		put_task_struct(p);
	}
}

int bch2_gc_thread_start(struct bch_fs *c)
{
	struct task_struct *p;

	BUG_ON(c->gc_thread);

	p = kthread_create(bch2_gc_thread, c, "bch_gc");
	if (IS_ERR(p))
		return PTR_ERR(p);

	get_task_struct(p);
	c->gc_thread = p;
	wake_up_process(p);
	return 0;
}

/* Initial GC computes bucket marks during startup */

int bch2_initial_gc(struct bch_fs *c, struct list_head *journal)
{
	int ret = bch2_gc(c, journal, true);

	/*
	 * Skip past versions that might have possibly been used (as nonces),
	 * but hadn't had their pointers written:
	 */
	if (c->sb.encryption_type)
		atomic64_add(1 << 16, &c->key_version);

	return ret;
}