// SPDX-License-Identifier: GPL-2.0
/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
*/

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "error.h"
#include "extents.h"
#include "eytzinger.h"
#include "io.h"
#include "keylist.h"
#include "move.h"
#include "movinggc.h"
#include "super-io.h"
#include "trace.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
#include <linux/wait.h>

/*
 * We can't use the entire copygc reserve in one iteration of copygc: we may
 * need the buckets we're freeing up to go back into the copygc reserve to make
 * forward progress, but if the copygc reserve is full they'll be available for
 * any allocation - and it's possible that in a given iteration, we free up most
 * of the buckets we're going to free before we allocate most of the buckets
 * we're going to allocate.
 *
 * If we only use half of the reserve per iteration, then in steady state we'll
 * always have room in the reserve for the buckets we're going to need in the
 * next iteration:
 */
#define COPYGC_BUCKETS_PER_ITER(ca)					\
	((ca)->free[RESERVE_MOVINGGC].size / 2)
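
/*
 * Illustrative example (hypothetical numbers, not from this file): with a
 * 128-bucket copygc reserve, COPYGC_BUCKETS_PER_ITER allows at most 64 buckets
 * per iteration, so even if none of the buckets freed this iteration have made
 * it back into the reserve by the time the next iteration starts, 64 reserve
 * buckets are still available to it.
 */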

static inline int sectors_used_cmp(copygc_heap *heap,
				   struct copygc_heap_entry l,
				   struct copygc_heap_entry r)
{
	return cmp_int(l.sectors, r.sectors);
}

static int bucket_offset_cmp(const void *_l, const void *_r, size_t size)
{
	const struct copygc_heap_entry *l = _l;
	const struct copygc_heap_entry *r = _r;

	return cmp_int(l->dev,    r->dev) ?:
	       cmp_int(l->offset, r->offset);
}

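/*
 * Added description (not an original comment): decide whether a key has a
 * pointer into one of the buckets selected for evacuation; the heap entries
 * are kept sorted by (dev, offset), so an eytzinger search finds the candidate
 * bucket for each pointer. Returns the device index to rewrite from, or -1 if
 * the key should be skipped.
 */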
static int __copygc_pred(struct bch_fs *c, struct bkey_s_c k)
{
	copygc_heap *h = &c->copygc_heap;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
		struct copygc_heap_entry search = {
			.dev	= ptr->dev,
			.offset	= ptr->offset
		};

		ssize_t i = eytzinger0_find_le(h->data, h->used,
					       sizeof(h->data[0]),
					       bucket_offset_cmp, &search);
#if 0
		/* eytzinger search verify code: */
		ssize_t j = -1, k;

		for (k = 0; k < h->used; k++)
			if (h->data[k].offset <= ptr->offset &&
			    (j < 0 || h->data[k].offset > h->data[j].offset))
				j = k;

		BUG_ON(i != j);
#endif
		if (i >= 0 &&
		    ptr->offset < h->data[i].offset + ca->mi.bucket_size &&
		    ptr->gen == h->data[i].gen)
			return ptr->dev;
	}

	return -1;
}

static enum data_cmd copygc_pred(struct bch_fs *c, void *arg,
				 struct bkey_s_c k,
				 struct bch_io_opts *io_opts,
				 struct data_opts *data_opts)
{
	int dev_idx = __copygc_pred(c, k);
	if (dev_idx < 0)
		return DATA_SKIP;

	data_opts->target		= io_opts->background_target;
	data_opts->btree_insert_flags	= BTREE_INSERT_USE_RESERVE;
	data_opts->rewrite_dev		= dev_idx;
	return DATA_REWRITE;
}

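/*
 * Added note (reading of the code, not an original comment): this returns true
 * once the copygc reserve fifo is full, or when the device's allocator thread
 * isn't running, so the closure_wait_event() in bch2_copygc() can't block
 * forever waiting on an allocator that will never fill the reserve.
 */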
static bool have_copygc_reserve(struct bch_dev *ca)
{
	bool ret;

	spin_lock(&ca->fs->freelist_lock);
	ret = fifo_full(&ca->free[RESERVE_MOVINGGC]) ||
		ca->allocator_state != ALLOCATOR_RUNNING;
	spin_unlock(&ca->fs->freelist_lock);

	return ret;
}

static int bch2_copygc(struct bch_fs *c)
{
	copygc_heap *h = &c->copygc_heap;
	struct copygc_heap_entry e, *i;
	struct bucket_array *buckets;
	struct bch_move_stats move_stats;
	u64 sectors_to_move = 0, sectors_not_moved = 0;
	u64 sectors_reserved = 0;
	u64 buckets_to_move, buckets_not_moved = 0;
	struct bch_dev *ca;
	unsigned dev_idx;
	size_t b, heap_size = 0;
	int ret;

	memset(&move_stats, 0, sizeof(move_stats));

	/*
	 * Find buckets with lowest sector counts, skipping completely
	 * empty buckets, by building a maxheap sorted by sector count,
	 * and repeatedly replacing the maximum element until all
	 * buckets have been visited.
	 */
	h->used = 0;

	for_each_rw_member(ca, c, dev_idx)
		heap_size += ca->mi.nbuckets >> 7;

	if (h->size < heap_size) {
		free_heap(&c->copygc_heap);
		if (!init_heap(&c->copygc_heap, heap_size, GFP_KERNEL)) {
			bch_err(c, "error allocating copygc heap");
			return 0;
		}
	}

	for_each_rw_member(ca, c, dev_idx) {
		closure_wait_event(&c->freelist_wait, have_copygc_reserve(ca));

		spin_lock(&ca->fs->freelist_lock);
		sectors_reserved += fifo_used(&ca->free[RESERVE_MOVINGGC]) * ca->mi.bucket_size;
		spin_unlock(&ca->fs->freelist_lock);

		down_read(&ca->bucket_lock);
		buckets = bucket_array(ca);

		for (b = buckets->first_bucket; b < buckets->nbuckets; b++) {
			struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
			struct copygc_heap_entry e;

			if (m.owned_by_allocator ||
			    m.data_type != BCH_DATA_user ||
			    !bucket_sectors_used(m) ||
			    bucket_sectors_used(m) >= ca->mi.bucket_size)
				continue;

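			/*
			 * Reading of the code (not an original comment): the
			 * negated comparator is presumably what makes
			 * heap_add_or_replace() keep the buckets with the
			 * fewest used sectors, evicting the fullest candidate
			 * once the heap is full - the "replacing the maximum
			 * element" described above.
			 */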
			e = (struct copygc_heap_entry) {
				.dev		= dev_idx,
				.gen		= m.gen,
				.sectors	= bucket_sectors_used(m),
				.offset		= bucket_to_sector(ca, b),
			};
			heap_add_or_replace(h, e, -sectors_used_cmp, NULL);
		}
		up_read(&ca->bucket_lock);
	}

	if (!sectors_reserved) {
		bch2_fs_fatal_error(c, "stuck, ran out of copygc reserve!");
		return -1;
	}

	for (i = h->data; i < h->data + h->used; i++)
		sectors_to_move += i->sectors;

	while (sectors_to_move > sectors_reserved) {
		BUG_ON(!heap_pop(h, e, -sectors_used_cmp, NULL));
		sectors_to_move -= e.sectors;
	}

	buckets_to_move = h->used;

	if (!buckets_to_move)
		return 0;

	eytzinger0_sort(h->data, h->used,
			sizeof(h->data[0]),
			bucket_offset_cmp, NULL);

	ret = bch2_move_data(c, &c->copygc_pd.rate,
			     writepoint_ptr(&c->copygc_write_point),
			     POS_MIN, POS_MAX,
			     copygc_pred, NULL,
			     &move_stats);

	for_each_rw_member(ca, c, dev_idx) {
		down_read(&ca->bucket_lock);
		buckets = bucket_array(ca);
		for (i = h->data; i < h->data + h->used; i++) {
			struct bucket_mark m;
			size_t b;

			if (i->dev != dev_idx)
				continue;

			b = sector_to_bucket(ca, i->offset);
			m = READ_ONCE(buckets->b[b].mark);

			if (i->gen == m.gen &&
			    bucket_sectors_used(m)) {
				sectors_not_moved += bucket_sectors_used(m);
				buckets_not_moved++;
			}
		}
		up_read(&ca->bucket_lock);
	}

	if (sectors_not_moved && !ret)
		bch_warn_ratelimited(c,
			"copygc finished but %llu/%llu sectors, %llu/%llu buckets not moved (move stats: moved %llu sectors, raced %llu keys, %llu sectors)",
			 sectors_not_moved, sectors_to_move,
			 buckets_not_moved, buckets_to_move,
			 atomic64_read(&move_stats.sectors_moved),
			 atomic64_read(&move_stats.keys_raced),
			 atomic64_read(&move_stats.sectors_raced));

	trace_copygc(c,
		     atomic64_read(&move_stats.sectors_moved), sectors_not_moved,
		     buckets_to_move, buckets_not_moved);
	return 0;
}

/*
 * Copygc runs when the amount of fragmented data is above some arbitrary
 * threshold:
 *
 * The threshold at the limit - when the device is full - is the amount of space
 * we reserved in bch2_recalc_capacity; we can't have more than that amount of
 * disk space stranded due to fragmentation and store everything we have
 * promised to store.
 *
 * But we don't want to be running copygc unnecessarily when the device still
 * has plenty of free space - rather, we want copygc to smoothly run every so
 * often and continually reduce the amount of fragmented space as the device
 * fills up. So, we increase the threshold by half the current free space.
 */
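
/*
 * Worked example (hypothetical numbers, not taken from this code): with
 * copygc_threshold equivalent to 8GB of sectors and 100GB of buckets still
 * available across rw devices, fragmented_allowed comes to 8GB + 50GB = 58GB;
 * the value returned below is then roughly how many more sectors may be
 * written before fragmentation reaches that allowance and copygc should run.
 */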
unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned dev_idx;
	u64 fragmented_allowed = c->copygc_threshold;
	u64 fragmented = 0;

	for_each_rw_member(ca, c, dev_idx) {
		struct bch_dev_usage usage = bch2_dev_usage_read(ca);

		fragmented_allowed += ((__dev_buckets_available(ca, usage) *
					ca->mi.bucket_size) >> 1);
		fragmented += usage.sectors_fragmented;
	}

	return max_t(s64, 0, fragmented_allowed - fragmented);
}

static int bch2_copygc_thread(void *arg)
{
	struct bch_fs *c = arg;
	struct io_clock *clock = &c->io_clock[WRITE];
	unsigned long last, wait;

	set_freezable();

	while (!kthread_should_stop()) {
		if (kthread_wait_freezable(c->copy_gc_enabled))
			break;

		last = atomic_long_read(&clock->now);
		wait = bch2_copygc_wait_amount(c);

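		/*
		 * Note (reading of the code, not an original comment): wait is
		 * in write io clock units, i.e. sectors written, so the sleep
		 * below ends after roughly that many more sectors of
		 * foreground writes have happened.
		 */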
		if (wait > clock->max_slop) {
			bch2_kthread_io_clock_wait(clock, last + wait,
						   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		if (bch2_copygc(c))
			break;
	}

	return 0;
}

void bch2_copygc_stop(struct bch_fs *c)
{
	c->copygc_pd.rate.rate = UINT_MAX;
	bch2_ratelimit_reset(&c->copygc_pd.rate);

	if (c->copygc_thread) {
		kthread_stop(c->copygc_thread);
		put_task_struct(c->copygc_thread);
	}
	c->copygc_thread = NULL;
}

int bch2_copygc_start(struct bch_fs *c)
{
	struct task_struct *t;

	if (c->copygc_thread)
		return 0;

	if (c->opts.nochanges)
		return 0;

	if (bch2_fs_init_fault("copygc_start"))
		return -ENOMEM;

	t = kthread_create(bch2_copygc_thread, c, "bch_copygc");
	if (IS_ERR(t))
		return PTR_ERR(t);

	get_task_struct(t);

	c->copygc_thread = t;
	wake_up_process(c->copygc_thread);

	return 0;
}

void bch2_fs_copygc_init(struct bch_fs *c)
{
	bch2_pd_controller_init(&c->copygc_pd);
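	/*
	 * Note (added, not from the original source): zeroing d_term
	 * presumably leaves the copygc rate controller purely proportional,
	 * with no derivative contribution.
	 */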
	c->copygc_pd.d_term = 0;
}