/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_GC_H
#define _BCACHEFS_BTREE_GC_H

#include "bkey.h"
#include "btree_types.h"

int bch2_check_topology(struct bch_fs *);
int bch2_gc(struct bch_fs *, bool, bool);
int bch2_gc_gens(struct bch_fs *);
void bch2_gc_thread_stop(struct bch_fs *);
int bch2_gc_thread_start(struct bch_fs *);

/*
 * For concurrent mark and sweep (with other index updates), we define a total
 * ordering of _all_ references GC walks:
 *
 * Note that some references will have the same GC position as others - e.g.
 * everything within the same btree node; in those cases we're relying on
 * whatever locking exists for where those references live, i.e. the write lock
 * on a btree node.
 *
 * That locking is also required to ensure GC doesn't pass the updater in
 * between the updater adding/removing the reference and updating the GC marks;
 * without that, we would at best double count sometimes.
 *
 * That part is important - whenever calling bch2_mark_pointers(), a lock _must_
 * be held that prevents GC from passing the position the updater is at.
 *
 * (What about the start of gc, when we're clearing all the marks? GC clears the
 * mark with the gc pos seqlock held, and bch_mark_bucket checks against the gc
 * position inside its cmpxchg loop, so crap magically works).
 */
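
/*
 * Illustrative sketch of that contract (hedged; the caller shape below is an
 * assumption for illustration, not an interface defined here): an updater
 * holds the node write lock across both steps, and only updates GC's marks
 * if GC has already walked this position:
 *
 *	// with the btree node write lock held, so GC can't pass us:
 *	if (gc_visited(c, gc_pos_btree_node(b)))
 *		bch2_mark_pointers(...);	// keep GC's marks in sync
 *	// ...then add/remove the reference in the node itself...
 */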

/* Position of (the start of) a gc phase: */
static inline struct gc_pos gc_phase(enum gc_phase phase)
{
	return (struct gc_pos) {
		.phase	= phase,
		.pos	= POS_MIN,
		.level	= 0,
	};
}

/*
 * Total ordering on GC positions: compare by phase, then pos, then level.
 */
static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
{
	return  cmp_int(l.phase, r.phase) ?:
		bpos_cmp(l.pos, r.pos) ?:
		cmp_int(l.level, r.level);
}
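
/*
 * Hedged example of the resulting order (the positions below are
 * illustrative): for two positions in the same btree phase,
 *
 *	gc_pos_btree(id, POS(0, 100), 0)	// leaf, keys up to 100
 *	gc_pos_btree(id, POS(0, 100), 1)	// parent covering the same range
 *
 * the leaf compares less than the parent (equal phase and pos, lower level),
 * so GC visits children before their parent node.
 */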

static inline enum gc_phase btree_id_to_gc_phase(enum btree_id id)
{
	switch (id) {
#define x(name, v, ...) case BTREE_ID_##name: return GC_PHASE_BTREE_##name;
	BCH_BTREE_IDS()
#undef x
	default:
		BUG();
	}
}
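
/*
 * For illustration (hedged; the extents btree is just one example entry in
 * BCH_BTREE_IDS()): each x() entry expands to a case mapping a btree to its
 * own GC phase, e.g.
 *
 *	case BTREE_ID_extents: return GC_PHASE_BTREE_extents;
 *
 * so btrees are walked in btree ID order, one phase per btree.
 */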

static inline struct gc_pos gc_pos_btree(enum btree_id id,
					 struct bpos pos, unsigned level)
{
	return (struct gc_pos) {
		.phase	= btree_id_to_gc_phase(id),
		.pos	= pos,
		.level	= level,
	};
}

/*
 * GC position of the pointers within a btree node: note, _not_ for &b->key
 * itself, that lives in the parent node:
 */
static inline struct gc_pos gc_pos_btree_node(struct btree *b)
{
	return gc_pos_btree(b->c.btree_id, b->key.k.p, b->c.level);
}

/*
 * GC position of the pointer to a btree root: we don't use
 * gc_pos_pointer_to_btree_node() here to avoid a potential race with
 * btree_split() increasing the tree depth - the new root will have level > the
 * old root and thus have a greater gc position than the old root, but that
 * would be incorrect since once gc has marked the root it's not coming back.
 */
static inline struct gc_pos gc_pos_btree_root(enum btree_id id)
{
	return gc_pos_btree(id, SPOS_MAX, BTREE_MAX_DEPTH);
}

/*
 * Returns true if GC has already walked @pos; reads c->gc_pos under the
 * gc_pos_lock seqcount so a torn read of the position can't be returned.
 */
static inline bool gc_visited(struct bch_fs *c, struct gc_pos pos)
{
	unsigned seq;
	bool ret;

	do {
		seq = read_seqcount_begin(&c->gc_pos_lock);
		ret = gc_pos_cmp(pos, c->gc_pos) <= 0;
	} while (read_seqcount_retry(&c->gc_pos_lock, seq));

	return ret;
}
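
/*
 * Hedged sketch of the write side this read pairs with (assumed, for
 * illustration): GC advances c->gc_pos under the same seqcount, so readers
 * either see a consistent position or retry:
 *
 *	write_seqcount_begin(&c->gc_pos_lock);
 *	c->gc_pos = new_pos;
 *	write_seqcount_end(&c->gc_pos_lock);
 */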

/*
 * Kick the GC thread to run a gens pass, for callers that can't call
 * bch2_gc_gens() directly:
 */
static inline void bch2_do_gc_gens(struct bch_fs *c)
{
	atomic_inc(&c->kick_gc);
	if (c->gc_thread)
		wake_up_process(c->gc_thread);
}

#endif /* _BCACHEFS_BTREE_GC_H */