/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_GC_H
#define _BCACHEFS_BTREE_GC_H

#include "btree_types.h"

enum bkey_type;

void bch2_coalesce(struct bch_fs *);
int bch2_gc(struct bch_fs *, struct list_head *, bool);
void bch2_gc_thread_stop(struct bch_fs *);
int bch2_gc_thread_start(struct bch_fs *);
int bch2_initial_gc(struct bch_fs *, struct list_head *);
void bch2_mark_dev_superblock(struct bch_fs *, struct bch_dev *, unsigned);

/*
 * For concurrent mark and sweep (with other index updates), we define a total
 * ordering of _all_ references GC walks:
 *
 * Note that some references will have the same GC position as others - e.g.
 * everything within the same btree node; in those cases we're relying on
 * whatever locking exists for where those references live, i.e. the write lock
 * on a btree node.
 *
 * That locking is also required to ensure GC doesn't pass the updater in
 * between the updater adding/removing the reference and updating the GC marks;
 * without that, we would at best double count sometimes.
 *
 * That part is important - whenever calling bch2_mark_pointers(), a lock _must_
 * be held that prevents GC from passing the position the updater is at.
 *
 * (What about the start of gc, when we're clearing all the marks? GC clears the
 * mark with the gc pos seqlock held, and bch_mark_bucket checks against the gc
 * position inside its cmpxchg loop, so crap magically works).
 */

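/*
 * A minimal sketch of the updater-side protocol described above. The lock
 * helpers named here are placeholders, not real functions - the actual
 * locking depends on where the reference lives; gc_pos_btree_node() and
 * gc_visited() are defined below:
 *
 *	write_lock_btree_node(b);		// keeps GC from passing us
 *	bch2_mark_pointers(...);		// update the primary marks
 *	if (gc_visited(c, gc_pos_btree_node(b)))
 *		update_gc_copy_of_marks();	// GC already walked past us,
 *						// so fix up its view too
 *	write_unlock_btree_node(b);
 */
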
/* Position of (the start of) a gc phase: */
static inline struct gc_pos gc_phase(enum gc_phase phase)
{
	return (struct gc_pos) {
		.phase	= phase,
		.pos	= POS_MIN,
		.level	= 0,
	};
}

static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
{
	if (l.phase != r.phase)
		return l.phase < r.phase ? -1 : 1;
	if (bkey_cmp(l.pos, r.pos))
		return bkey_cmp(l.pos, r.pos);
	if (l.level != r.level)
		return l.level < r.level ? -1 : 1;
	return 0;
}

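/*
 * Example (illustrative only): the comparison is lexicographic - phase first,
 * then key, then level - so within one phase a later key sorts strictly
 * greater. Assuming BTREE_ID_EXTENTS is among the ids DEFINE_BCH_BTREE_IDS()
 * emits, and using gc_pos_btree() from below:
 *
 *	struct gc_pos a = gc_pos_btree(BTREE_ID_EXTENTS, POS_MIN, 0);
 *	struct gc_pos b = gc_pos_btree(BTREE_ID_EXTENTS, POS_MAX, 0);
 *
 *	BUG_ON(gc_pos_cmp(a, b) >= 0);	// a compares strictly less than b
 */
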
static inline enum gc_phase btree_id_to_gc_phase(enum btree_id id)
{
	switch (id) {
#define DEF_BTREE_ID(n, v, s) case BTREE_ID_##n: return GC_PHASE_BTREE_##n;
	DEFINE_BCH_BTREE_IDS()
#undef DEF_BTREE_ID
	default:
		BUG();
	}
}

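/*
 * A sketch of what the X-macro above expands to, assuming
 * DEFINE_BCH_BTREE_IDS() emits DEF_BTREE_ID(EXTENTS, ...) among its entries:
 *
 *	switch (id) {
 *	case BTREE_ID_EXTENTS:	return GC_PHASE_BTREE_EXTENTS;
 *	...
 *	default:		BUG();
 *	}
 */
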
static inline struct gc_pos gc_pos_btree(enum btree_id id,
					 struct bpos pos, unsigned level)
{
	return (struct gc_pos) {
		.phase	= btree_id_to_gc_phase(id),
		.pos	= pos,
		.level	= level,
	};
}

/*
 * GC position of the pointers within a btree node: note, _not_ for &b->key
 * itself, that lives in the parent node:
 */
static inline struct gc_pos gc_pos_btree_node(struct btree *b)
{
	return gc_pos_btree(b->btree_id, b->key.k.p, b->level);
}

/*
 * GC position of the pointer to a btree root: we don't use
 * gc_pos_pointer_to_btree_node() here to avoid a potential race with
 * btree_split() increasing the tree depth - the new root will have level > the
 * old root and thus have a greater gc position than the old root, but that
 * would be incorrect since once gc has marked the root it's not coming back.
 */
static inline struct gc_pos gc_pos_btree_root(enum btree_id id)
{
	return gc_pos_btree(id, POS_MAX, BTREE_MAX_DEPTH);
}

static inline struct gc_pos gc_pos_alloc(struct bch_fs *c, struct open_bucket *ob)
{
	return (struct gc_pos) {
		.phase	= GC_PHASE_ALLOC,
		/* ob - c->open_buckets is the index of ob in that array: */
		.pos	= POS(ob ? ob - c->open_buckets : 0, 0),
	};
}

static inline bool gc_visited(struct bch_fs *c, struct gc_pos pos)
{
	unsigned seq;
	bool ret;

	do {
		seq = read_seqcount_begin(&c->gc_pos_lock);
		ret = gc_pos_cmp(pos, c->gc_pos) <= 0;
	} while (read_seqcount_retry(&c->gc_pos_lock, seq));

	return ret;
}

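/*
 * For reference, a minimal sketch of the writer side this read loop pairs
 * with: GC would publish its new position under the same seqcount. The
 * helper's name and shape here are an assumption - the actual code lives
 * elsewhere - but write_seqcount_begin()/write_seqcount_end() are the
 * standard kernel counterparts of the reads above:
 *
 *	static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
 *	{
 *		write_seqcount_begin(&c->gc_pos_lock);
 *		c->gc_pos = new_pos;
 *		write_seqcount_end(&c->gc_pos_lock);
 *	}
 */
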
#endif /* _BCACHEFS_BTREE_GC_H */