/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_GC_H
#define _BCACHEFS_BTREE_GC_H

# include "bkey.h"
# include "btree_types.h"
int bch2_check_topology(struct bch_fs *);
int bch2_check_allocations(struct bch_fs *);

/*
 * For concurrent mark and sweep (with other index updates), we define a total
 * ordering of _all_ references GC walks:
 *
 * Note that some references will have the same GC position as others - e.g.
 * everything within the same btree node; in those cases we're relying on
 * whatever locking exists for where those references live, i.e. the write lock
 * on a btree node.
 *
 * That locking is also required to ensure GC doesn't pass the updater in
 * between the updater adding/removing the reference and updating the GC marks;
 * without that, we would at best double count sometimes.
 *
 * That part is important - whenever calling bch2_mark_pointers(), a lock _must_
 * be held that prevents GC from passing the position the updater is at.
 *
 * (What about the start of gc, when we're clearing all the marks? GC clears the
 * mark with the gc pos seqlock held, and bch_mark_bucket checks against the gc
 * position inside its cmpxchg loop, so crap magically works).
 */

/* Position of (the start of) a gc phase: */
static inline struct gc_pos gc_phase(enum gc_phase phase)
{
	return (struct gc_pos) {
		.phase	= phase,
		.level	= 0,
		.pos	= POS_MIN,
	};
}

static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
{
	return  cmp_int(l.phase, r.phase) ?:
		-cmp_int(l.level, r.level) ?:
		bpos_cmp(l.pos, r.pos);
}

static inline enum gc_phase btree_id_to_gc_phase(enum btree_id id)
{
	switch (id) {
#define x(name, v, ...) case BTREE_ID_##name: return GC_PHASE_BTREE_##name;
	BCH_BTREE_IDS()
#undef x
	default:
		BUG();
	}
}
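
/*
 * Illustrative note, not generated code: for a btree such as the extents
 * btree, the corresponding x() entry in BCH_BTREE_IDS() expands the case
 * label in btree_id_to_gc_phase() above to roughly
 *
 *	case BTREE_ID_extents: return GC_PHASE_BTREE_extents;
 *
 * i.e. each btree id maps 1:1 onto its own GC phase.
 */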

static inline struct gc_pos gc_pos_btree(enum btree_id btree, unsigned level,
					 struct bpos pos)
{
	return (struct gc_pos) {
		.phase	= btree_id_to_gc_phase(btree),
		.level	= level,
		.pos	= pos,
	};
}

/*
 * GC position of the pointers within a btree node: note, _not_ for &b->key
 * itself, that lives in the parent node:
 */
static inline struct gc_pos gc_pos_btree_node(struct btree *b)
{
	return gc_pos_btree(b->c.btree_id, b->c.level, b->key.k.p);
}

static inline bool gc_visited(struct bch_fs *c, struct gc_pos pos)
{
	unsigned seq;
	bool ret;

	do {
		seq = read_seqcount_begin(&c->gc_pos_lock);
		ret = gc_pos_cmp(pos, c->gc_pos) <= 0;
	} while (read_seqcount_retry(&c->gc_pos_lock, seq));

	return ret;
}
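
/*
 * Illustrative sketch only, not part of the real interface: the updater-side
 * pattern described in the comment at the top of this file.  An updater that
 * has just added or removed a reference, while still holding the lock that
 * orders it against GC (e.g. the btree node write lock), checks whether GC has
 * already walked past this position; if so, the GC copy of the marks must be
 * updated as well, since GC will not come back to it.
 * update_gc_copy_of_marks() is a hypothetical helper, named only to show where
 * that update would go.
 */
static inline void example_update_marks_after_gc(struct bch_fs *c, struct btree *b)
{
	if (gc_visited(c, gc_pos_btree_node(b))) {
		/* GC already passed this node: also update the GC copy */
		/* update_gc_copy_of_marks(c, b); */
	}
}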

int bch2_gc_gens(struct bch_fs *);
void bch2_gc_gens_async(struct bch_fs *);
void bch2_fs_gc_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_GC_H */