/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */
# ifndef _BUCKETS_H
# define _BUCKETS_H
# include "buckets_types.h"
# include "super.h"
/*
 * Iterate @_b over every bucket in @_buckets, from first_bucket up to (but
 * not including) nbuckets.
 */
#define for_each_bucket(_b, _buckets)				\
	for (_b = (_buckets)->b + (_buckets)->first_bucket;	\
	     _b < (_buckets)->b + (_buckets)->nbuckets; _b++)
/*
 * Atomically update a bucket's mark via a compare-and-exchange loop.
 *
 * @g:    pointer to the bucket; evaluated exactly once
 * @new:  a struct bucket_mark lvalue that @expr edits, starting from the
 *        bucket's current mark
 * @expr: statement(s) transforming @new
 *
 * Evaluates to the old mark that was replaced.
 *
 * Fix: the original captured g in _g to guarantee single evaluation, but the
 * initial atomic64_read() still used (g), re-evaluating the argument; use _g
 * consistently so a side-effecting argument is evaluated only once.
 */
#define bucket_cmpxchg(g, new, expr)				\
({								\
	struct bucket *_g = g;					\
	u64 _v = atomic64_read(&_g->_mark.v);			\
	struct bucket_mark _old;				\
								\
	do {							\
		(new).v.counter = _old.v.counter = _v;		\
		expr;						\
	} while ((_v = atomic64_cmpxchg(&_g->_mark.v,		\
					_old.v.counter,		\
					(new).v.counter)) != _old.v.counter);\
	_old;							\
})
2018-07-23 05:32:01 -04:00
static inline struct bucket_array * __bucket_array ( struct bch_dev * ca ,
bool gc )
2017-03-16 22:18:50 -08:00
{
2018-07-23 05:32:01 -04:00
return rcu_dereference_check ( ca - > buckets [ gc ] ,
2017-03-16 22:18:50 -08:00
! ca - > fs | |
2018-11-26 00:13:33 -05:00
percpu_rwsem_is_held ( & ca - > fs - > mark_lock ) | |
2017-03-16 22:18:50 -08:00
lockdep_is_held ( & ca - > fs - > gc_lock ) | |
lockdep_is_held ( & ca - > bucket_lock ) ) ;
}
2018-07-23 05:32:01 -04:00
static inline struct bucket_array * bucket_array ( struct bch_dev * ca )
{
return __bucket_array ( ca , false ) ;
}
/* Look up bucket @b in @ca's bucket array (gc copy when @gc); bounds checked. */
static inline struct bucket *__bucket(struct bch_dev *ca, size_t b, bool gc)
{
	struct bucket_array *ba = __bucket_array(ca, gc);

	BUG_ON(b < ba->first_bucket || b >= ba->nbuckets);
	return ba->b + b;
}
2021-12-25 22:37:19 -05:00
static inline struct bucket * gc_bucket ( struct bch_dev * ca , size_t b )
{
return __bucket ( ca , b , true ) ;
}
2021-12-25 19:55:34 -05:00
static inline struct bucket_gens * bucket_gens ( struct bch_dev * ca )
{
return rcu_dereference_check ( ca - > bucket_gens ,
! ca - > fs | |
percpu_rwsem_is_held ( & ca - > fs - > mark_lock ) | |
lockdep_is_held ( & ca - > fs - > gc_lock ) | |
lockdep_is_held ( & ca - > bucket_lock ) ) ;
}
/* Pointer to the generation number of bucket @b; bounds checked. */
static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
{
	struct bucket_gens *g = bucket_gens(ca);

	BUG_ON(b < g->first_bucket || b >= g->nbuckets);
	return g->b + b;
}
2017-03-16 22:18:50 -08:00
static inline size_t PTR_BUCKET_NR ( const struct bch_dev * ca ,
const struct bch_extent_ptr * ptr )
{
return sector_to_bucket ( ca , ptr - > offset ) ;
}
2021-12-31 20:03:29 -05:00
static inline struct bpos PTR_BUCKET_POS ( const struct bch_fs * c ,
const struct bch_extent_ptr * ptr )
{
struct bch_dev * ca = bch_dev_bkey_exists ( c , ptr - > dev ) ;
return POS ( ptr - > dev , PTR_BUCKET_NR ( ca , ptr ) ) ;
}
/* Gc-copy bucket that @ptr points into. */
static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
					   const struct bch_extent_ptr *ptr)
{
	return gc_bucket(ca, PTR_BUCKET_NR(ca, ptr));
}
2019-08-29 11:34:01 -04:00
static inline enum bch_data_type ptr_data_type ( const struct bkey * k ,
const struct bch_extent_ptr * ptr )
{
2020-02-07 13:38:02 -05:00
if ( k - > type = = KEY_TYPE_btree_ptr | |
k - > type = = KEY_TYPE_btree_ptr_v2 )
2020-07-09 18:28:11 -04:00
return BCH_DATA_btree ;
2019-08-29 11:34:01 -04:00
2020-07-09 18:28:11 -04:00
return ptr - > cached ? BCH_DATA_cached : BCH_DATA_user ;
2019-08-29 11:34:01 -04:00
}
2017-03-16 22:18:50 -08:00
static inline int gen_cmp ( u8 a , u8 b )
{
return ( s8 ) ( a - b ) ;
}
/* How far generation @a is ahead of @b, or 0 if it is not ahead. */
static inline int gen_after(u8 a, u8 b)
{
	int d = gen_cmp(a, b);

	return d > 0 ? d : 0;
}
/**
* ptr_stale ( ) - check if a pointer points into a bucket that has been
* invalidated .
*/
static inline u8 ptr_stale ( struct bch_dev * ca ,
const struct bch_extent_ptr * ptr )
{
2021-12-24 03:08:06 -05:00
u8 ret ;
rcu_read_lock ( ) ;
2021-12-25 19:55:34 -05:00
ret = gen_after ( * bucket_gen ( ca , PTR_BUCKET_NR ( ca , ptr ) ) , ptr - > gen ) ;
2021-12-24 03:08:06 -05:00
rcu_read_unlock ( ) ;
return ret ;
2017-03-16 22:18:50 -08:00
}
/* bucket gc marks */
2018-11-27 08:23:22 -05:00
static inline bool is_available_bucket ( struct bucket_mark mark )
{
2021-01-21 20:51:51 -05:00
return ! mark . dirty_sectors & & ! mark . stripe ;
2018-11-27 08:23:22 -05:00
}
/* Device usage: */

struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *);
2017-03-16 22:18:50 -08:00
static inline u64 __dev_buckets_available ( struct bch_dev * ca ,
2022-01-09 20:48:31 -05:00
struct bch_dev_usage stats ,
enum alloc_reserve reserve )
{
s64 total = ca - > mi . nbuckets - ca - > mi . first_bucket ;
s64 reserved = 0 ;
switch ( reserve ) {
case RESERVE_none :
reserved + = ca - > mi . nbuckets > > 6 ;
fallthrough ;
case RESERVE_movinggc :
reserved + = ca - > nr_btree_reserve ;
fallthrough ;
case RESERVE_btree :
reserved + = ca - > nr_btree_reserve ;
fallthrough ;
case RESERVE_btree_movinggc :
break ;
default :
BUG ( ) ;
}
2017-03-16 22:18:50 -08:00
if ( WARN_ONCE ( stats . buckets_unavailable > total ,
" buckets_unavailable overflow (%llu > %llu) \n " ,
stats . buckets_unavailable , total ) )
return 0 ;
2022-01-09 20:48:31 -05:00
return max_t ( s64 , 0 ,
total -
stats . buckets_unavailable -
ca - > nr_open_buckets -
reserved ) ;
2017-03-16 22:18:50 -08:00
}
2022-01-09 20:48:31 -05:00
static inline u64 dev_buckets_available ( struct bch_dev * ca ,
enum alloc_reserve reserve )
2017-03-16 22:18:50 -08:00
{
2022-01-09 20:48:31 -05:00
return __dev_buckets_available ( ca , bch2_dev_usage_read ( ca ) , reserve ) ;
2017-03-16 22:18:50 -08:00
}
/* Filesystem usage: */
2019-02-14 20:39:17 -05:00
static inline unsigned fs_usage_u64s ( struct bch_fs * c )
2019-01-21 15:32:13 -05:00
{
2019-02-14 20:39:17 -05:00
return sizeof ( struct bch_fs_usage ) / sizeof ( u64 ) +
READ_ONCE ( c - > replicas . nr ) ;
}
2019-01-21 15:32:13 -05:00
2021-01-21 21:52:06 -05:00
static inline unsigned dev_usage_u64s ( void )
{
return sizeof ( struct bch_dev_usage ) / sizeof ( u64 ) ;
}
u64 bch2_fs_usage_read_one(struct bch_fs *, u64 *);

struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *);

void bch2_fs_usage_acc_to_base(struct bch_fs *, unsigned);

void bch2_fs_usage_to_text(struct printbuf *,
			   struct bch_fs *, struct bch_fs_usage_online *);

u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage_online *);

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);
2018-11-04 21:55:35 -05:00
2018-11-27 08:23:22 -05:00
/* key/bucket marking: */
2019-01-24 20:25:40 -05:00
void bch2_fs_usage_initialize ( struct bch_fs * ) ;
2017-03-16 22:18:50 -08:00
void bch2_mark_metadata_bucket ( struct bch_fs * , struct bch_dev * ,
size_t , enum bch_data_type , unsigned ,
struct gc_pos , unsigned ) ;
2022-03-13 00:26:52 -05:00
int bch2_mark_alloc ( struct btree_trans * , struct bkey_s_c , struct bkey_s_c , unsigned ) ;
int bch2_mark_extent ( struct btree_trans * , struct bkey_s_c , struct bkey_s_c , unsigned ) ;
int bch2_mark_stripe ( struct btree_trans * , struct bkey_s_c , struct bkey_s_c , unsigned ) ;
int bch2_mark_inode ( struct btree_trans * , struct bkey_s_c , struct bkey_s_c , unsigned ) ;
int bch2_mark_reservation ( struct btree_trans * , struct bkey_s_c , struct bkey_s_c , unsigned ) ;
int bch2_mark_reflink_p ( struct btree_trans * , struct bkey_s_c , struct bkey_s_c , unsigned ) ;
int bch2_trans_mark_extent ( struct btree_trans * , struct bkey_s_c , struct bkey_i * , unsigned ) ;
int bch2_trans_mark_stripe ( struct btree_trans * , struct bkey_s_c , struct bkey_i * , unsigned ) ;
int bch2_trans_mark_inode ( struct btree_trans * , struct bkey_s_c , struct bkey_i * , unsigned ) ;
int bch2_trans_mark_reservation ( struct btree_trans * , struct bkey_s_c , struct bkey_i * , unsigned ) ;
int bch2_trans_mark_reflink_p ( struct btree_trans * , struct bkey_s_c , struct bkey_i * , unsigned ) ;
2021-11-28 14:31:19 -05:00
int bch2_mark_key ( struct btree_trans * , struct bkey_s_c , struct bkey_s_c , unsigned ) ;
int bch2_trans_mark_key(struct btree_trans *, struct bkey_s_c,
			struct bkey_i *, unsigned);
static inline int bch2_trans_mark_old ( struct btree_trans * trans ,
struct bkey_s_c old , unsigned flags )
{
struct bkey_i deleted ;
bkey_init ( & deleted . k ) ;
deleted . k . p = old . k - > p ;
return bch2_trans_mark_key ( trans , old , & deleted ,
BTREE_TRIGGER_OVERWRITE | flags ) ;
}
static inline int bch2_trans_mark_new ( struct btree_trans * trans ,
struct bkey_i * new , unsigned flags )
{
struct bkey_i deleted ;
bkey_init ( & deleted . k ) ;
deleted . k . p = new - > k . p ;
return bch2_trans_mark_key ( trans , bkey_i_to_s_c ( & deleted ) , new ,
BTREE_TRIGGER_INSERT | flags ) ;
}
2021-11-28 14:08:58 -05:00
int bch2_trans_fs_usage_apply ( struct btree_trans * , struct replicas_delta_list * ) ;
2019-03-15 18:20:46 -04:00
2021-04-14 20:25:33 -04:00
int bch2_trans_mark_metadata_bucket ( struct btree_trans * , struct bch_dev * ,
size_t , enum bch_data_type , unsigned ) ;
int bch2_trans_mark_dev_sb ( struct bch_fs * , struct bch_dev * ) ;
2021-01-22 17:56:34 -05:00
2018-11-27 08:23:22 -05:00
/* disk reservations: */
2017-03-16 22:18:50 -08:00
static inline void bch2_disk_reservation_put ( struct bch_fs * c ,
struct disk_reservation * res )
{
2022-10-31 22:28:09 -04:00
if ( res - > sectors ) {
this_cpu_sub ( * c - > online_reserved , res - > sectors ) ;
res - > sectors = 0 ;
}
2017-03-16 22:18:50 -08:00
}
# define BCH_DISK_RESERVATION_NOFAIL (1 << 0)
2022-10-31 22:28:09 -04:00
int __bch2_disk_reservation_add ( struct bch_fs * ,
struct disk_reservation * ,
u64 , int ) ;
/*
 * Reserve @sectors on disk: fast path takes them from this CPU's
 * sectors_available pool with a percpu cmpxchg loop; when that pool is too
 * small, fall back to the slow path in __bch2_disk_reservation_add().
 */
static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
					    u64 sectors, int flags)
{
	u64 avail, remaining;

	do {
		avail = this_cpu_read(c->pcpu->sectors_available);
		if (sectors > avail)
			return __bch2_disk_reservation_add(c, res, sectors, flags);

		remaining = avail - sectors;
	} while (this_cpu_cmpxchg(c->pcpu->sectors_available,
				  avail, remaining) != avail);

	this_cpu_add(*c->online_reserved, sectors);
	res->sectors += sectors;
	return 0;
}
2017-03-16 22:18:50 -08:00
static inline struct disk_reservation
bch2_disk_reservation_init ( struct bch_fs * c , unsigned nr_replicas )
{
return ( struct disk_reservation ) {
. sectors = 0 ,
#if 0
/* not used yet: */
. gen = c - > capacity_gen ,
# endif
. nr_replicas = nr_replicas ,
} ;
}
static inline int bch2_disk_reservation_get ( struct bch_fs * c ,
struct disk_reservation * res ,
2021-01-17 13:19:16 -05:00
u64 sectors , unsigned nr_replicas ,
2017-03-16 22:18:50 -08:00
int flags )
{
* res = bch2_disk_reservation_init ( c , nr_replicas ) ;
return bch2_disk_reservation_add ( c , res , sectors * nr_replicas , flags ) ;
}
2021-05-18 20:36:20 -04:00
# define RESERVE_FACTOR 6
static inline u64 avail_factor ( u64 r )
{
return div_u64 ( r < < RESERVE_FACTOR , ( 1 < < RESERVE_FACTOR ) + 1 ) ;
}
2017-03-16 22:18:50 -08:00
int bch2_dev_buckets_resize ( struct bch_fs * , struct bch_dev * , u64 ) ;
void bch2_dev_buckets_free ( struct bch_dev * ) ;
int bch2_dev_buckets_alloc ( struct bch_fs * , struct bch_dev * ) ;
# endif /* _BUCKETS_H */