/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */
# ifndef _BUCKETS_H
# define _BUCKETS_H
# include "buckets_types.h"
2022-04-03 17:50:01 -04:00
# include "extents.h"
2017-03-16 22:18:50 -08:00
# include "super.h"
# define for_each_bucket(_b, _buckets) \
for ( _b = ( _buckets ) - > b + ( _buckets ) - > first_bucket ; \
_b < ( _buckets ) - > b + ( _buckets ) - > nbuckets ; _b + + )
/*
 * Drop the per-bucket spinlock taken with bucket_lock().
 *
 * smp_store_release() orders all prior accesses to the bucket before the
 * store that releases the lock.
 */
static inline void bucket_unlock(struct bucket *b)
{
	smp_store_release(&b->lock, 0);
}
static inline void bucket_lock ( struct bucket * b )
{
while ( xchg ( & b - > lock , 1 ) )
cpu_relax ( ) ;
}
/*
 * RCU-protected access to the GC bucket array.
 *
 * The rcu_dereference_check() condition documents when access without
 * rcu_read_lock() is legitimate: the device is not yet attached to a
 * filesystem (!ca->fs), or the caller holds mark_lock, gc_lock, or the
 * device's bucket_lock.
 */
static inline struct bucket_array *gc_bucket_array(struct bch_dev *ca)
{
	return rcu_dereference_check(ca->buckets_gc,
				     !ca->fs ||
				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
				     lockdep_is_held(&ca->fs->gc_lock) ||
				     lockdep_is_held(&ca->bucket_lock));
}
2022-02-10 19:26:55 -05:00
static inline struct bucket * gc_bucket ( struct bch_dev * ca , size_t b )
2017-03-16 22:18:50 -08:00
{
2022-02-10 19:26:55 -05:00
struct bucket_array * buckets = gc_bucket_array ( ca ) ;
2017-03-16 22:18:50 -08:00
BUG_ON ( b < buckets - > first_bucket | | b > = buckets - > nbuckets ) ;
return buckets - > b + b ;
}
/*
 * RCU-protected access to the bucket generations array; access without
 * rcu_read_lock() is also legitimate under mark_lock, gc_lock or
 * bucket_lock (see the rcu_dereference_check() condition).
 */
static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
{
	return rcu_dereference_check(ca->bucket_gens,
				     !ca->fs ||
				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
				     lockdep_is_held(&ca->fs->gc_lock) ||
				     lockdep_is_held(&ca->bucket_lock));
}
/* Pointer to the generation number of bucket @b; BUG if out of range. */
static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
{
	struct bucket_gens *g = bucket_gens(ca);

	BUG_ON(b < g->first_bucket || b >= g->nbuckets);
	return &g->b[b];
}
/* Index of the bucket containing the sector @ptr points at. */
static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
				   const struct bch_extent_ptr *ptr)
{
	return sector_to_bucket(ca, ptr->offset);
}
2021-12-31 20:03:29 -05:00
static inline struct bpos PTR_BUCKET_POS ( const struct bch_fs * c ,
const struct bch_extent_ptr * ptr )
{
struct bch_dev * ca = bch_dev_bkey_exists ( c , ptr - > dev ) ;
return POS ( ptr - > dev , PTR_BUCKET_NR ( ca , ptr ) ) ;
}
/* GC-mode bucket that @ptr points into. */
static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
					   const struct bch_extent_ptr *ptr)
{
	return gc_bucket(ca, PTR_BUCKET_NR(ca, ptr));
}
2019-08-29 11:34:01 -04:00
static inline enum bch_data_type ptr_data_type ( const struct bkey * k ,
const struct bch_extent_ptr * ptr )
{
2022-04-03 17:50:01 -04:00
if ( bkey_is_btree_ptr ( k ) )
2020-07-09 18:28:11 -04:00
return BCH_DATA_btree ;
2019-08-29 11:34:01 -04:00
2020-07-09 18:28:11 -04:00
return ptr - > cached ? BCH_DATA_cached : BCH_DATA_user ;
2019-08-29 11:34:01 -04:00
}
2017-03-16 22:18:50 -08:00
static inline int gen_cmp ( u8 a , u8 b )
{
return ( s8 ) ( a - b ) ;
}
/* How many generations newer @a is than @b, or 0 if it isn't newer. */
static inline int gen_after(u8 a, u8 b)
{
	int d = gen_cmp(a, b);

	return d <= 0 ? 0 : d;
}
/**
 * ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 * @ca:		device the pointer refers to
 * @ptr:	extent pointer to check
 *
 * Returns: the number of generations the pointer is stale by, or 0 if
 * it is not stale.
 */
static inline u8 ptr_stale(struct bch_dev *ca,
			   const struct bch_extent_ptr *ptr)
{
	u8 ret;

	/* bucket_gen() dereferences an RCU-protected array */
	rcu_read_lock();
	ret = gen_after(*bucket_gen(ca, PTR_BUCKET_NR(ca, ptr)), ptr->gen);
	rcu_read_unlock();

	return ret;
}
/* Device usage: */
2022-10-21 14:01:19 -04:00
void bch2_dev_usage_read_fast ( struct bch_dev * , struct bch_dev_usage * ) ;
static inline struct bch_dev_usage bch2_dev_usage_read ( struct bch_dev * ca )
{
struct bch_dev_usage ret ;
bch2_dev_usage_read_fast ( ca , & ret ) ;
return ret ;
}
2022-04-01 01:29:59 -04:00
void bch2_dev_usage_init ( struct bch_dev * ) ;
2017-03-16 22:18:50 -08:00
2022-04-01 01:29:59 -04:00
static inline u64 bch2_dev_buckets_reserved ( struct bch_dev * ca , enum alloc_reserve reserve )
2022-01-09 20:48:31 -05:00
{
s64 reserved = 0 ;
switch ( reserve ) {
case RESERVE_none :
reserved + = ca - > mi . nbuckets > > 6 ;
fallthrough ;
case RESERVE_movinggc :
reserved + = ca - > nr_btree_reserve ;
fallthrough ;
case RESERVE_btree :
reserved + = ca - > nr_btree_reserve ;
fallthrough ;
case RESERVE_btree_movinggc :
break ;
}
2017-03-16 22:18:50 -08:00
2022-04-01 01:29:59 -04:00
return reserved ;
}
2017-03-16 22:18:50 -08:00
2022-06-14 17:51:20 -04:00
static inline u64 dev_buckets_free ( struct bch_dev * ca ,
struct bch_dev_usage usage ,
enum alloc_reserve reserve )
{
return max_t ( s64 , 0 ,
usage . d [ BCH_DATA_free ] . buckets -
ca - > nr_open_buckets -
bch2_dev_buckets_reserved ( ca , reserve ) ) ;
}
/*
 * Buckets eventually available for allocation on @ca: free buckets plus
 * the cached, need_gc_gens and need_discard counts, minus open buckets
 * and the reserve's held-back buckets; clamped to a minimum of zero.
 */
static inline u64 __dev_buckets_available(struct bch_dev *ca,
					  struct bch_dev_usage usage,
					  enum alloc_reserve reserve)
{
	return max_t(s64, 0,
		     usage.d[BCH_DATA_free].buckets
		     + usage.d[BCH_DATA_cached].buckets
		     + usage.d[BCH_DATA_need_gc_gens].buckets
		     + usage.d[BCH_DATA_need_discard].buckets
		     - ca->nr_open_buckets
		     - bch2_dev_buckets_reserved(ca, reserve));
}
2022-01-09 20:48:31 -05:00
static inline u64 dev_buckets_available ( struct bch_dev * ca ,
enum alloc_reserve reserve )
2017-03-16 22:18:50 -08:00
{
2022-01-09 20:48:31 -05:00
return __dev_buckets_available ( ca , bch2_dev_usage_read ( ca ) , reserve ) ;
2017-03-16 22:18:50 -08:00
}
/* Filesystem usage: */
/*
 * Size, in u64s, of struct bch_fs_usage plus its variable-length tail:
 * one extra u64 per replicas entry.
 */
static inline unsigned fs_usage_u64s(struct bch_fs *c)
{
	return sizeof(struct bch_fs_usage) / sizeof(u64) +
		READ_ONCE(c->replicas.nr);
}
/* Size of struct bch_dev_usage, in units of u64. */
static inline unsigned dev_usage_u64s(void)
{
	return sizeof(struct bch_dev_usage) / sizeof(u64);
}
2019-02-10 19:34:47 -05:00
u64 bch2_fs_usage_read_one ( struct bch_fs * , u64 * ) ;
2017-03-16 22:18:50 -08:00
2019-02-10 19:34:47 -05:00
struct bch_fs_usage_online * bch2_fs_usage_read ( struct bch_fs * ) ;
void bch2_fs_usage_acc_to_base ( struct bch_fs * , unsigned ) ;
void bch2_fs_usage_to_text ( struct printbuf * ,
struct bch_fs * , struct bch_fs_usage_online * ) ;
u64 bch2_fs_sectors_used ( struct bch_fs * , struct bch_fs_usage_online * ) ;
2017-03-16 22:18:50 -08:00
2018-11-27 08:23:22 -05:00
struct bch_fs_usage_short
bch2_fs_usage_read_short ( struct bch_fs * ) ;
2018-11-04 21:55:35 -05:00
2018-11-27 08:23:22 -05:00
/* key/bucket marking: */
2019-01-24 20:25:40 -05:00
void bch2_fs_usage_initialize ( struct bch_fs * ) ;
2017-03-16 22:18:50 -08:00
2022-04-02 18:00:04 -04:00
int bch2_mark_metadata_bucket ( struct bch_fs * , struct bch_dev * ,
size_t , enum bch_data_type , unsigned ,
struct gc_pos , unsigned ) ;
2017-03-16 22:18:50 -08:00
2022-03-13 00:26:52 -05:00
int bch2_mark_alloc ( struct btree_trans * , struct bkey_s_c , struct bkey_s_c , unsigned ) ;
int bch2_mark_extent ( struct btree_trans * , struct bkey_s_c , struct bkey_s_c , unsigned ) ;
int bch2_mark_stripe ( struct btree_trans * , struct bkey_s_c , struct bkey_s_c , unsigned ) ;
int bch2_mark_inode ( struct btree_trans * , struct bkey_s_c , struct bkey_s_c , unsigned ) ;
int bch2_mark_reservation ( struct btree_trans * , struct bkey_s_c , struct bkey_s_c , unsigned ) ;
int bch2_mark_reflink_p ( struct btree_trans * , struct bkey_s_c , struct bkey_s_c , unsigned ) ;
2022-03-31 21:44:55 -04:00
int bch2_trans_mark_extent ( struct btree_trans * , enum btree_id , unsigned , struct bkey_s_c , struct bkey_i * , unsigned ) ;
int bch2_trans_mark_stripe ( struct btree_trans * , enum btree_id , unsigned , struct bkey_s_c , struct bkey_i * , unsigned ) ;
int bch2_trans_mark_inode ( struct btree_trans * , enum btree_id , unsigned , struct bkey_s_c , struct bkey_i * , unsigned ) ;
int bch2_trans_mark_reservation ( struct btree_trans * , enum btree_id , unsigned , struct bkey_s_c , struct bkey_i * , unsigned ) ;
int bch2_trans_mark_reflink_p ( struct btree_trans * , enum btree_id , unsigned , struct bkey_s_c , struct bkey_i * , unsigned ) ;
2022-03-13 00:26:52 -05:00
2021-11-28 14:08:58 -05:00
int bch2_trans_fs_usage_apply ( struct btree_trans * , struct replicas_delta_list * ) ;
2019-03-15 18:20:46 -04:00
2021-04-14 20:25:33 -04:00
int bch2_trans_mark_metadata_bucket ( struct btree_trans * , struct bch_dev * ,
size_t , enum bch_data_type , unsigned ) ;
int bch2_trans_mark_dev_sb ( struct bch_fs * , struct bch_dev * ) ;
2021-01-22 17:56:34 -05:00
2018-11-27 08:23:22 -05:00
/* disk reservations: */
2017-03-16 22:18:50 -08:00
static inline void bch2_disk_reservation_put ( struct bch_fs * c ,
struct disk_reservation * res )
{
2022-10-31 22:28:09 -04:00
if ( res - > sectors ) {
this_cpu_sub ( * c - > online_reserved , res - > sectors ) ;
res - > sectors = 0 ;
}
2017-03-16 22:18:50 -08:00
}
# define BCH_DISK_RESERVATION_NOFAIL (1 << 0)
2022-10-31 22:28:09 -04:00
int __bch2_disk_reservation_add ( struct bch_fs * ,
struct disk_reservation * ,
u64 , int ) ;
/*
 * Reserve @sectors sectors of disk space, adding them to @res.
 *
 * Fast path (kernel only): a lockless this_cpu_cmpxchg() loop that
 * carves the sectors out of this CPU's cached slice of free space
 * (c->pcpu->sectors_available). When the slice is too small we fall
 * back to __bch2_disk_reservation_add().
 *
 * Returns 0 on success, or an error from the slow path.
 */
static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
					    u64 sectors, int flags)
{
#ifdef __KERNEL__
	u64 old, new;

	do {
		old = this_cpu_read(c->pcpu->sectors_available);
		/* per-cpu slice exhausted: take the slow path */
		if (sectors > old)
			return __bch2_disk_reservation_add(c, res, sectors, flags);

		new = old - sectors;
	} while (this_cpu_cmpxchg(c->pcpu->sectors_available,
				  old, new) != old);

	this_cpu_add(*c->online_reserved, sectors);
	res->sectors += sectors;
	return 0;
#else
	/* no percpu fast path in userspace */
	return __bch2_disk_reservation_add(c, res, sectors, flags);
#endif
}
2017-03-16 22:18:50 -08:00
/* An empty reservation for @nr_replicas replicas; add sectors separately. */
static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
	return (struct disk_reservation) {
		.sectors	= 0,
#if 0
		/* not used yet: */
		.gen		= c->capacity_gen,
#endif
		.nr_replicas	= nr_replicas,
	};
}
/*
 * Initialize @res and reserve @sectors * @nr_replicas sectors in one
 * call. Returns 0 on success, or an error from
 * bch2_disk_reservation_add().
 */
static inline int bch2_disk_reservation_get(struct bch_fs *c,
					    struct disk_reservation *res,
					    u64 sectors, unsigned nr_replicas,
					    int flags)
{
	*res = bch2_disk_reservation_init(c, nr_replicas);

	return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}
#define RESERVE_FACTOR	6

/*
 * Scale raw capacity @r by 2^RESERVE_FACTOR / (2^RESERVE_FACTOR + 1),
 * i.e. 64/65 — holding back 1/65th of the space.
 */
static inline u64 avail_factor(u64 r)
{
	return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
}
2017-03-16 22:18:50 -08:00
int bch2_dev_buckets_resize ( struct bch_fs * , struct bch_dev * , u64 ) ;
void bch2_dev_buckets_free ( struct bch_dev * ) ;
int bch2_dev_buckets_alloc ( struct bch_fs * , struct bch_dev * ) ;
# endif /* _BUCKETS_H */