/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BTRFS_BLOCK_RSV_H
#define BTRFS_BLOCK_RSV_H
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
/*
 * Forward declarations: only pointers to these types appear below, so the
 * full definitions are not needed in this header.
 */
struct btrfs_trans_handle;
struct btrfs_root;
struct btrfs_space_info;
struct btrfs_block_rsv;
struct btrfs_fs_info;
enum btrfs_reserve_flush_enum;
/*
 * Types of block reserves
 *
 * NOTE: the constants are positional (0..7); struct btrfs_block_rsv stores
 * the value in an 8-bit bitfield, so do not reorder or renumber them.
 */
enum btrfs_rsv_type {
	BTRFS_BLOCK_RSV_GLOBAL,
	BTRFS_BLOCK_RSV_DELALLOC,
	BTRFS_BLOCK_RSV_TRANS,
	BTRFS_BLOCK_RSV_CHUNK,
	BTRFS_BLOCK_RSV_DELOPS,
	BTRFS_BLOCK_RSV_DELREFS,
	BTRFS_BLOCK_RSV_EMPTY,
	/* Temporary reserve, presumably short-lived — confirm at call sites. */
	BTRFS_BLOCK_RSV_TEMP,
};
struct btrfs_block_rsv {
	/*
	 * @size and @reserved are read under ->lock by the accessors below
	 * (btrfs_block_rsv_reserved()/btrfs_block_rsv_size()).
	 */
	u64 size;
	u64 reserved;
	struct btrfs_space_info *space_info;
	spinlock_t lock;
	/* Lockless fast-path flag, read via data_race() in btrfs_block_rsv_full(). */
	bool full;
	bool failfast;
	/* Block reserve type, one of BTRFS_BLOCK_RSV_* */
	enum btrfs_rsv_type type:8;

	/*
	 * Qgroup equivalent for @size @reserved
	 *
	 * Unlike normal @size/@reserved for inode rsv, qgroup doesn't care
	 * about things like csum size nor how many tree blocks it will need to
	 * reserve.
	 *
	 * Qgroup cares more about net change of the extent usage.
	 *
	 * So for one newly inserted file extent, in worst case it will cause
	 * leaf split and level increase, nodesize for each file extent is
	 * already too much.
	 *
	 * In short, qgroup_rsv_size/reserved is the upper limit of possible
	 * needed qgroup metadata reservation.
	 */
	u64 qgroup_rsv_size;
	u64 qgroup_rsv_reserved;
};
/* Initialization, allocation and teardown of block reserves. */
void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, enum btrfs_rsv_type type);
void btrfs_init_root_block_rsv(struct btrfs_root *root);
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
					      enum btrfs_rsv_type type);
void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
				   struct btrfs_block_rsv *rsv,
				   enum btrfs_rsv_type type);
void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_rsv *rsv);

/*
 * Reserving, checking, refilling and moving space between reserves.
 * The int-returning functions follow the kernel convention of 0 on success /
 * negative errno on failure — confirm against the definitions in block-rsv.c.
 */
int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info,
			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent);
int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info,
			   struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			   enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
			    struct btrfs_block_rsv *dst_rsv, u64 num_bytes,
			    bool update_size);
int btrfs_block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, u64 num_bytes);
void btrfs_block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes, bool update_size);
u64 btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
			    struct btrfs_block_rsv *block_rsv,
			    u64 num_bytes, u64 *qgroup_to_release);

/* Global block reserve management. */
void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info);
void btrfs_init_global_block_rsv(struct btrfs_fs_info *fs_info);
void btrfs_release_global_block_rsv(struct btrfs_fs_info *fs_info);

struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u32 blocksize);
int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *rsv);
/*
 * Return @blocksize bytes to @block_rsv (the counterpart of
 * btrfs_use_block_rsv()).
 *
 * The bytes are added back without growing the reserve's size
 * (update_size == false), then btrfs_block_rsv_release() is called with
 * num_bytes == 0 — presumably so that anything now in excess of @size flows
 * back to the space_info; confirm against btrfs_block_rsv_release().
 */
static inline void btrfs_unuse_block_rsv(struct btrfs_fs_info *fs_info,
					 struct btrfs_block_rsv *block_rsv,
					 u32 blocksize)
{
	btrfs_block_rsv_add_bytes(block_rsv, blocksize, false);
	btrfs_block_rsv_release(fs_info, block_rsv, 0, NULL);
}
/*
 * Fast path to check if the reserve is full, may be carefully used outside of
 * locks.
 *
 * The data_race() annotation tells KCSAN this lockless read of ->full is
 * intentional; callers must tolerate a stale value.
 */
static inline bool btrfs_block_rsv_full(const struct btrfs_block_rsv *rsv)
{
	return data_race(rsv->full);
}
2024-02-19 19:41:23 +00:00
/*
* Get the reserved mount of a block reserve in a context where getting a stale
* value is acceptable , instead of accessing it directly and trigger data race
* warning from KCSAN .
*/
static inline u64 btrfs_block_rsv_reserved ( struct btrfs_block_rsv * rsv )
{
u64 ret ;
spin_lock ( & rsv - > lock ) ;
ret = rsv - > reserved ;
spin_unlock ( & rsv - > lock ) ;
return ret ;
}
2024-02-19 20:10:07 +00:00
/*
* Get the size of a block reserve in a context where getting a stale value is
* acceptable , instead of accessing it directly and trigger data race warning
* from KCSAN .
*/
static inline u64 btrfs_block_rsv_size ( struct btrfs_block_rsv * rsv )
{
u64 ret ;
spin_lock ( & rsv - > lock ) ;
ret = rsv - > size ;
spin_unlock ( & rsv - > lock ) ;
return ret ;
}
#endif /* BTRFS_BLOCK_RSV_H */