/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BTRFS_BLOCK_RSV_H
#define BTRFS_BLOCK_RSV_H

#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>

struct btrfs_trans_handle;
struct btrfs_root;
struct btrfs_fs_info;
struct btrfs_space_info;
enum btrfs_reserve_flush_enum;

/*
 * Types of block reserves
 */
enum btrfs_rsv_type {
	BTRFS_BLOCK_RSV_GLOBAL,
	BTRFS_BLOCK_RSV_DELALLOC,
	BTRFS_BLOCK_RSV_TRANS,
	BTRFS_BLOCK_RSV_CHUNK,
	BTRFS_BLOCK_RSV_DELOPS,
	BTRFS_BLOCK_RSV_DELREFS,
	BTRFS_BLOCK_RSV_EMPTY,
	BTRFS_BLOCK_RSV_TEMP,
};

struct btrfs_block_rsv {
	/* Target amount of space this block reserve wants to hold */
	u64 size;
	/* Amount of space currently reserved from the space_info */
	u64 reserved;
	struct btrfs_space_info *space_info;
	spinlock_t lock;
	/* Set once @reserved has reached @size */
	bool full;
	/* On failure return the error instead of retrying the reservation */
	bool failfast;
	/* Block reserve type, one of BTRFS_BLOCK_RSV_* */
	enum btrfs_rsv_type type:8;
	/*
	 * Qgroup equivalent of @size and @reserved.
	 *
	 * Unlike the normal @size/@reserved of an inode rsv, qgroup doesn't
	 * care about things like csum size or how many tree blocks will be
	 * needed for the reservation.
	 *
	 * Qgroup cares more about the net change of the extent usage.
	 *
	 * Even in the worst case, where one newly inserted file extent causes
	 * a leaf split and a tree level increase, those splits are amortized
	 * over the many file extent items a leaf holds, so reserving one
	 * nodesize per file extent is already an over-estimate.  (A rough
	 * worked example follows the struct definition.)
	 *
	 * In short, qgroup_rsv_size/qgroup_rsv_reserved is an upper limit of
	 * the qgroup metadata reservation that could be needed.
	 */
	u64 qgroup_rsv_size;
	u64 qgroup_rsv_reserved;
};

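/*
 * Rough worked example for the qgroup sizing above (numbers are illustrative
 * only, not derived from this header): with a 16K nodesize, a leaf holds on
 * the order of a hundred or more file extent items, each only a few dozen
 * bytes.  Inserting one file extent therefore grows the metadata trees by far
 * less than one nodesize on average, even when the occasional leaf split or
 * level increase is charged to it, so one nodesize per file extent is a
 * comfortable upper bound.
 */
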
void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, enum btrfs_rsv_type type);
void btrfs_init_root_block_rsv(struct btrfs_root *root);
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
					      enum btrfs_rsv_type type);
void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
				   struct btrfs_block_rsv *rsv,
				   enum btrfs_rsv_type type);
void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_rsv *rsv);
int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info,
			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent);
int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info,
			   struct btrfs_block_rsv *block_rsv, u64 min_reserved,
			   enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
			    struct btrfs_block_rsv *dst_rsv, u64 num_bytes,
			    bool update_size);
int btrfs_block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, u64 num_bytes);
void btrfs_block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes, bool update_size);
u64 btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
			    struct btrfs_block_rsv *block_rsv,
			    u64 num_bytes, u64 *qgroup_to_release);
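
/*
 * Minimal lifecycle sketch of the declarations above (illustrative only; the
 * reserve type, the SZ_1M byte count and the flush mode are made-up example
 * values, and error handling is trimmed):
 *
 *	struct btrfs_block_rsv *rsv;
 *	int ret;
 *
 *	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
 *	if (!rsv)
 *		return -ENOMEM;
 *	ret = btrfs_block_rsv_add(fs_info, rsv, SZ_1M, BTRFS_RESERVE_FLUSH_ALL);
 *	if (!ret) {
 *		... consume metadata space accounted against rsv ...
 *		btrfs_block_rsv_release(fs_info, rsv, (u64)-1, NULL);
 *	}
 *	btrfs_free_block_rsv(fs_info, rsv);
 */
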
void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info);
void btrfs_init_global_block_rsv(struct btrfs_fs_info *fs_info);
void btrfs_release_global_block_rsv(struct btrfs_fs_info *fs_info);
struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u32 blocksize);
static inline void btrfs_unuse_block_rsv(struct btrfs_fs_info *fs_info,
					 struct btrfs_block_rsv *block_rsv,
					 u32 blocksize)
{
	/* Hand the unused bytes back without growing the reserve's size. */
	btrfs_block_rsv_add_bytes(block_rsv, blocksize, false);
	/* A release of 0 bytes trims any excess above @size out of the rsv. */
	btrfs_block_rsv_release(fs_info, block_rsv, 0, NULL);
}
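
/*
 * Pairing sketch (hypothetical caller, for illustration only): a tree block
 * allocation takes @blocksize from the reserve returned by
 * btrfs_use_block_rsv(); if the allocation is abandoned, the bytes are
 * handed back with btrfs_unuse_block_rsv():
 *
 *	rsv = btrfs_use_block_rsv(trans, root, fs_info->nodesize);
 *	if (IS_ERR(rsv))
 *		return ERR_CAST(rsv);
 *	... try to allocate the tree block ...
 *	if (failed)
 *		btrfs_unuse_block_rsv(fs_info, rsv, fs_info->nodesize);
 */
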
/*
 * Fast path to check if the reserve is full.  May be used carefully outside
 * of rsv->lock; the lockless read is annotated with data_race().
 */
static inline bool btrfs_block_rsv_full(const struct btrfs_block_rsv *rsv)
{
	return data_race(rsv->full);
}
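
/*
 * Example use of the lockless check above (illustrative only; the caller and
 * the choice of the global reserve are assumptions for the sketch): a
 * flushing path can bail out early when the reserve already looks full,
 * without taking rsv->lock:
 *
 *	if (btrfs_block_rsv_full(&fs_info->global_block_rsv))
 *		return;
 */
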
#endif /* BTRFS_BLOCK_RSV_H */