/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_SPACE_INFO_H
#define BTRFS_SPACE_INFO_H
struct btrfs_space_info {
spinlock_t lock ;
u64 total_bytes ; /* total bytes in the space,
this doesn ' t take mirrors into account */
u64 bytes_used ; /* total bytes used,
this doesn ' t take mirrors into account */
u64 bytes_pinned ; /* total bytes pinned, will be freed when the
transaction finishes */
u64 bytes_reserved ; /* total bytes the allocator has reserved for
current allocations */
u64 bytes_may_use ; /* number of bytes that may be used for
delalloc / allocations */
u64 bytes_readonly ; /* total bytes that are read only */
2021-02-04 13:21:52 +03:00
u64 bytes_zone_unusable ; /* total bytes that are unusable until
resetting the device zone */
2019-06-18 23:09:16 +03:00
u64 max_extent_size ; /* This will hold the maximum extent size of
the space info if we had an ENOSPC in the
allocator . */
2020-10-09 16:28:27 +03:00
int clamp ; /* Used to scale our threshold for preemptive
flushing . The value is > > clamp , so turns
out to be a 2 ^ clamp divisor . */
2019-06-18 23:09:16 +03:00
unsigned int full : 1 ; /* indicates that we cannot allocate any more
chunks for this space */
unsigned int chunk_alloc : 1 ; /* set if we are allocating a chunk */
unsigned int flush : 1 ; /* set if we are trying to make space */
unsigned int force_alloc ; /* set if we need to force a chunk
alloc for this space */
u64 disk_used ; /* total bytes used on disk */
u64 disk_total ; /* total bytes on disk, takes mirrors into
account */
u64 flags ;
struct list_head list ;
/* Protected by the spinlock 'lock'. */
struct list_head ro_bgs ;
struct list_head priority_tickets ;
struct list_head tickets ;
2020-03-10 12:00:35 +03:00
/*
* Size of space that needs to be reclaimed in order to satisfy pending
* tickets
*/
u64 reclaim_size ;
2019-06-18 23:09:16 +03:00
/*
* tickets_id just indicates the next ticket will be handled , so note
* it ' s not stored per ticket .
*/
u64 tickets_id ;
struct rw_semaphore groups_sem ;
/* for block groups in our same type */
struct list_head block_groups [ BTRFS_NR_RAID_TYPES ] ;
struct kobject kobj ;
struct kobject * block_group_kobjs [ BTRFS_NR_RAID_TYPES ] ;
} ;
2019-06-18 23:09:22 +03:00
struct reserve_ticket {
u64 bytes ;
int error ;
2020-03-13 22:58:05 +03:00
bool steal ;
2019-06-18 23:09:22 +03:00
struct list_head list ;
wait_queue_head_t wait ;
} ;
2019-06-18 23:09:16 +03:00
static inline bool btrfs_mixed_space_info ( struct btrfs_space_info * space_info )
{
return ( ( space_info - > flags & BTRFS_BLOCK_GROUP_METADATA ) & &
( space_info - > flags & BTRFS_BLOCK_GROUP_DATA ) ) ;
}
2019-06-18 23:09:21 +03:00
/*
*
* Declare a helper function to detect underflow of various space info members
*/
2019-08-22 22:10:55 +03:00
# define DECLARE_SPACE_INFO_UPDATE(name, trace_name) \
2019-06-18 23:09:21 +03:00
static inline void \
btrfs_space_info_update_ # # name ( struct btrfs_fs_info * fs_info , \
struct btrfs_space_info * sinfo , \
s64 bytes ) \
{ \
2019-08-22 22:10:55 +03:00
const u64 abs_bytes = ( bytes < 0 ) ? - bytes : bytes ; \
2019-06-18 23:09:21 +03:00
lockdep_assert_held ( & sinfo - > lock ) ; \
trace_update_ # # name ( fs_info , sinfo , sinfo - > name , bytes ) ; \
2019-08-22 22:10:55 +03:00
trace_btrfs_space_reservation ( fs_info , trace_name , \
sinfo - > flags , abs_bytes , \
bytes > 0 ) ; \
2019-06-18 23:09:21 +03:00
if ( bytes < 0 & & sinfo - > name < - bytes ) { \
WARN_ON ( 1 ) ; \
sinfo - > name = 0 ; \
return ; \
} \
sinfo - > name + = bytes ; \
}
2019-08-22 22:10:55 +03:00
DECLARE_SPACE_INFO_UPDATE ( bytes_may_use , " space_info " ) ;
DECLARE_SPACE_INFO_UPDATE ( bytes_pinned , " pinned " ) ;
2019-06-18 23:09:21 +03:00
2019-06-18 23:09:19 +03:00
int btrfs_init_space_info ( struct btrfs_fs_info * fs_info ) ;
void btrfs_update_space_info ( struct btrfs_fs_info * info , u64 flags ,
u64 total_bytes , u64 bytes_used ,
2021-02-04 13:21:52 +03:00
u64 bytes_readonly , u64 bytes_zone_unusable ,
2019-06-18 23:09:19 +03:00
struct btrfs_space_info * * space_info ) ;
struct btrfs_space_info * btrfs_find_space_info ( struct btrfs_fs_info * info ,
u64 flags ) ;
2019-10-01 20:57:39 +03:00
u64 __pure btrfs_space_info_used ( struct btrfs_space_info * s_info ,
2019-06-18 23:09:19 +03:00
bool may_use_included ) ;
void btrfs_clear_space_info_full ( struct btrfs_fs_info * info ) ;
2019-06-18 23:09:24 +03:00
void btrfs_dump_space_info ( struct btrfs_fs_info * fs_info ,
struct btrfs_space_info * info , u64 bytes ,
int dump_block_groups ) ;
2019-06-18 23:09:25 +03:00
int btrfs_reserve_metadata_bytes ( struct btrfs_root * root ,
struct btrfs_block_rsv * block_rsv ,
u64 orig_bytes ,
enum btrfs_reserve_flush_enum flush ) ;
2019-08-22 22:10:58 +03:00
void btrfs_try_granting_tickets ( struct btrfs_fs_info * fs_info ,
struct btrfs_space_info * space_info ) ;
2020-01-17 17:07:39 +03:00
int btrfs_can_overcommit ( struct btrfs_fs_info * fs_info ,
struct btrfs_space_info * space_info , u64 bytes ,
enum btrfs_reserve_flush_enum flush ) ;
2019-08-22 22:10:58 +03:00
2019-08-22 22:11:02 +03:00
static inline void btrfs_space_info_free_bytes_may_use (
2019-08-22 22:10:58 +03:00
struct btrfs_fs_info * fs_info ,
struct btrfs_space_info * space_info ,
u64 num_bytes )
{
spin_lock ( & space_info - > lock ) ;
btrfs_space_info_update_bytes_may_use ( fs_info , space_info , - num_bytes ) ;
btrfs_try_granting_tickets ( fs_info , space_info ) ;
spin_unlock ( & space_info - > lock ) ;
}
2020-07-21 17:22:25 +03:00
int btrfs_reserve_data_bytes ( struct btrfs_fs_info * fs_info , u64 bytes ,
enum btrfs_reserve_flush_enum flush ) ;
2019-06-18 23:09:16 +03:00
# endif /* BTRFS_SPACE_INFO_H */