We are currently using a cached rb_root (struct rb_root_cached) for the rb root of struct extent_map_tree. This doesn't offer much of an advantage here because:

1) Its only advantage over the regular rb_root is that it caches a pointer to the leftmost (first) node, so a call to rb_first_cached() doesn't have to chase pointers until it reaches the leftmost node;

2) We only have two scenarios that access the leftmost node with rb_first_cached():

   When dropping all extent maps from an inode, during inode eviction;

   When iterating over extent maps during the extent map shrinker;

3) In both cases we keep removing extent maps, which causes deletion of the leftmost node, so rb_erase_cached() has to call rb_next() to find out which node is the next leftmost and assign it to struct rb_root_cached::rb_leftmost;

4) We can do that ourselves in those two use cases (see the sketch below) and stop using an rb_root_cached rb tree, using a regular rb_root rb tree instead.

This reduces the size of struct extent_map_tree by 8 bytes and, since this structure is embedded in struct btrfs_inode, it also reduces the size of that structure by 8 bytes. So on a 64-bit platform the size of btrfs_inode is reduced from 1032 bytes down to 1024 bytes, which means we will be able to fit 4 inodes per 4K page instead of 3.

Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
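As an illustration of point 4, here is a minimal sketch, kept separate from the header file below and using hypothetical names (struct item, drain_tree) rather than the actual btrfs code: it drains a plain rb_root and finds the next leftmost node itself with rb_next() before each erase, which is the same bookkeeping rb_erase_cached() otherwise does internally to maintain rb_root_cached::rb_leftmost.

/*
 * Illustrative sketch only: hypothetical type and function names, not the
 * actual btrfs eviction/shrinker code.
 */
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/types.h>

struct item {
	struct rb_node rb_node;
	u64 start;
};

static void drain_tree(struct rb_root *root)
{
	/* rb_first() walks down to the leftmost node once, in O(log n). */
	struct rb_node *node = rb_first(root);

	while (node) {
		struct item *item = rb_entry(node, struct item, rb_node);
		/* The successor of the leftmost node is the next leftmost. */
		struct rb_node *next = rb_next(node);

		rb_erase(node, root);
		RB_CLEAR_NODE(node);
		kfree(item);
		node = next;
	}
}

Since the successor of the leftmost node is always the new leftmost, the loop never has to re-walk the tree from the root, so dropping the cached rb_leftmost pointer costs nothing in these two code paths.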
fs/btrfs/extent_map.h · 199 lines · 5.6 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_MAP_H
#define BTRFS_EXTENT_MAP_H

#include <linux/compiler_types.h>
#include <linux/rwlock_types.h>
#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include "misc.h"
#include "extent_map.h"
#include "compression.h"

struct btrfs_inode;
struct btrfs_fs_info;

#define EXTENT_MAP_LAST_BYTE ((u64)-4)
#define EXTENT_MAP_HOLE ((u64)-3)
#define EXTENT_MAP_INLINE ((u64)-2)

/* bits for the extent_map::flags field */
enum {
	/* this entry not yet on disk, don't free it */
	ENUM_BIT(EXTENT_FLAG_PINNED),
	ENUM_BIT(EXTENT_FLAG_COMPRESS_ZLIB),
	ENUM_BIT(EXTENT_FLAG_COMPRESS_LZO),
	ENUM_BIT(EXTENT_FLAG_COMPRESS_ZSTD),
	/* pre-allocated extent */
	ENUM_BIT(EXTENT_FLAG_PREALLOC),
	/* Logging this extent */
	ENUM_BIT(EXTENT_FLAG_LOGGING),
	/* This em is merged from two or more physically adjacent ems */
	ENUM_BIT(EXTENT_FLAG_MERGED),
};

/*
 * This structure represents file extents and holes.
 *
 * Unlike on-disk file extent items, extent maps can be merged to save memory.
 * This means members only match file extent items before any merging.
 *
 * Keep this structure as compact as possible, as we can have really large
 * amounts of allocated extent maps at any time.
 */
struct extent_map {
	struct rb_node rb_node;

	/* All of these are in bytes. */

	/* File offset matching the offset of a BTRFS_EXTENT_ITEM_KEY key. */
	u64 start;

	/*
	 * Length of the file extent.
	 *
	 * For non-inlined file extents it's btrfs_file_extent_item::num_bytes.
	 * For inline extents it's sectorsize, since inline data starts at
	 * offsetof(struct btrfs_file_extent_item, disk_bytenr) thus
	 * btrfs_file_extent_item::num_bytes is not valid.
	 */
	u64 len;

	/*
	 * The file offset of the original file extent before splitting.
	 *
	 * This is an in-memory only member, matching
	 * extent_map::start - btrfs_file_extent_item::offset for
	 * regular/preallocated extents. EXTENT_MAP_HOLE otherwise.
	 */
	u64 orig_start;

	/*
	 * The full on-disk extent length, matching
	 * btrfs_file_extent_item::disk_num_bytes.
	 */
	u64 orig_block_len;

	/*
	 * The decompressed size of the whole on-disk extent, matching
	 * btrfs_file_extent_item::ram_bytes.
	 */
	u64 ram_bytes;

	/*
	 * The on-disk logical bytenr for the file extent.
	 *
	 * For compressed extents it matches btrfs_file_extent_item::disk_bytenr.
	 * For uncompressed extents it matches
	 * btrfs_file_extent_item::disk_bytenr + btrfs_file_extent_item::offset
	 *
	 * For holes it is EXTENT_MAP_HOLE and for inline extents it is
	 * EXTENT_MAP_INLINE.
	 */
	u64 block_start;

	/*
	 * The on-disk length for the file extent.
	 *
	 * For compressed extents it matches btrfs_file_extent_item::disk_num_bytes.
	 * For uncompressed extents it matches extent_map::len.
	 * For holes and inline extents it's -1 and shouldn't be used.
	 */
	u64 block_len;

	/*
	 * Generation of the extent map, for merged em it's the highest
	 * generation of all merged ems.
	 * For non-merged extents, it's from btrfs_file_extent_item::generation.
	 */
	u64 generation;
	u32 flags;
	refcount_t refs;
	struct list_head list;
};

struct extent_map_tree {
	struct rb_root root;
	struct list_head modified_extents;
	rwlock_t lock;
};

struct btrfs_inode;

static inline void extent_map_set_compression(struct extent_map *em,
					      enum btrfs_compression_type type)
{
	if (type == BTRFS_COMPRESS_ZLIB)
		em->flags |= EXTENT_FLAG_COMPRESS_ZLIB;
	else if (type == BTRFS_COMPRESS_LZO)
		em->flags |= EXTENT_FLAG_COMPRESS_LZO;
	else if (type == BTRFS_COMPRESS_ZSTD)
		em->flags |= EXTENT_FLAG_COMPRESS_ZSTD;
}

static inline enum btrfs_compression_type extent_map_compression(const struct extent_map *em)
{
	if (em->flags & EXTENT_FLAG_COMPRESS_ZLIB)
		return BTRFS_COMPRESS_ZLIB;

	if (em->flags & EXTENT_FLAG_COMPRESS_LZO)
		return BTRFS_COMPRESS_LZO;

	if (em->flags & EXTENT_FLAG_COMPRESS_ZSTD)
		return BTRFS_COMPRESS_ZSTD;

	return BTRFS_COMPRESS_NONE;
}

/*
 * More efficient way to determine if extent is compressed, instead of using
 * 'extent_map_compression() != BTRFS_COMPRESS_NONE'.
 */
static inline bool extent_map_is_compressed(const struct extent_map *em)
{
	return (em->flags & (EXTENT_FLAG_COMPRESS_ZLIB |
			     EXTENT_FLAG_COMPRESS_LZO |
			     EXTENT_FLAG_COMPRESS_ZSTD)) != 0;
}

static inline int extent_map_in_tree(const struct extent_map *em)
{
	return !RB_EMPTY_NODE(&em->rb_node);
}

static inline u64 extent_map_end(const struct extent_map *em)
{
	if (em->start + em->len < em->start)
		return (u64)-1;
	return em->start + em->len;
}

void extent_map_tree_init(struct extent_map_tree *tree);
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len);
void remove_extent_mapping(struct btrfs_inode *inode, struct extent_map *em);
int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
		     u64 new_logical);

struct extent_map *alloc_extent_map(void);
void free_extent_map(struct extent_map *em);
int __init extent_map_init(void);
void __cold extent_map_exit(void);
int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen);
void clear_em_logging(struct btrfs_inode *inode, struct extent_map *em);
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len);
int btrfs_add_extent_mapping(struct btrfs_inode *inode,
			     struct extent_map **em_in, u64 start, u64 len);
void btrfs_drop_extent_map_range(struct btrfs_inode *inode,
				 u64 start, u64 end,
				 bool skip_pinned);
int btrfs_replace_extent_map_range(struct btrfs_inode *inode,
				   struct extent_map *new_em,
				   bool modified);
long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan);

#endif