/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
*/
#ifndef __EROFS_INTERNAL_H
#define __EROFS_INTERNAL_H

#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iomap.h>
#include "erofs_fs.h"

/* redefine pr_fmt "erofs: " */
#undef pr_fmt
#define pr_fmt(fmt) "erofs: " fmt

__printf(3, 4) void _erofs_err(struct super_block *sb,
			       const char *function, const char *fmt, ...);
#define erofs_err(sb, fmt, ...)	\
	_erofs_err(sb, __func__, fmt "\n", ##__VA_ARGS__)
__printf(3, 4) void _erofs_info(struct super_block *sb,
			       const char *function, const char *fmt, ...);
#define erofs_info(sb, fmt, ...) \
	_erofs_info(sb, __func__, fmt "\n", ##__VA_ARGS__)
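
/*
 * Illustrative expansion only: a hypothetical call such as
 *
 *	erofs_err(sb, "corrupted dirent at nid %llu", nid);
 *
 * expands to
 *
 *	_erofs_err(sb, __func__, "corrupted dirent at nid %llu\n", nid);
 *
 * i.e. the calling function name and a trailing newline are supplied
 * automatically.
 */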

#ifdef CONFIG_EROFS_FS_DEBUG
#define DBG_BUGON		BUG_ON
#else
#define DBG_BUGON(x)		((void)(x))
#endif	/* !CONFIG_EROFS_FS_DEBUG */

/* EROFS_SUPER_MAGIC_V1 to represent the whole file system */
#define EROFS_SUPER_MAGIC	EROFS_SUPER_MAGIC_V1

typedef u64 erofs_nid_t;
typedef u64 erofs_off_t;
/* data type for filesystem-wide blocks number */
typedef u32 erofs_blk_t;

struct erofs_device_info {
	char *path;
	struct erofs_fscache *fscache;
	struct block_device *bdev;
	struct dax_device *dax_dev;
	u64 dax_part_off;

	u32 blocks;
	u32 mapped_blkaddr;
};

enum {
	EROFS_SYNC_DECOMPRESS_AUTO,
	EROFS_SYNC_DECOMPRESS_FORCE_ON,
	EROFS_SYNC_DECOMPRESS_FORCE_OFF
};

struct erofs_mount_opts {
#ifdef CONFIG_EROFS_FS_ZIP
	/* current strategy of how to use managed cache */
	unsigned char cache_strategy;
	/* strategy of sync decompression (0 - auto, 1 - force on, 2 - force off) */
	unsigned int sync_decompress;
	/* threshold for decompression synchronously */
	unsigned int max_sync_decompress_pages;
#endif
	unsigned int mount_opt;
};

struct erofs_dev_context {
	struct idr tree;
	struct rw_semaphore rwsem;

	unsigned int extra_devices;
	bool flatdev;
};

struct erofs_fs_context {
	struct erofs_mount_opts opt;
	struct erofs_dev_context *devs;
	char *fsid;
	char *domain_id;
};

/* all filesystem-wide lz4 configurations */
struct erofs_sb_lz4_info {
	/* # of pages needed for EROFS lz4 rolling decompression */
	u16 max_distance_pages;
	/* maximum possible blocks for pclusters in the filesystem */
	u16 max_pclusterblks;
};

struct erofs_domain {
	refcount_t ref;
	struct list_head list;
	struct fscache_volume *volume;
	char *domain_id;
};

struct erofs_fscache {
	struct fscache_cookie *cookie;
	struct inode *inode;	/* anonymous inode for the blob */

	/* used for share domain mode */
	struct erofs_domain *domain;
	struct list_head node;
	refcount_t ref;
	char *name;
};

struct erofs_xattr_prefix_item {
	struct erofs_xattr_long_prefix *prefix;
	u8 infix_len;
};

struct erofs_sb_info {
	struct erofs_mount_opts opt;	/* options */
#ifdef CONFIG_EROFS_FS_ZIP
	/* list for all registered superblocks, mainly for shrinker */
	struct list_head list;
	struct mutex umount_mutex;

	/* managed XArray arranged in physical block number */
	struct xarray managed_pslots;

	unsigned int shrinker_run_no;
	u16 available_compr_algs;

	/* pseudo inode to manage cached pages */
	struct inode *managed_cache;

	struct erofs_sb_lz4_info lz4;
#endif	/* CONFIG_EROFS_FS_ZIP */
	struct inode *packed_inode;
	struct erofs_dev_context *devs;
	struct dax_device *dax_dev;
	u64 dax_part_off;
	u64 total_blocks;
	u32 primarydevice_blocks;

	u32 meta_blkaddr;
#ifdef CONFIG_EROFS_FS_XATTR
	u32 xattr_blkaddr;
	u32 xattr_prefix_start;
	u8 xattr_prefix_count;
	struct erofs_xattr_prefix_item *xattr_prefixes;
#endif
	u16 device_id_mask;	/* valid bits of device id to be used */

	unsigned char islotbits;	/* inode slot unit size in bit shift */
	unsigned char blkszbits;	/* filesystem block size in bit shift */

	u32 sb_size;			/* total superblock size */
	u32 build_time_nsec;
	u64 build_time;

	/* what we really care is nid, rather than ino.. */
	erofs_nid_t root_nid;
	erofs_nid_t packed_nid;
	/* used for statfs, f_files - f_favail */
	u64 inos;

	u8 uuid[16];			/* 128-bit uuid for volume */
	u8 volume_name[16];		/* volume name */
	u32 feature_compat;
	u32 feature_incompat;

	/* sysfs support */
	struct kobject s_kobj;		/* /sys/fs/erofs/<devname> */
	struct completion s_kobj_unregister;

	/* fscache support */
	struct fscache_volume *volume;
	struct erofs_fscache *s_fscache;
	struct erofs_domain *domain;
	char *fsid;
	char *domain_id;
};

#define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info)
#define EROFS_I_SB(inode) ((struct erofs_sb_info *)(inode)->i_sb->s_fs_info)

/* Mount flags set via mount options or defaults */
#define EROFS_MOUNT_XATTR_USER		0x00000010
#define EROFS_MOUNT_POSIX_ACL		0x00000020
#define EROFS_MOUNT_DAX_ALWAYS		0x00000040
#define EROFS_MOUNT_DAX_NEVER		0x00000080

#define clear_opt(opt, option)	((opt)->mount_opt &= ~EROFS_MOUNT_##option)
#define set_opt(opt, option)	((opt)->mount_opt |= EROFS_MOUNT_##option)
#define test_opt(opt, option)	((opt)->mount_opt & EROFS_MOUNT_##option)
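
/*
 * Illustrative use only (names are hypothetical): given a mount context
 * "ctx" holding a struct erofs_mount_opts, user xattr support could be
 * enabled and queried as
 *
 *	set_opt(&ctx->opt, XATTR_USER);
 *	if (test_opt(&ctx->opt, XATTR_USER))
 *		...
 */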

static inline bool erofs_is_fscache_mode(struct super_block *sb)
{
	return IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && !sb->s_bdev;
}

enum {
	EROFS_ZIP_CACHE_DISABLED,
	EROFS_ZIP_CACHE_READAHEAD,
	EROFS_ZIP_CACHE_READAROUND
};

/* basic unit of the workstation of a super_block */
struct erofs_workgroup {
	pgoff_t index;
	struct lockref lockref;
};

enum erofs_kmap_type {
	EROFS_NO_KMAP,		/* don't map the buffer */
	EROFS_KMAP,		/* use kmap_local_page() to map the buffer */
};

struct erofs_buf {
	struct inode *inode;
	struct page *page;
	void *base;
	enum erofs_kmap_type kmap_type;
};
#define __EROFS_BUF_INITIALIZER	((struct erofs_buf){ .page = NULL })

#define ROOT_NID(sb)		((sb)->root_nid)

#define erofs_blknr(sb, addr)	((addr) >> (sb)->s_blocksize_bits)
#define erofs_blkoff(sb, addr)	((addr) & ((sb)->s_blocksize - 1))
#define erofs_pos(sb, blk)	((erofs_off_t)(blk) << (sb)->s_blocksize_bits)
#define erofs_iblks(i)	(round_up((i)->i_size, i_blocksize(i)) >> (i)->i_blkbits)
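
/*
 * Worked example (illustrative only): with a 4KiB block size
 * (s_blocksize_bits == 12), the byte address 0x3010 gives
 * erofs_blknr(sb, 0x3010) == 3 and erofs_blkoff(sb, 0x3010) == 0x10,
 * while erofs_pos(sb, 3) recovers the block start 0x3000.
 */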

#define EROFS_FEATURE_FUNCS(name, compat, feature) \
static inline bool erofs_sb_has_##name(struct erofs_sb_info *sbi) \
{ \
	return sbi->feature_##compat & EROFS_FEATURE_##feature; \
}

EROFS_FEATURE_FUNCS(zero_padding, incompat, INCOMPAT_ZERO_PADDING)
EROFS_FEATURE_FUNCS(compr_cfgs, incompat, INCOMPAT_COMPR_CFGS)
EROFS_FEATURE_FUNCS(big_pcluster, incompat, INCOMPAT_BIG_PCLUSTER)
EROFS_FEATURE_FUNCS(chunked_file, incompat, INCOMPAT_CHUNKED_FILE)
EROFS_FEATURE_FUNCS(device_table, incompat, INCOMPAT_DEVICE_TABLE)
EROFS_FEATURE_FUNCS(compr_head2, incompat, INCOMPAT_COMPR_HEAD2)
EROFS_FEATURE_FUNCS(ztailpacking, incompat, INCOMPAT_ZTAILPACKING)
EROFS_FEATURE_FUNCS(fragments, incompat, INCOMPAT_FRAGMENTS)
EROFS_FEATURE_FUNCS(dedupe, incompat, INCOMPAT_DEDUPE)
EROFS_FEATURE_FUNCS(xattr_prefixes, incompat, INCOMPAT_XATTR_PREFIXES)
EROFS_FEATURE_FUNCS(sb_chksum, compat, COMPAT_SB_CHKSUM)
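
/*
 * Illustrative expansion only: EROFS_FEATURE_FUNCS(dedupe, incompat,
 * INCOMPAT_DEDUPE) above defines
 *
 *	static inline bool erofs_sb_has_dedupe(struct erofs_sb_info *sbi)
 *	{
 *		return sbi->feature_incompat & EROFS_FEATURE_INCOMPAT_DEDUPE;
 *	}
 *
 * so on-disk feature bits can be tested as erofs_sb_has_dedupe(EROFS_SB(sb)).
 */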

/* atomic flag definitions */
#define EROFS_I_EA_INITED_BIT	0
#define EROFS_I_Z_INITED_BIT	1

/* bitlock definitions (arranged in reverse order) */
#define EROFS_I_BL_XATTR_BIT	(BITS_PER_LONG - 1)
#define EROFS_I_BL_Z_BIT	(BITS_PER_LONG - 2)

struct erofs_inode {
	erofs_nid_t nid;

	/* atomic flags (including bitlocks) */
	unsigned long flags;

	unsigned char datalayout;
	unsigned char inode_isize;
	unsigned int xattr_isize;

	unsigned int xattr_shared_count;
	unsigned int *xattr_shared_xattrs;

	union {
		erofs_blk_t raw_blkaddr;
		struct {
			unsigned short	chunkformat;
			unsigned char	chunkbits;
		};
#ifdef CONFIG_EROFS_FS_ZIP
		struct {
			unsigned short z_advise;
			unsigned char  z_algorithmtype[2];
			unsigned char  z_logical_clusterbits;
			unsigned long  z_tailextent_headlcn;
			union {
				struct {
					erofs_off_t    z_idataoff;
					unsigned short z_idata_size;
				};
				erofs_off_t z_fragmentoff;
			};
		};
#endif	/* CONFIG_EROFS_FS_ZIP */
	};
	/* the corresponding vfs inode */
	struct inode vfs_inode;
};

#define EROFS_I(ptr)	container_of(ptr, struct erofs_inode, vfs_inode)

static inline erofs_off_t erofs_iloc(struct inode *inode)
{
	struct erofs_sb_info *sbi = EROFS_I_SB(inode);

	return erofs_pos(inode->i_sb, sbi->meta_blkaddr) +
		(EROFS_I(inode)->nid << sbi->islotbits);
}
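
/*
 * Illustrative numbers only (not taken from any real image): with 4KiB
 * blocks, meta_blkaddr == 2 and islotbits == 5 (32-byte inode slots),
 * erofs_iloc() for nid 100 evaluates to 2 * 4096 + (100 << 5) == 11392,
 * the byte offset of that on-disk inode within the metadata area.
 */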

static inline unsigned int erofs_inode_version(unsigned int ifmt)
{
	return (ifmt >> EROFS_I_VERSION_BIT) & EROFS_I_VERSION_MASK;
}

static inline unsigned int erofs_inode_datalayout(unsigned int ifmt)
{
	return (ifmt >> EROFS_I_DATALAYOUT_BIT) & EROFS_I_DATALAYOUT_MASK;
}

/*
 * Different from grab_cache_page_nowait(), reclaiming is never triggered
 * when allocating new pages.
 */
static inline
struct page *erofs_grab_cache_page_nowait(struct address_space *mapping,
					  pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK | FGP_CREAT | FGP_NOFS | FGP_NOWAIT,
			readahead_gfp_mask(mapping) & ~__GFP_RECLAIM);
}

/* Has a disk mapping */
#define EROFS_MAP_MAPPED	0x0001
/* Located in metadata (could be copied from bd_inode) */
#define EROFS_MAP_META		0x0002
/* The extent is encoded */
#define EROFS_MAP_ENCODED	0x0004
/* The length of extent is full */
#define EROFS_MAP_FULL_MAPPED	0x0008
/* Located in the special packed inode */
#define EROFS_MAP_FRAGMENT	0x0010
/* The extent refers to partial decompressed data */
#define EROFS_MAP_PARTIAL_REF	0x0020

struct erofs_map_blocks {
	struct erofs_buf buf;

	erofs_off_t m_pa, m_la;
	u64 m_plen, m_llen;

	unsigned short m_deviceid;
	char m_algorithmformat;
	unsigned int m_flags;
};

/*
 * Used to get the exact decompressed length, e.g. fiemap (consider lookback
 * approach instead if possible since it's more metadata lightweight.)
 */
#define EROFS_GET_BLOCKS_FIEMAP		0x0001
/* Used to map the whole extent if non-negligible data is requested for LZMA */
#define EROFS_GET_BLOCKS_READMORE	0x0002
/* Used to map tail extent for tailpacking inline or fragment pcluster */
#define EROFS_GET_BLOCKS_FINDTAIL	0x0004

enum {
	Z_EROFS_COMPRESSION_SHIFTED = Z_EROFS_COMPRESSION_MAX,
	Z_EROFS_COMPRESSION_INTERLACED,
	Z_EROFS_COMPRESSION_RUNTIME_MAX
};

struct erofs_map_dev {
	struct erofs_fscache *m_fscache;
	struct block_device *m_bdev;
	struct dax_device *m_daxdev;
	u64 m_dax_part_off;

	erofs_off_t m_pa;
	unsigned int m_deviceid;
};

extern struct file_system_type erofs_fs_type;
extern const struct super_operations erofs_sops;

extern const struct address_space_operations erofs_raw_access_aops;
extern const struct address_space_operations z_erofs_aops;
extern const struct address_space_operations erofs_fscache_access_aops;

extern const struct inode_operations erofs_generic_iops;
extern const struct inode_operations erofs_symlink_iops;
extern const struct inode_operations erofs_fast_symlink_iops;
extern const struct inode_operations erofs_dir_iops;

extern const struct file_operations erofs_file_fops;
extern const struct file_operations erofs_dir_fops;

extern const struct iomap_ops z_erofs_iomap_report_ops;

/* flags for erofs_fscache_register_cookie() */
#define EROFS_REG_COOKIE_SHARE		0x0001
#define EROFS_REG_COOKIE_NEED_NOEXIST	0x0002

void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
			  erofs_off_t *offset, int *lengthp);
void erofs_unmap_metabuf(struct erofs_buf *buf);
void erofs_put_metabuf(struct erofs_buf *buf);
void *erofs_bread(struct erofs_buf *buf, erofs_blk_t blkaddr,
		  enum erofs_kmap_type type);
void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb);
void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
			 erofs_blk_t blkaddr, enum erofs_kmap_type type);
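
/*
 * Typical metadata access pattern with the helpers above (sketch only;
 * blkaddr is a caller-provided block number):
 *
 *	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
 *	void *ptr = erofs_read_metabuf(&buf, sb, blkaddr, EROFS_KMAP);
 *
 *	if (!IS_ERR(ptr)) {
 *		... parse on-disk metadata at ptr ...
 *		erofs_put_metabuf(&buf);
 *	}
 */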

int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *dev);
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len);
int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map);
struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid);
int erofs_getattr(struct mnt_idmap *idmap, const struct path *path,
		  struct kstat *stat, u32 request_mask,
		  unsigned int query_flags);
int erofs_namei(struct inode *dir, const struct qstr *name,
		erofs_nid_t *nid, unsigned int *d_type);

static inline void *erofs_vm_map_ram(struct page **pages, unsigned int count)
{
	int retried = 0;

	while (1) {
		void *p = vm_map_ram(pages, count, -1);

		/* retry two more times (totally 3 times) */
		if (p || ++retried >= 3)
			return p;
		vm_unmap_aliases();
	}
	return NULL;
}

int erofs_register_sysfs(struct super_block *sb);
void erofs_unregister_sysfs(struct super_block *sb);
int __init erofs_init_sysfs(void);
void erofs_exit_sysfs(void);

struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp);
static inline void erofs_pagepool_add(struct page **pagepool, struct page *page)
{
	set_page_private(page, (unsigned long)*pagepool);
	*pagepool = page;
}
void erofs_release_pages(struct page **pagepool);
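
/*
 * Sketch of the page pool convention used above (illustrative only):
 * erofs_pagepool_add() chains a page into a caller-owned singly-linked
 * list through page_private(), e.g.
 *
 *	struct page *pagepool = NULL;
 *	struct page *page = erofs_allocpage(&pagepool, GFP_KERNEL);
 *
 *	erofs_pagepool_add(&pagepool, page);
 *	erofs_release_pages(&pagepool);
 */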

#ifdef CONFIG_EROFS_FS_ZIP
void erofs_workgroup_put(struct erofs_workgroup *grp);
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
					     pgoff_t index);
struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
					       struct erofs_workgroup *grp);
void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
void erofs_shrinker_register(struct super_block *sb);
void erofs_shrinker_unregister(struct super_block *sb);
int __init erofs_init_shrinker(void);
void erofs_exit_shrinker(void);
int __init z_erofs_init_zip_subsystem(void);
void z_erofs_exit_zip_subsystem(void);
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
				       struct erofs_workgroup *egrp);
int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb,
			    struct z_erofs_lz4_cfgs *lz4, int len);
int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
			    int flags);
void *erofs_get_pcpubuf(unsigned int requiredpages);
void erofs_put_pcpubuf(void *ptr);
int erofs_pcpubuf_growsize(unsigned int nrpages);
void __init erofs_pcpubuf_init(void);
void erofs_pcpubuf_exit(void);
int erofs_init_managed_cache(struct super_block *sb);
#else
static inline void erofs_shrinker_register(struct super_block *sb) {}
static inline void erofs_shrinker_unregister(struct super_block *sb) {}
static inline int erofs_init_shrinker(void) { return 0; }
static inline void erofs_exit_shrinker(void) {}
static inline int z_erofs_init_zip_subsystem(void) { return 0; }
static inline void z_erofs_exit_zip_subsystem(void) {}
static inline int z_erofs_load_lz4_config(struct super_block *sb,
					  struct erofs_super_block *dsb,
					  struct z_erofs_lz4_cfgs *lz4, int len)
{
	if (lz4 || dsb->u1.lz4_max_distance) {
		erofs_err(sb, "lz4 algorithm isn't enabled");
		return -EINVAL;
	}
	return 0;
}
static inline void erofs_pcpubuf_init(void) {}
static inline void erofs_pcpubuf_exit(void) {}
static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; }
#endif	/* !CONFIG_EROFS_FS_ZIP */

#ifdef CONFIG_EROFS_FS_ZIP_LZMA
int __init z_erofs_lzma_init(void);
void z_erofs_lzma_exit(void);
int z_erofs_load_lzma_config(struct super_block *sb,
			     struct erofs_super_block *dsb,
			     struct z_erofs_lzma_cfgs *lzma, int size);
#else
static inline int z_erofs_lzma_init(void) { return 0; }
static inline void z_erofs_lzma_exit(void) {}
static inline int z_erofs_load_lzma_config(struct super_block *sb,
					   struct erofs_super_block *dsb,
					   struct z_erofs_lzma_cfgs *lzma,
					   int size)
{
	if (lzma) {
		erofs_err(sb, "lzma algorithm isn't enabled");
		return -EINVAL;
	}
	return 0;
}
#endif	/* !CONFIG_EROFS_FS_ZIP_LZMA */

#ifdef CONFIG_EROFS_FS_ONDEMAND
int erofs_fscache_register_fs(struct super_block *sb);
void erofs_fscache_unregister_fs(struct super_block *sb);

struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
						    char *name,
						    unsigned int flags);
void erofs_fscache_unregister_cookie(struct erofs_fscache *fscache);
#else
static inline int erofs_fscache_register_fs(struct super_block *sb)
{
	return -EOPNOTSUPP;
}
static inline void erofs_fscache_unregister_fs(struct super_block *sb) {}

static inline
struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
						    char *name,
						    unsigned int flags)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void erofs_fscache_unregister_cookie(struct erofs_fscache *fscache)
{
}
#endif

#define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */

#endif	/* __EROFS_INTERNAL_H */