From b4892fa3e7fd69e78a82356de45b90758589cafc Mon Sep 17 00:00:00 2001 From: Huang Jianan Date: Tue, 16 Mar 2021 11:15:14 +0800 Subject: [PATCH 01/21] erofs: avoid memory allocation failure during rolling decompression Currently, err would be treated as io error. Therefore, it'd be better to ensure memory allocation during rolling decompression to avoid such io error. In the long term, we might consider adding another !Uptodate case for such case. Link: https://lore.kernel.org/r/20210316031515.90954-1-huangjianan@oppo.com Reviewed-by: Gao Xiang Reviewed-by: Chao Yu Signed-off-by: Huang Jianan Signed-off-by: Guo Weichao Signed-off-by: Gao Xiang --- fs/erofs/decompressor.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index 1cb1ffd10569..34e73ff76f89 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -73,9 +73,8 @@ static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq, victim = availables[--top]; get_page(victim); } else { - victim = erofs_allocpage(pagepool, GFP_KERNEL); - if (!victim) - return -ENOMEM; + victim = erofs_allocpage(pagepool, + GFP_KERNEL | __GFP_NOFAIL); set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE); } rq->out[i] = victim; From 648f2de053a882c87c05f0060f47d3b11841fdbe Mon Sep 17 00:00:00 2001 From: Huang Jianan Date: Wed, 17 Mar 2021 11:54:47 +0800 Subject: [PATCH 02/21] erofs: use workqueue decompression for atomic contexts only z_erofs_decompressqueue_endio may not be executed in the atomic context, for example, when dm-verity is turned on. In this scenario, data can be decompressed directly to get rid of additional kworker scheduling overhead. Link: https://lore.kernel.org/r/20210317035448.13921-2-huangjianan@oppo.com Reviewed-by: Gao Xiang Reviewed-by: Chao Yu Signed-off-by: Huang Jianan Signed-off-by: Guo Weichao Signed-off-by: Gao Xiang --- fs/erofs/zdata.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 3851e1a64f73..9530b611f94b 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -706,6 +706,7 @@ err_out: goto out; } +static void z_erofs_decompressqueue_work(struct work_struct *work); static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io, bool sync, int bios) { @@ -720,8 +721,14 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io, return; } - if (!atomic_add_return(bios, &io->pending_bios)) + if (atomic_add_return(bios, &io->pending_bios)) + return; + /* Use workqueue decompression for atomic contexts only */ + if (in_atomic() || irqs_disabled()) { queue_work(z_erofs_workqueue, &io->u.work); + return; + } + z_erofs_decompressqueue_work(&io->u.work); } static bool z_erofs_page_is_invalidated(struct page *page) From 30048cdac4b92f39ee50e2a1344f5899f8e70cb6 Mon Sep 17 00:00:00 2001 From: Huang Jianan Date: Wed, 17 Mar 2021 11:54:48 +0800 Subject: [PATCH 03/21] erofs: use sync decompression for atomic contexts only Sync decompression was introduced to get rid of additional kworker scheduling overhead. But there is no such overhead in non-atomic contexts. Therefore, it should be better to turn off sync decompression to avoid the current thread waiting in z_erofs_runqueue. 
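Taken together with the previous patch, the two decision points become (a condensed sketch for clarity only; the authoritative changes are the zdata.c and super.c hunks below):

        /* end_io path: bounce to the workqueue only when sleeping is not allowed */
        if (in_atomic() || irqs_disabled()) {
                queue_work(z_erofs_workqueue, &io->u.work);
                /* remember that atomic completions do happen on this fs */
                sbi->ctx.readahead_sync_decompress = true;
        } else {
                /* safe to decompress right here, no kworker round-trip */
                z_erofs_decompressqueue_work(&io->u.work);
        }

        /* readahead path: wait synchronously only for small requests, and only
         * after an atomic completion context has actually been observed */
        bool sync = sbi->ctx.readahead_sync_decompress &&
                    nr_pages <= sbi->ctx.max_sync_decompress_pages;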
Link: https://lore.kernel.org/r/20210317035448.13921-3-huangjianan@oppo.com Reviewed-by: Gao Xiang Reviewed-by: Chao Yu Signed-off-by: Huang Jianan Signed-off-by: Guo Weichao Signed-off-by: Gao Xiang --- fs/erofs/internal.h | 2 ++ fs/erofs/super.c | 1 + fs/erofs/zdata.c | 8 ++++++-- 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 351dae524a0c..30e63b73a675 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -50,6 +50,8 @@ struct erofs_fs_context { #ifdef CONFIG_EROFS_FS_ZIP /* current strategy of how to use managed cache */ unsigned char cache_strategy; + /* strategy of sync decompression (false - auto, true - force on) */ + bool readahead_sync_decompress; /* threshold for decompression synchronously */ unsigned int max_sync_decompress_pages; diff --git a/fs/erofs/super.c b/fs/erofs/super.c index d5a6b9b888a5..0445d09b6331 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -200,6 +200,7 @@ static void erofs_default_options(struct erofs_fs_context *ctx) #ifdef CONFIG_EROFS_FS_ZIP ctx->cache_strategy = EROFS_ZIP_CACHE_READAROUND; ctx->max_sync_decompress_pages = 3; + ctx->readahead_sync_decompress = false; #endif #ifdef CONFIG_EROFS_FS_XATTR set_opt(ctx, XATTR_USER); diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 9530b611f94b..7ab8a4e3dfcb 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -710,6 +710,8 @@ static void z_erofs_decompressqueue_work(struct work_struct *work); static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io, bool sync, int bios) { + struct erofs_sb_info *const sbi = EROFS_SB(io->sb); + /* wake up the caller thread for sync decompression */ if (sync) { unsigned long flags; @@ -723,9 +725,10 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io, if (atomic_add_return(bios, &io->pending_bios)) return; - /* Use workqueue decompression for atomic contexts only */ + /* Use workqueue and sync decompression for atomic contexts only */ if (in_atomic() || irqs_disabled()) { queue_work(z_erofs_workqueue, &io->u.work); + sbi->ctx.readahead_sync_decompress = true; return; } z_erofs_decompressqueue_work(&io->u.work); @@ -1340,7 +1343,8 @@ static void z_erofs_readahead(struct readahead_control *rac) struct erofs_sb_info *const sbi = EROFS_I_SB(inode); unsigned int nr_pages = readahead_count(rac); - bool sync = (nr_pages <= sbi->ctx.max_sync_decompress_pages); + bool sync = (sbi->ctx.readahead_sync_decompress && + nr_pages <= sbi->ctx.max_sync_decompress_pages); struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); struct page *page, *head = NULL; LIST_HEAD(pagepool); From 0b964600d3aae56ff9d5bdd710a79f39a44c572c Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Mon, 22 Mar 2021 02:32:27 +0800 Subject: [PATCH 04/21] erofs: complete a missing case for inplace I/O Add a missing case which could cause unnecessary page allocation but not directly use inplace I/O instead, which increases runtime extra memory footprint. The detail is, considering an online file-backed page, the right half of the page is chosen to be cached (e.g. the end page of a readahead request) and some of its data doesn't exist in managed cache, so the pcluster will be definitely kept in the submission chain. (IOWs, it cannot be decompressed without I/O, e.g., due to the bypass queue). Currently, DELAYEDALLOC/TRYALLOC cases can be downgraded as NOINPLACE, and stop online pages from inplace I/O. 
After this patch, unneeded page allocations won't be observed in pickup_page_for_submission() then. Link: https://lore.kernel.org/r/20210321183227.5182-1-hsiangkao@aol.com Signed-off-by: Gao Xiang --- fs/erofs/zdata.c | 44 +++++++++++++++++++++++++++++--------------- 1 file changed, 29 insertions(+), 15 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 7ab8a4e3dfcb..cd9b76216925 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -104,6 +104,12 @@ enum z_erofs_collectmode { * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________| */ COLLECT_PRIMARY_HOOKED, + /* + * a weak form of COLLECT_PRIMARY_FOLLOWED, the difference is that it + * could be dispatched into bypass queue later due to uptodated managed + * pages. All related online pages cannot be reused for inplace I/O (or + * pagevec) since it can be directly decoded without I/O submission. + */ COLLECT_PRIMARY_FOLLOWED_NOINPLACE, /* * The current collection has been linked with the owned chain, and @@ -186,21 +192,25 @@ static void preload_compressed_pages(struct z_erofs_collector *clt, if (page) { t = tag_compressed_page_justfound(page); - } else if (type == DELAYEDALLOC) { - t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED); - } else if (type == TRYALLOC) { - newpage = erofs_allocpage(pagepool, gfp); - if (!newpage) - goto dontalloc; - - set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE); - t = tag_compressed_page_justfound(newpage); - } else { /* DONTALLOC */ -dontalloc: - if (standalone) - clt->compressedpages = pages; + } else { + /* I/O is needed, no possible to decompress directly */ standalone = false; - continue; + switch (type) { + case DELAYEDALLOC: + t = tagptr_init(compressed_page_t, + PAGE_UNALLOCATED); + break; + case TRYALLOC: + newpage = erofs_allocpage(pagepool, gfp); + if (!newpage) + continue; + set_page_private(newpage, + Z_EROFS_PREALLOCATED_PAGE); + t = tag_compressed_page_justfound(newpage); + break; + default: /* DONTALLOC */ + continue; + } } if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t))) @@ -214,7 +224,11 @@ dontalloc: } } - if (standalone) /* downgrade to PRIMARY_FOLLOWED_NOINPLACE */ + /* + * don't do inplace I/O if all compressed pages are available in + * managed cache since it can be moved to the bypass queue instead. + */ + if (standalone) clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE; } From 8137824eddd2e790c61c70c20d70a087faca95fa Mon Sep 17 00:00:00 2001 From: Yue Hu Date: Thu, 25 Mar 2021 15:10:08 +0800 Subject: [PATCH 05/21] erofs: don't use erofs_map_blocks() any more Currently, erofs_map_blocks() will be called only from erofs_{bmap, read_raw_page} which are all for uncompressed files. So, the compression branch in erofs_map_blocks() is pointless. Let's remove it and use erofs_map_blocks_flatmode() directly. Also update related comments. 
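For illustration, the remaining callers now use the flat-mode helper directly (a sketch mirroring the data.c hunks below):

        /* e.g. erofs_bmap() maps uncompressed blocks without the removed wrapper */
        if (!erofs_map_blocks_flatmode(inode, &map, EROFS_GET_BLOCKS_RAW))
                return erofs_blknr(map.m_pa);
        return 0;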
Link: https://lore.kernel.org/r/20210325071008.573-1-zbestahu@gmail.com Reviewed-by: Chao Yu Signed-off-by: Yue Hu Signed-off-by: Gao Xiang --- fs/erofs/data.c | 19 ++----------------- fs/erofs/internal.h | 6 ++---- 2 files changed, 4 insertions(+), 21 deletions(-) diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 1249e74b3bf0..ebac756cb2a3 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -109,21 +109,6 @@ err_out: return err; } -int erofs_map_blocks(struct inode *inode, - struct erofs_map_blocks *map, int flags) -{ - if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) { - int err = z_erofs_map_blocks_iter(inode, map, flags); - - if (map->mpage) { - put_page(map->mpage); - map->mpage = NULL; - } - return err; - } - return erofs_map_blocks_flatmode(inode, map, flags); -} - static inline struct bio *erofs_read_raw_page(struct bio *bio, struct address_space *mapping, struct page *page, @@ -159,7 +144,7 @@ submit_bio_retry: erofs_blk_t blknr; unsigned int blkoff; - err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW); + err = erofs_map_blocks_flatmode(inode, &map, EROFS_GET_BLOCKS_RAW); if (err) goto err_out; @@ -318,7 +303,7 @@ static sector_t erofs_bmap(struct address_space *mapping, sector_t block) return 0; } - if (!erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW)) + if (!erofs_map_blocks_flatmode(inode, &map, EROFS_GET_BLOCKS_RAW)) return erofs_blknr(map.m_pa); return 0; diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 30e63b73a675..db8c8470269e 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -289,7 +289,7 @@ extern const struct address_space_operations erofs_raw_access_aops; extern const struct address_space_operations z_erofs_aops; /* - * Logical to physical block mapping, used by erofs_map_blocks() + * Logical to physical block mapping * * Different with other file systems, it is used for 2 access modes: * @@ -336,7 +336,7 @@ struct erofs_map_blocks { struct page *mpage; }; -/* Flags used by erofs_map_blocks() */ +/* Flags used by erofs_map_blocks_flatmode() */ #define EROFS_GET_BLOCKS_RAW 0x0001 /* zmap.c */ @@ -358,8 +358,6 @@ static inline int z_erofs_map_blocks_iter(struct inode *inode, /* data.c */ struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr); -int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int); - /* inode.c */ static inline unsigned long erofs_inode_hash(erofs_nid_t nid) { From 24a806d849c0b0c1d0cd6a6b93ba4ae4c0ec9f08 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Mon, 29 Mar 2021 08:36:14 +0800 Subject: [PATCH 06/21] erofs: add unsupported inode i_format check If any unknown i_format fields are set (may be of some new incompat inode features), mark such inode as unsupported. Just in case of any new incompat i_format fields added in the future. 
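A condensed sketch of the new check (the erofs_fs.h and inode.c hunks below are the actual change): EROFS_I_ALL collects every i_format bit this kernel understands, so any bit outside that mask marks the inode as unsupported.

        if (ifmt & ~EROFS_I_ALL) {
                /* some unknown (likely incompat) i_format bits are set */
                erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu",
                          ifmt, vi->nid);
                err = -EOPNOTSUPP;
                goto err_out;
        }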
Link: https://lore.kernel.org/r/20210329003614.6583-1-hsiangkao@aol.com Fixes: 431339ba9042 ("staging: erofs: add inode operations") Cc: # 4.19+ Signed-off-by: Gao Xiang --- fs/erofs/erofs_fs.h | 3 +++ fs/erofs/inode.c | 7 +++++++ 2 files changed, 10 insertions(+) diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h index 9ad1615f4474..e8d04d808fa6 100644 --- a/fs/erofs/erofs_fs.h +++ b/fs/erofs/erofs_fs.h @@ -75,6 +75,9 @@ static inline bool erofs_inode_is_data_compressed(unsigned int datamode) #define EROFS_I_VERSION_BIT 0 #define EROFS_I_DATALAYOUT_BIT 1 +#define EROFS_I_ALL \ + ((1 << (EROFS_I_DATALAYOUT_BIT + EROFS_I_DATALAYOUT_BITS)) - 1) + /* 32-byte reduced form of an ondisk inode */ struct erofs_inode_compact { __le16 i_format; /* inode format hints */ diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index 119fdce1b520..7ed2d7391692 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -44,6 +44,13 @@ static struct page *erofs_read_inode(struct inode *inode, dic = page_address(page) + *ofs; ifmt = le16_to_cpu(dic->i_format); + if (ifmt & ~EROFS_I_ALL) { + erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu", + ifmt, vi->nid); + err = -EOPNOTSUPP; + goto err_out; + } + vi->datalayout = erofs_inode_datalayout(ifmt); if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) { erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu", From de06a6a375414be03ce5b1054f2d836591923a1d Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Mon, 29 Mar 2021 09:23:05 +0800 Subject: [PATCH 07/21] erofs: introduce erofs_sb_has_xxx() helpers Introduce erofs_sb_has_xxx() to make long checks short, especially for later big pcluster & LZMA features. Link: https://lore.kernel.org/r/20210329012308.28743-2-hsiangkao@aol.com Reviewed-by: Chao Yu Signed-off-by: Gao Xiang --- fs/erofs/decompressor.c | 3 +-- fs/erofs/internal.h | 9 +++++++++ fs/erofs/super.c | 2 +- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index 34e73ff76f89..80e8871aef71 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -124,8 +124,7 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out) support_0padding = false; /* decompression inplace is only safe when 0padding is enabled */ - if (EROFS_SB(rq->sb)->feature_incompat & - EROFS_FEATURE_INCOMPAT_LZ4_0PADDING) { + if (erofs_sb_has_lz4_0padding(EROFS_SB(rq->sb))) { support_0padding = true; while (!src[inputmargin & ~PAGE_MASK]) diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index db8c8470269e..a148a039cf67 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -218,6 +218,15 @@ static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid) return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits); } +#define EROFS_FEATURE_FUNCS(name, compat, feature) \ +static inline bool erofs_sb_has_##name(struct erofs_sb_info *sbi) \ +{ \ + return sbi->feature_##compat & EROFS_FEATURE_##feature; \ +} + +EROFS_FEATURE_FUNCS(lz4_0padding, incompat, INCOMPAT_LZ4_0PADDING) +EROFS_FEATURE_FUNCS(sb_chksum, compat, COMPAT_SB_CHKSUM) + /* atomic flag definitions */ #define EROFS_I_EA_INITED_BIT 0 #define EROFS_I_Z_INITED_BIT 1 diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 0445d09b6331..991b99eaf22a 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -149,7 +149,7 @@ static int erofs_read_superblock(struct super_block *sb) } sbi->feature_compat = le32_to_cpu(dsb->feature_compat); - if (sbi->feature_compat & EROFS_FEATURE_COMPAT_SB_CHKSUM) { + if 
(erofs_sb_has_sb_chksum(sbi)) { ret = erofs_superblock_csum_verify(sb, data); if (ret) goto out; From 5d50538fc567c6f3692dec1825fb38c5a0884d93 Mon Sep 17 00:00:00 2001 From: Huang Jianan Date: Mon, 29 Mar 2021 09:23:06 +0800 Subject: [PATCH 08/21] erofs: support adjust lz4 history window size lz4 uses LZ4_DISTANCE_MAX to record history preservation. When using rolling decompression, a block with a higher compression ratio will cause a larger memory allocation (up to 64k). It may cause a large resource burden in extreme cases on devices with small memory and a large number of concurrent IOs. So appropriately reducing this value can improve performance. Decreasing this value will reduce the compression ratio (except when input_size Signed-off-by: Huang Jianan Signed-off-by: Guo Weichao [ Gao Xiang: introduce struct erofs_sb_lz4_info for configurations. ] Signed-off-by: Gao Xiang --- fs/erofs/decompressor.c | 21 +++++++++++++++++---- fs/erofs/erofs_fs.h | 4 +++- fs/erofs/internal.h | 19 +++++++++++++++++++ fs/erofs/super.c | 4 +++- 4 files changed, 42 insertions(+), 6 deletions(-) diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index 80e8871aef71..93411e9df9b6 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -28,6 +28,17 @@ struct z_erofs_decompressor { char *name; }; +int z_erofs_load_lz4_config(struct super_block *sb, + struct erofs_super_block *dsb) +{ + u16 distance = le16_to_cpu(dsb->lz4_max_distance); + + EROFS_SB(sb)->lz4.max_distance_pages = distance ? + DIV_ROUND_UP(distance, PAGE_SIZE) + 1 : + LZ4_MAX_DISTANCE_PAGES; + return 0; +} + static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq, struct list_head *pagepool) { @@ -36,6 +47,8 @@ static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq, struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL }; unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES, BITS_PER_LONG)] = { 0 }; + unsigned int lz4_max_distance_pages = + EROFS_SB(rq->sb)->lz4.max_distance_pages; void *kaddr = NULL; unsigned int i, j, top; @@ -44,14 +57,14 @@ static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq, struct page *const page = rq->out[i]; struct page *victim; - if (j >= LZ4_MAX_DISTANCE_PAGES) + if (j >= lz4_max_distance_pages) j = 0; /* 'valid' bounced can only be tested after a complete round */ if (test_bit(j, bounced)) { - DBG_BUGON(i < LZ4_MAX_DISTANCE_PAGES); - DBG_BUGON(top >= LZ4_MAX_DISTANCE_PAGES); - availables[top++] = rq->out[i - LZ4_MAX_DISTANCE_PAGES]; + DBG_BUGON(i < lz4_max_distance_pages); + DBG_BUGON(top >= lz4_max_distance_pages); + availables[top++] = rq->out[i - lz4_max_distance_pages]; } if (page) { diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h index e8d04d808fa6..dc7cc79410df 100644 --- a/fs/erofs/erofs_fs.h +++ b/fs/erofs/erofs_fs.h @@ -39,7 +39,9 @@ struct erofs_super_block { __u8 uuid[16]; /* 128-bit uuid for volume */ __u8 volume_name[16]; /* volume name */ __le32 feature_incompat; - __u8 reserved2[44]; + /* customized lz4 sliding window size instead of 64k by default */ + __le16 lz4_max_distance; + __u8 reserved2[42]; }; /* diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index a148a039cf67..875b2ebf2af9 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -59,6 +59,12 @@ struct erofs_fs_context { unsigned int mount_opt; }; +/* all filesystem-wide lz4 configurations */ +struct erofs_sb_lz4_info { + /* # of pages needed for EROFS lz4 rolling decompression */ + u16 max_distance_pages; +}; + struct 
erofs_sb_info { #ifdef CONFIG_EROFS_FS_ZIP /* list for all registered superblocks, mainly for shrinker */ @@ -72,6 +78,8 @@ struct erofs_sb_info { /* pseudo inode to manage cached pages */ struct inode *managed_cache; + + struct erofs_sb_lz4_info lz4; #endif /* CONFIG_EROFS_FS_ZIP */ u32 blocks; u32 meta_blkaddr; @@ -430,6 +438,8 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, struct erofs_workgroup *egrp); int erofs_try_to_free_cached_page(struct address_space *mapping, struct page *page); +int z_erofs_load_lz4_config(struct super_block *sb, + struct erofs_super_block *dsb); #else static inline void erofs_shrinker_register(struct super_block *sb) {} static inline void erofs_shrinker_unregister(struct super_block *sb) {} @@ -437,6 +447,15 @@ static inline int erofs_init_shrinker(void) { return 0; } static inline void erofs_exit_shrinker(void) {} static inline int z_erofs_init_zip_subsystem(void) { return 0; } static inline void z_erofs_exit_zip_subsystem(void) {} +static inline int z_erofs_load_lz4_config(struct super_block *sb, + struct erofs_super_block *dsb) +{ + if (dsb->lz4_max_distance) { + erofs_err(sb, "lz4 algorithm isn't enabled"); + return -EINVAL; + } + return 0; +} #endif /* !CONFIG_EROFS_FS_ZIP */ #define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 991b99eaf22a..3212e4f73f85 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -187,7 +187,9 @@ static int erofs_read_superblock(struct super_block *sb) ret = -EFSCORRUPTED; goto out; } - ret = 0; + + /* parse on-disk compression configurations */ + ret = z_erofs_load_lz4_config(sb, dsb); out: kunmap(page); put_page(page); From 46249cded18ac0c4ffb7b177219510a133a51c00 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Mon, 29 Mar 2021 09:23:07 +0800 Subject: [PATCH 09/21] erofs: introduce on-disk lz4 fs configurations Introduce z_erofs_lz4_cfgs to store all lz4 configurations. Currently it's only max_distance, but will be used for new features later. Link: https://lore.kernel.org/r/20210329012308.28743-4-hsiangkao@aol.com Reviewed-by: Chao Yu Signed-off-by: Gao Xiang --- fs/erofs/decompressor.c | 15 +++++++++++++-- fs/erofs/erofs_fs.h | 6 ++++++ fs/erofs/internal.h | 8 +++++--- fs/erofs/super.c | 2 +- 4 files changed, 25 insertions(+), 6 deletions(-) diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index 93411e9df9b6..97538ff24a19 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -29,9 +29,20 @@ struct z_erofs_decompressor { }; int z_erofs_load_lz4_config(struct super_block *sb, - struct erofs_super_block *dsb) + struct erofs_super_block *dsb, + struct z_erofs_lz4_cfgs *lz4, int size) { - u16 distance = le16_to_cpu(dsb->lz4_max_distance); + u16 distance; + + if (lz4) { + if (size < sizeof(struct z_erofs_lz4_cfgs)) { + erofs_err(sb, "invalid lz4 cfgs, size=%u", size); + return -EINVAL; + } + distance = le16_to_cpu(lz4->max_distance); + } else { + distance = le16_to_cpu(dsb->lz4_max_distance); + } EROFS_SB(sb)->lz4.max_distance_pages = distance ? DIV_ROUND_UP(distance, PAGE_SIZE) + 1 : diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h index dc7cc79410df..700597e9c810 100644 --- a/fs/erofs/erofs_fs.h +++ b/fs/erofs/erofs_fs.h @@ -200,6 +200,12 @@ enum { Z_EROFS_COMPRESSION_MAX }; +/* 14 bytes (+ length field = 16 bytes) */ +struct z_erofs_lz4_cfgs { + __le16 max_distance; + u8 reserved[12]; +} __packed; + /* * bit 0 : COMPACTED_2B indexes (0 - off; 1 - on) * e.g. 
for 4k logical cluster size, 4B if compacted 2B is off; diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 875b2ebf2af9..b02fc64fcece 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -439,7 +439,8 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, int erofs_try_to_free_cached_page(struct address_space *mapping, struct page *page); int z_erofs_load_lz4_config(struct super_block *sb, - struct erofs_super_block *dsb); + struct erofs_super_block *dsb, + struct z_erofs_lz4_cfgs *lz4, int len); #else static inline void erofs_shrinker_register(struct super_block *sb) {} static inline void erofs_shrinker_unregister(struct super_block *sb) {} @@ -448,9 +449,10 @@ static inline void erofs_exit_shrinker(void) {} static inline int z_erofs_init_zip_subsystem(void) { return 0; } static inline void z_erofs_exit_zip_subsystem(void) {} static inline int z_erofs_load_lz4_config(struct super_block *sb, - struct erofs_super_block *dsb) + struct erofs_super_block *dsb, + struct z_erofs_lz4_cfgs *lz4, int len) { - if (dsb->lz4_max_distance) { + if (lz4 || dsb->lz4_max_distance) { erofs_err(sb, "lz4 algorithm isn't enabled"); return -EINVAL; } diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 3212e4f73f85..1ca8da3f2125 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -189,7 +189,7 @@ static int erofs_read_superblock(struct super_block *sb) } /* parse on-disk compression configurations */ - ret = z_erofs_load_lz4_config(sb, dsb); + ret = z_erofs_load_lz4_config(sb, dsb, NULL, 0); out: kunmap(page); put_page(page); From 14373711dd54be8a84e2f4f624bc58787f80cfbd Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Mon, 29 Mar 2021 18:00:12 +0800 Subject: [PATCH 10/21] erofs: add on-disk compression configurations Add a bitmap for available compression algorithms and a variable-sized on-disk table for compression options in preparation for upcoming big pcluster and LZMA algorithm, which follows the end of super block. To parse the compression options, the bitmap is scanned one by one. For each available algorithm, there is data followed by 2-byte `length' correspondingly (it's enough for most cases, or entire fs blocks should be used.) With such available algorithm bitmap, kernel itself can also refuse to mount such filesystem if any unsupported compression algorithm exists. Note that COMPR_CFGS feature will be enabled with BIG_PCLUSTER. Link: https://lore.kernel.org/r/20210329100012.12980-1-hsiangkao@aol.com Reviewed-by: Chao Yu Signed-off-by: Gao Xiang --- fs/erofs/decompressor.c | 2 +- fs/erofs/erofs_fs.h | 16 +++-- fs/erofs/internal.h | 5 +- fs/erofs/super.c | 141 +++++++++++++++++++++++++++++++++++++++- 4 files changed, 157 insertions(+), 7 deletions(-) diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index 97538ff24a19..27aa6a99b371 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -41,7 +41,7 @@ int z_erofs_load_lz4_config(struct super_block *sb, } distance = le16_to_cpu(lz4->max_distance); } else { - distance = le16_to_cpu(dsb->lz4_max_distance); + distance = le16_to_cpu(dsb->u1.lz4_max_distance); } EROFS_SB(sb)->lz4.max_distance_pages = distance ? diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h index 700597e9c810..17bc0b5f117d 100644 --- a/fs/erofs/erofs_fs.h +++ b/fs/erofs/erofs_fs.h @@ -18,15 +18,18 @@ * be incompatible with this kernel version. 
*/ #define EROFS_FEATURE_INCOMPAT_LZ4_0PADDING 0x00000001 +#define EROFS_FEATURE_INCOMPAT_COMPR_CFGS 0x00000002 #define EROFS_ALL_FEATURE_INCOMPAT EROFS_FEATURE_INCOMPAT_LZ4_0PADDING -/* 128-byte erofs on-disk super block */ +#define EROFS_SB_EXTSLOT_SIZE 16 + +/* erofs on-disk super block (currently 128 bytes) */ struct erofs_super_block { __le32 magic; /* file system magic number */ __le32 checksum; /* crc32c(super_block) */ __le32 feature_compat; __u8 blkszbits; /* support block_size == PAGE_SIZE only */ - __u8 reserved; + __u8 sb_extslots; /* superblock size = 128 + sb_extslots * 16 */ __le16 root_nid; /* nid of root directory */ __le64 inos; /* total valid ino # (== f_files - f_favail) */ @@ -39,8 +42,12 @@ struct erofs_super_block { __u8 uuid[16]; /* 128-bit uuid for volume */ __u8 volume_name[16]; /* volume name */ __le32 feature_incompat; - /* customized lz4 sliding window size instead of 64k by default */ - __le16 lz4_max_distance; + union { + /* bitmap for available compression algorithms */ + __le16 available_compr_algs; + /* customized sliding window size instead of 64k by default */ + __le16 lz4_max_distance; + } __packed u1; __u8 reserved2[42]; }; @@ -199,6 +206,7 @@ enum { Z_EROFS_COMPRESSION_LZ4 = 0, Z_EROFS_COMPRESSION_MAX }; +#define Z_EROFS_ALL_COMPR_ALGS (1 << (Z_EROFS_COMPRESSION_MAX - 1)) /* 14 bytes (+ length field = 16 bytes) */ struct z_erofs_lz4_cfgs { diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index b02fc64fcece..60063bbbb91a 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -75,6 +75,7 @@ struct erofs_sb_info { struct xarray managed_pslots; unsigned int shrinker_run_no; + u16 available_compr_algs; /* pseudo inode to manage cached pages */ struct inode *managed_cache; @@ -90,6 +91,7 @@ struct erofs_sb_info { /* inode slot unit size in bit shift */ unsigned char islotbits; + u32 sb_size; /* total superblock size */ u32 build_time_nsec; u64 build_time; @@ -233,6 +235,7 @@ static inline bool erofs_sb_has_##name(struct erofs_sb_info *sbi) \ } EROFS_FEATURE_FUNCS(lz4_0padding, incompat, INCOMPAT_LZ4_0PADDING) +EROFS_FEATURE_FUNCS(compr_cfgs, incompat, INCOMPAT_COMPR_CFGS) EROFS_FEATURE_FUNCS(sb_chksum, compat, COMPAT_SB_CHKSUM) /* atomic flag definitions */ @@ -452,7 +455,7 @@ static inline int z_erofs_load_lz4_config(struct super_block *sb, struct erofs_super_block *dsb, struct z_erofs_lz4_cfgs *lz4, int len) { - if (lz4 || dsb->lz4_max_distance) { + if (lz4 || dsb->u1.lz4_max_distance) { erofs_err(sb, "lz4 algorithm isn't enabled"); return -EINVAL; } diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 1ca8da3f2125..b641658e772f 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -122,6 +122,136 @@ static bool check_layout_compatibility(struct super_block *sb, return true; } +#ifdef CONFIG_EROFS_FS_ZIP +/* read variable-sized metadata, offset will be aligned by 4-byte */ +static void *erofs_read_metadata(struct super_block *sb, struct page **pagep, + erofs_off_t *offset, int *lengthp) +{ + struct page *page = *pagep; + u8 *buffer, *ptr; + int len, i, cnt; + erofs_blk_t blk; + + *offset = round_up(*offset, 4); + blk = erofs_blknr(*offset); + + if (!page || page->index != blk) { + if (page) { + unlock_page(page); + put_page(page); + } + page = erofs_get_meta_page(sb, blk); + if (IS_ERR(page)) + goto err_nullpage; + } + + ptr = kmap(page); + len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(*offset)]); + if (!len) + len = U16_MAX + 1; + buffer = kmalloc(len, GFP_KERNEL); + if (!buffer) { + buffer = ERR_PTR(-ENOMEM); + goto out; + } + *offset 
+= sizeof(__le16); + *lengthp = len; + + for (i = 0; i < len; i += cnt) { + cnt = min(EROFS_BLKSIZ - (int)erofs_blkoff(*offset), len - i); + blk = erofs_blknr(*offset); + + if (!page || page->index != blk) { + if (page) { + kunmap(page); + unlock_page(page); + put_page(page); + } + page = erofs_get_meta_page(sb, blk); + if (IS_ERR(page)) { + kfree(buffer); + goto err_nullpage; + } + ptr = kmap(page); + } + memcpy(buffer + i, ptr + erofs_blkoff(*offset), cnt); + *offset += cnt; + } +out: + kunmap(page); + *pagep = page; + return buffer; +err_nullpage: + *pagep = NULL; + return page; +} + +static int erofs_load_compr_cfgs(struct super_block *sb, + struct erofs_super_block *dsb) +{ + struct erofs_sb_info *sbi; + struct page *page; + unsigned int algs, alg; + erofs_off_t offset; + int size, ret; + + sbi = EROFS_SB(sb); + sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs); + + if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) { + erofs_err(sb, "try to load compressed fs with unsupported algorithms %x", + sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS); + return -EINVAL; + } + + offset = EROFS_SUPER_OFFSET + sbi->sb_size; + page = NULL; + alg = 0; + ret = 0; + + for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) { + void *data; + + if (!(algs & 1)) + continue; + + data = erofs_read_metadata(sb, &page, &offset, &size); + if (IS_ERR(data)) { + ret = PTR_ERR(data); + goto err; + } + + switch (alg) { + case Z_EROFS_COMPRESSION_LZ4: + ret = z_erofs_load_lz4_config(sb, dsb, data, size); + break; + default: + DBG_BUGON(1); + ret = -EFAULT; + } + kfree(data); + if (ret) + goto err; + } +err: + if (page) { + unlock_page(page); + put_page(page); + } + return ret; +} +#else +static int erofs_load_compr_cfgs(struct super_block *sb, + struct erofs_super_block *dsb) +{ + if (dsb->u1.available_compr_algs) { + erofs_err(sb, "try to load compressed fs when compression is disabled"); + return -EINVAL; + } + return 0; +} +#endif + static int erofs_read_superblock(struct super_block *sb) { struct erofs_sb_info *sbi; @@ -166,6 +296,12 @@ static int erofs_read_superblock(struct super_block *sb) if (!check_layout_compatibility(sb, dsb)) goto out; + sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE; + if (sbi->sb_size > EROFS_BLKSIZ) { + erofs_err(sb, "invalid sb_extslots %u (more than a fs block)", + sbi->sb_size); + goto out; + } sbi->blocks = le32_to_cpu(dsb->blocks); sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr); #ifdef CONFIG_EROFS_FS_XATTR @@ -189,7 +325,10 @@ static int erofs_read_superblock(struct super_block *sb) } /* parse on-disk compression configurations */ - ret = z_erofs_load_lz4_config(sb, dsb, NULL, 0); + if (erofs_sb_has_compr_cfgs(sbi)) + ret = erofs_load_compr_cfgs(sb, dsb); + else + ret = z_erofs_load_lz4_config(sb, dsb, NULL, 0); out: kunmap(page); put_page(page); From fe6adcce7e297fcb49f40c62df42334690c3f848 Mon Sep 17 00:00:00 2001 From: Ruiqi Gong Date: Wed, 31 Mar 2021 05:39:20 -0400 Subject: [PATCH 11/21] erofs: Clean up spelling mistakes found in fs/erofs zmap.c: s/correspoinding/corresponding zdata.c: s/endding/ending Link: https://lore.kernel.org/r/20210331093920.31923-1-gongruiqi1@huawei.com Reported-by: Hulk Robot Signed-off-by: Ruiqi Gong Reviewed-by: Gao Xiang Signed-off-by: Gao Xiang --- fs/erofs/zdata.c | 2 +- fs/erofs/zmap.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index cd9b76216925..4226f4115981 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -933,7 
+933,7 @@ static int z_erofs_decompress_pcluster(struct super_block *sb, }, pagepool); out: - /* must handle all compressed pages before endding pages */ + /* must handle all compressed pages before ending pages */ for (i = 0; i < clusterpages; ++i) { page = compressed_pages[i]; diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index 14d2de35110c..b384f546d368 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -443,7 +443,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, m.delta[0] = 1; fallthrough; case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: - /* get the correspoinding first chunk */ + /* get the corresponding first chunk */ err = z_erofs_extent_lookback(&m, m.delta[0]); if (err) goto unmap_out; From 54e0b6c873dcbd02b9b479c893f6fba8fcbc6a9c Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 7 Apr 2021 12:39:18 +0800 Subject: [PATCH 12/21] erofs: reserve physical_clusterbits[] Formal big pcluster design is actually more powerful / flexable than the previous thought whose pclustersize was fixed as power-of-2 blocks, which was obviously inefficient and space-wasting. Instead, pclustersize can now be set independently for each pcluster, so various pcluster sizes can also be used together in one file if mkfs wants (for example, according to data type and/or compression ratio). Let's get rid of previous physical_clusterbits[] setting (also notice that corresponding on-disk fields are still 0 for now). Therefore, head1/2 can be used for at most 2 different algorithms in one file and again pclustersize is now independent of these. Link: https://lore.kernel.org/r/20210407043927.10623-2-xiang@kernel.org Acked-by: Chao Yu Signed-off-by: Gao Xiang --- fs/erofs/erofs_fs.h | 4 +--- fs/erofs/internal.h | 1 - fs/erofs/zdata.c | 3 +-- fs/erofs/zmap.c | 15 --------------- 4 files changed, 2 insertions(+), 21 deletions(-) diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h index 17bc0b5f117d..626b7d3e9ab7 100644 --- a/fs/erofs/erofs_fs.h +++ b/fs/erofs/erofs_fs.h @@ -233,9 +233,7 @@ struct z_erofs_map_header { __u8 h_algorithmtype; /* * bit 0-2 : logical cluster bits - 12, e.g. 0 for 4096; - * bit 3-4 : (physical - logical) cluster bits of head 1: - * For example, if logical clustersize = 4096, 1 for 8192. - * bit 5-7 : (physical - logical) cluster bits of head 2. + * bit 3-7 : reserved. 
*/ __u8 h_clusterbits; }; diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 60063bbbb91a..05b02f99324c 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -266,7 +266,6 @@ struct erofs_inode { unsigned short z_advise; unsigned char z_algorithmtype[2]; unsigned char z_logical_clusterbits; - unsigned char z_physical_clusterbits[2]; }; #endif /* CONFIG_EROFS_FS_ZIP */ }; diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 4226f4115981..e3f0100d82d1 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -430,8 +430,7 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt, else pcl->algorithmformat = Z_EROFS_COMPRESSION_SHIFTED; - pcl->clusterbits = EROFS_I(inode)->z_physical_clusterbits[0]; - pcl->clusterbits -= PAGE_SHIFT; + pcl->clusterbits = 0; /* new pclusters should be claimed as type 1, primary and followed */ pcl->next = clt->owned_head; diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index b384f546d368..7fd6bd843471 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -17,11 +17,8 @@ int z_erofs_fill_inode(struct inode *inode) vi->z_algorithmtype[0] = 0; vi->z_algorithmtype[1] = 0; vi->z_logical_clusterbits = LOG_BLOCK_SIZE; - vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits; - vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits; set_bit(EROFS_I_Z_INITED_BIT, &vi->flags); } - inode->i_mapping->a_ops = &z_erofs_aops; return 0; } @@ -77,18 +74,6 @@ static int z_erofs_fill_inode_lazy(struct inode *inode) } vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7); - vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits + - ((h->h_clusterbits >> 3) & 3); - - if (vi->z_physical_clusterbits[0] != LOG_BLOCK_SIZE) { - erofs_err(sb, "unsupported physical clusterbits %u for nid %llu, please upgrade kernel", - vi->z_physical_clusterbits[0], vi->nid); - err = -EOPNOTSUPP; - goto unmap_done; - } - - vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits + - ((h->h_clusterbits >> 5) & 7); /* paired with smp_mb() at the beginning of the function */ smp_mb(); set_bit(EROFS_I_Z_INITED_BIT, &vi->flags); From 524887347fcb67faa0a63dd3c4c02ab48d4968d4 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Sat, 10 Apr 2021 03:06:30 +0800 Subject: [PATCH 13/21] erofs: introduce multipage per-CPU buffers To deal the with the cases which inplace decompression is infeasible for some inplace I/O. Per-CPU buffers was introduced to get rid of page allocation latency and thrash for low-latency decompression algorithms such as lz4. For the big pcluster feature, introduce multipage per-CPU buffers to keep such inplace I/O pclusters temporarily as well but note that per-CPU pages are just consecutive virtually. When a new big pcluster fs is mounted, its max pclustersize will be read and per-CPU buffers can be growed if needed. Shrinking adjustable per-CPU buffers is more complex (because we don't know if such size is still be used), so currently just release them all when unloading. 
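A rough sketch of how the new interface is meant to be used (simplified; the pcpubuf.c file added below is the real implementation, and max_pclusterblks here is only a placeholder for the eventual per-fs maximum):

        /* mount time: make sure the per-CPU buffers are large enough */
        err = erofs_pcpubuf_growsize(max_pclusterblks);

        /* decompression time: the buffer is returned locked on the local CPU */
        dst = erofs_get_pcpubuf(nrpages);   /* NULL if the buffer is still too small */
        if (dst) {
                /* decompress or copy in-place I/O data into dst here */
                erofs_put_pcpubuf(dst);     /* unlock and re-enable preemption */
        }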
Link: https://lore.kernel.org/r/20210409190630.19569-1-xiang@kernel.org Acked-by: Chao Yu Signed-off-by: Gao Xiang --- fs/erofs/Makefile | 2 +- fs/erofs/decompressor.c | 8 ++- fs/erofs/internal.h | 25 ++----- fs/erofs/pcpubuf.c | 148 ++++++++++++++++++++++++++++++++++++++++ fs/erofs/super.c | 2 + fs/erofs/utils.c | 12 ---- 6 files changed, 163 insertions(+), 34 deletions(-) create mode 100644 fs/erofs/pcpubuf.c diff --git a/fs/erofs/Makefile b/fs/erofs/Makefile index af159539fc1b..1f9aced49070 100644 --- a/fs/erofs/Makefile +++ b/fs/erofs/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_EROFS_FS) += erofs.o -erofs-objs := super.o inode.o data.o namei.o dir.o utils.o +erofs-objs := super.o inode.o data.o namei.o dir.o utils.o pcpubuf.o erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index 27aa6a99b371..fb4838c0f0df 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -47,7 +47,9 @@ int z_erofs_load_lz4_config(struct super_block *sb, EROFS_SB(sb)->lz4.max_distance_pages = distance ? DIV_ROUND_UP(distance, PAGE_SIZE) + 1 : LZ4_MAX_DISTANCE_PAGES; - return 0; + + /* TODO: use max pclusterblks after bigpcluster is enabled */ + return erofs_pcpubuf_growsize(1); } static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq, @@ -114,7 +116,7 @@ static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq, * pages should be copied in order to avoid being overlapped. */ struct page **in = rq->in; - u8 *const tmp = erofs_get_pcpubuf(0); + u8 *const tmp = erofs_get_pcpubuf(1); u8 *tmpp = tmp; unsigned int inlen = rq->inputsize - pageofs_in; unsigned int count = min_t(uint, inlen, PAGE_SIZE - pageofs_in); @@ -271,7 +273,7 @@ static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq, * compressed data is preferred. 
*/ if (rq->outputsize <= PAGE_SIZE * 7 / 8) { - dst = erofs_get_pcpubuf(0); + dst = erofs_get_pcpubuf(1); if (IS_ERR(dst)) return PTR_ERR(dst); diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 05b02f99324c..4db085413304 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -197,9 +197,6 @@ static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp) /* hard limit of pages per compressed cluster */ #define Z_EROFS_CLUSTER_MAX_PAGES (CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT) -#define EROFS_PCPUBUF_NR_PAGES Z_EROFS_CLUSTER_MAX_PAGES -#else -#define EROFS_PCPUBUF_NR_PAGES 0 #endif /* !CONFIG_EROFS_FS_ZIP */ /* we strictly follow PAGE_SIZE and no buffer head yet */ @@ -405,24 +402,16 @@ int erofs_namei(struct inode *dir, struct qstr *name, /* dir.c */ extern const struct file_operations erofs_dir_fops; +/* pcpubuf.c */ +void *erofs_get_pcpubuf(unsigned int requiredpages); +void erofs_put_pcpubuf(void *ptr); +int erofs_pcpubuf_growsize(unsigned int nrpages); +void erofs_pcpubuf_init(void); +void erofs_pcpubuf_exit(void); + /* utils.c / zdata.c */ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp); -#if (EROFS_PCPUBUF_NR_PAGES > 0) -void *erofs_get_pcpubuf(unsigned int pagenr); -#define erofs_put_pcpubuf(buf) do { \ - (void)&(buf); \ - preempt_enable(); \ -} while (0) -#else -static inline void *erofs_get_pcpubuf(unsigned int pagenr) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -#define erofs_put_pcpubuf(buf) do {} while (0) -#endif - #ifdef CONFIG_EROFS_FS_ZIP int erofs_workgroup_put(struct erofs_workgroup *grp); struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb, diff --git a/fs/erofs/pcpubuf.c b/fs/erofs/pcpubuf.c new file mode 100644 index 000000000000..6c885575128a --- /dev/null +++ b/fs/erofs/pcpubuf.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) Gao Xiang + * + * For low-latency decompression algorithms (e.g. lz4), reserve consecutive + * per-CPU virtual memory (in pages) in advance to store such inplace I/O + * data if inplace decompression is failed (due to unmet inplace margin for + * example). 
+ */ +#include "internal.h" + +struct erofs_pcpubuf { + raw_spinlock_t lock; + void *ptr; + struct page **pages; + unsigned int nrpages; +}; + +static DEFINE_PER_CPU(struct erofs_pcpubuf, erofs_pcb); + +void *erofs_get_pcpubuf(unsigned int requiredpages) + __acquires(pcb->lock) +{ + struct erofs_pcpubuf *pcb = &get_cpu_var(erofs_pcb); + + raw_spin_lock(&pcb->lock); + /* check if the per-CPU buffer is too small */ + if (requiredpages > pcb->nrpages) { + raw_spin_unlock(&pcb->lock); + put_cpu_var(erofs_pcb); + /* (for sparse checker) pretend pcb->lock is still taken */ + __acquire(pcb->lock); + return NULL; + } + return pcb->ptr; +} + +void erofs_put_pcpubuf(void *ptr) __releases(pcb->lock) +{ + struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, smp_processor_id()); + + DBG_BUGON(pcb->ptr != ptr); + raw_spin_unlock(&pcb->lock); + put_cpu_var(erofs_pcb); +} + +/* the next step: support per-CPU page buffers hotplug */ +int erofs_pcpubuf_growsize(unsigned int nrpages) +{ + static DEFINE_MUTEX(pcb_resize_mutex); + static unsigned int pcb_nrpages; + LIST_HEAD(pagepool); + int delta, cpu, ret, i; + + mutex_lock(&pcb_resize_mutex); + delta = nrpages - pcb_nrpages; + ret = 0; + /* avoid shrinking pcpubuf, since no idea how many fses rely on */ + if (delta <= 0) + goto out; + + for_each_possible_cpu(cpu) { + struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu); + struct page **pages, **oldpages; + void *ptr, *old_ptr; + + pages = kmalloc_array(nrpages, sizeof(*pages), GFP_KERNEL); + if (!pages) { + ret = -ENOMEM; + break; + } + + for (i = 0; i < nrpages; ++i) { + pages[i] = erofs_allocpage(&pagepool, GFP_KERNEL); + if (!pages[i]) { + ret = -ENOMEM; + oldpages = pages; + goto free_pagearray; + } + } + ptr = vmap(pages, nrpages, VM_MAP, PAGE_KERNEL); + if (!ptr) { + ret = -ENOMEM; + oldpages = pages; + goto free_pagearray; + } + raw_spin_lock(&pcb->lock); + old_ptr = pcb->ptr; + pcb->ptr = ptr; + oldpages = pcb->pages; + pcb->pages = pages; + i = pcb->nrpages; + pcb->nrpages = nrpages; + raw_spin_unlock(&pcb->lock); + + if (!oldpages) { + DBG_BUGON(old_ptr); + continue; + } + + if (old_ptr) + vunmap(old_ptr); +free_pagearray: + while (i) + list_add(&oldpages[--i]->lru, &pagepool); + kfree(oldpages); + if (ret) + break; + } + pcb_nrpages = nrpages; + put_pages_list(&pagepool); +out: + mutex_unlock(&pcb_resize_mutex); + return ret; +} + +void erofs_pcpubuf_init(void) +{ + int cpu; + + for_each_possible_cpu(cpu) { + struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu); + + raw_spin_lock_init(&pcb->lock); + } +} + +void erofs_pcpubuf_exit(void) +{ + int cpu, i; + + for_each_possible_cpu(cpu) { + struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu); + + if (pcb->ptr) { + vunmap(pcb->ptr); + pcb->ptr = NULL; + } + if (!pcb->pages) + continue; + + for (i = 0; i < pcb->nrpages; ++i) + if (pcb->pages[i]) + put_page(pcb->pages[i]); + kfree(pcb->pages); + pcb->pages = NULL; + } +} diff --git a/fs/erofs/super.c b/fs/erofs/super.c index b641658e772f..bbf3bbd908e0 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -655,6 +655,7 @@ static int __init erofs_module_init(void) if (err) goto shrinker_err; + erofs_pcpubuf_init(); err = z_erofs_init_zip_subsystem(); if (err) goto zip_err; @@ -684,6 +685,7 @@ static void __exit erofs_module_exit(void) /* Ensure all RCU free inodes are safe before cache is destroyed. 
*/ rcu_barrier(); kmem_cache_destroy(erofs_inode_cachep); + erofs_pcpubuf_exit(); } /* get filesystem statistics */ diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c index de9986d2f82f..6758c5b19f7c 100644 --- a/fs/erofs/utils.c +++ b/fs/erofs/utils.c @@ -21,18 +21,6 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp) return page; } -#if (EROFS_PCPUBUF_NR_PAGES > 0) -static struct { - u8 data[PAGE_SIZE * EROFS_PCPUBUF_NR_PAGES]; -} ____cacheline_aligned_in_smp erofs_pcpubuf[NR_CPUS]; - -void *erofs_get_pcpubuf(unsigned int pagenr) -{ - preempt_disable(); - return &erofs_pcpubuf[smp_processor_id()].data[pagenr * PAGE_SIZE]; -} -#endif - #ifdef CONFIG_EROFS_FS_ZIP /* global shrink count (for all mounted EROFS instances) */ static atomic_long_t erofs_global_shrink_cnt; From 9f6cc76e6ff0631a99cd94eab8af137057633a52 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 7 Apr 2021 12:39:20 +0800 Subject: [PATCH 14/21] erofs: introduce physical cluster slab pools Since multiple pcluster sizes could be used at once, the number of compressed pages will become a variable factor. It's necessary to introduce slab pools rather than a single slab cache now. This limits the pclustersize to 1M (Z_EROFS_PCLUSTER_MAX_SIZE), and get rid of the obsolete EROFS_FS_CLUSTER_PAGE_LIMIT, which has no use now. Link: https://lore.kernel.org/r/20210407043927.10623-4-xiang@kernel.org Acked-by: Chao Yu Signed-off-by: Gao Xiang --- fs/erofs/Kconfig | 14 ---- fs/erofs/erofs_fs.h | 3 + fs/erofs/internal.h | 3 - fs/erofs/zdata.c | 170 ++++++++++++++++++++++++++++++-------------- fs/erofs/zdata.h | 14 ++-- 5 files changed, 125 insertions(+), 79 deletions(-) diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig index 74b0aaa7114c..858b3339f381 100644 --- a/fs/erofs/Kconfig +++ b/fs/erofs/Kconfig @@ -76,17 +76,3 @@ config EROFS_FS_ZIP If you don't want to enable compression feature, say N. -config EROFS_FS_CLUSTER_PAGE_LIMIT - int "EROFS Cluster Pages Hard Limit" - depends on EROFS_FS_ZIP - range 1 256 - default "1" - help - Indicates maximum # of pages of a compressed - physical cluster. - - For example, if files in a image were compressed - into 8k-unit, hard limit should not be configured - less than 2. Otherwise, the image will be refused - to mount on this kernel. 
- diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h index 626b7d3e9ab7..76777673eb63 100644 --- a/fs/erofs/erofs_fs.h +++ b/fs/erofs/erofs_fs.h @@ -201,6 +201,9 @@ static inline unsigned int erofs_xattr_entry_size(struct erofs_xattr_entry *e) e->e_name_len + le16_to_cpu(e->e_value_size)); } +/* maximum supported size of a physical compression cluster */ +#define Z_EROFS_PCLUSTER_MAX_SIZE (1024 * 1024) + /* available compression algorithm types (for h_algorithmtype) */ enum { Z_EROFS_COMPRESSION_LZ4 = 0, diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 4db085413304..898838e1965e 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -194,9 +194,6 @@ static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp) return v; } #endif /* !CONFIG_SMP */ - -/* hard limit of pages per compressed cluster */ -#define Z_EROFS_CLUSTER_MAX_PAGES (CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT) #endif /* !CONFIG_EROFS_FS_ZIP */ /* we strictly follow PAGE_SIZE and no buffer head yet */ diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index e3f0100d82d1..db296d324333 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -10,6 +10,93 @@ #include +/* + * since pclustersize is variable for big pcluster feature, introduce slab + * pools implementation for different pcluster sizes. + */ +struct z_erofs_pcluster_slab { + struct kmem_cache *slab; + unsigned int maxpages; + char name[48]; +}; + +#define _PCLP(n) { .maxpages = n } + +static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = { + _PCLP(1), _PCLP(4), _PCLP(16), _PCLP(64), _PCLP(128), + _PCLP(Z_EROFS_PCLUSTER_MAX_PAGES) +}; + +static void z_erofs_destroy_pcluster_pool(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) { + if (!pcluster_pool[i].slab) + continue; + kmem_cache_destroy(pcluster_pool[i].slab); + pcluster_pool[i].slab = NULL; + } +} + +static int z_erofs_create_pcluster_pool(void) +{ + struct z_erofs_pcluster_slab *pcs; + struct z_erofs_pcluster *a; + unsigned int size; + + for (pcs = pcluster_pool; + pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) { + size = struct_size(a, compressed_pages, pcs->maxpages); + + sprintf(pcs->name, "erofs_pcluster-%u", pcs->maxpages); + pcs->slab = kmem_cache_create(pcs->name, size, 0, + SLAB_RECLAIM_ACCOUNT, NULL); + if (pcs->slab) + continue; + + z_erofs_destroy_pcluster_pool(); + return -ENOMEM; + } + return 0; +} + +static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) { + struct z_erofs_pcluster_slab *pcs = pcluster_pool + i; + struct z_erofs_pcluster *pcl; + + if (nrpages > pcs->maxpages) + continue; + + pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS); + if (!pcl) + return ERR_PTR(-ENOMEM); + pcl->pclusterpages = nrpages; + return pcl; + } + return ERR_PTR(-EINVAL); +} + +static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) { + struct z_erofs_pcluster_slab *pcs = pcluster_pool + i; + + if (pcl->pclusterpages > pcs->maxpages) + continue; + + kmem_cache_free(pcs->slab, pcl); + return; + } + DBG_BUGON(1); +} + /* * a compressed_pages[] placeholder in order to avoid * being filled with file pages for in-place decompression. 
@@ -37,12 +124,11 @@ typedef tagptr1_t compressed_page_t; tagptr_fold(compressed_page_t, page, 1) static struct workqueue_struct *z_erofs_workqueue __read_mostly; -static struct kmem_cache *pcluster_cachep __read_mostly; void z_erofs_exit_zip_subsystem(void) { destroy_workqueue(z_erofs_workqueue); - kmem_cache_destroy(pcluster_cachep); + z_erofs_destroy_pcluster_pool(); } static inline int z_erofs_init_workqueue(void) @@ -59,32 +145,16 @@ static inline int z_erofs_init_workqueue(void) return z_erofs_workqueue ? 0 : -ENOMEM; } -static void z_erofs_pcluster_init_once(void *ptr) -{ - struct z_erofs_pcluster *pcl = ptr; - struct z_erofs_collection *cl = z_erofs_primarycollection(pcl); - unsigned int i; - - mutex_init(&cl->lock); - cl->nr_pages = 0; - cl->vcnt = 0; - for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i) - pcl->compressed_pages[i] = NULL; -} - int __init z_erofs_init_zip_subsystem(void) { - pcluster_cachep = kmem_cache_create("erofs_compress", - Z_EROFS_WORKGROUP_SIZE, 0, - SLAB_RECLAIM_ACCOUNT, - z_erofs_pcluster_init_once); - if (pcluster_cachep) { - if (!z_erofs_init_workqueue()) - return 0; + int err = z_erofs_create_pcluster_pool(); - kmem_cache_destroy(pcluster_cachep); - } - return -ENOMEM; + if (err) + return err; + err = z_erofs_init_workqueue(); + if (err) + z_erofs_destroy_pcluster_pool(); + return err; } enum z_erofs_collectmode { @@ -169,7 +239,6 @@ static void preload_compressed_pages(struct z_erofs_collector *clt, struct list_head *pagepool) { const struct z_erofs_pcluster *pcl = clt->pcl; - const unsigned int clusterpages = BIT(pcl->clusterbits); struct page **pages = clt->compressedpages; pgoff_t index = pcl->obj.index + (pages - pcl->compressed_pages); bool standalone = true; @@ -179,7 +248,7 @@ static void preload_compressed_pages(struct z_erofs_collector *clt, if (clt->mode < COLLECT_PRIMARY_FOLLOWED) return; - for (; pages < pcl->compressed_pages + clusterpages; ++pages) { + for (; pages < pcl->compressed_pages + pcl->pclusterpages; ++pages) { struct page *page; compressed_page_t t; struct page *newpage = NULL; @@ -239,14 +308,13 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, struct z_erofs_pcluster *const pcl = container_of(grp, struct z_erofs_pcluster, obj); struct address_space *const mapping = MNGD_MAPPING(sbi); - const unsigned int clusterpages = BIT(pcl->clusterbits); int i; /* * refcount of workgroup is now freezed as 1, * therefore no need to worry about available decompression users. 
*/ - for (i = 0; i < clusterpages; ++i) { + for (i = 0; i < pcl->pclusterpages; ++i) { struct page *page = pcl->compressed_pages[i]; if (!page) @@ -271,13 +339,12 @@ int erofs_try_to_free_cached_page(struct address_space *mapping, struct page *page) { struct z_erofs_pcluster *const pcl = (void *)page_private(page); - const unsigned int clusterpages = BIT(pcl->clusterbits); int ret = 0; /* 0 - busy */ if (erofs_workgroup_try_to_freeze(&pcl->obj, 1)) { unsigned int i; - for (i = 0; i < clusterpages; ++i) { + for (i = 0; i < pcl->pclusterpages; ++i) { if (pcl->compressed_pages[i] == page) { WRITE_ONCE(pcl->compressed_pages[i], NULL); ret = 1; @@ -297,9 +364,9 @@ static inline bool z_erofs_try_inplace_io(struct z_erofs_collector *clt, struct page *page) { struct z_erofs_pcluster *const pcl = clt->pcl; - const unsigned int clusterpages = BIT(pcl->clusterbits); - while (clt->compressedpages < pcl->compressed_pages + clusterpages) { + while (clt->compressedpages < + pcl->compressed_pages + pcl->pclusterpages) { if (!cmpxchg(clt->compressedpages++, NULL, page)) return true; } @@ -413,10 +480,10 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt, struct erofs_workgroup *grp; int err; - /* no available workgroup, let's allocate one */ - pcl = kmem_cache_alloc(pcluster_cachep, GFP_NOFS); - if (!pcl) - return -ENOMEM; + /* no available pcluster, let's allocate one */ + pcl = z_erofs_alloc_pcluster(map->m_plen >> PAGE_SHIFT); + if (IS_ERR(pcl)) + return PTR_ERR(pcl); atomic_set(&pcl->obj.refcount, 1); pcl->obj.index = map->m_pa >> PAGE_SHIFT; @@ -430,24 +497,18 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt, else pcl->algorithmformat = Z_EROFS_COMPRESSION_SHIFTED; - pcl->clusterbits = 0; - /* new pclusters should be claimed as type 1, primary and followed */ pcl->next = clt->owned_head; clt->mode = COLLECT_PRIMARY_FOLLOWED; cl = z_erofs_primarycollection(pcl); - - /* must be cleaned before freeing to slab */ - DBG_BUGON(cl->nr_pages); - DBG_BUGON(cl->vcnt); - cl->pageofs = map->m_la & ~PAGE_MASK; /* * lock all primary followed works before visible to others * and mutex_trylock *never* fails for a new pcluster. 
*/ + mutex_init(&cl->lock); DBG_BUGON(!mutex_trylock(&cl->lock)); grp = erofs_insert_workgroup(inode->i_sb, &pcl->obj); @@ -471,7 +532,7 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt, err_out: mutex_unlock(&cl->lock); - kmem_cache_free(pcluster_cachep, pcl); + z_erofs_free_pcluster(pcl); return err; } @@ -517,7 +578,7 @@ out: clt->compressedpages = clt->pcl->compressed_pages; if (clt->mode <= COLLECT_PRIMARY) /* cannot do in-place I/O */ - clt->compressedpages += Z_EROFS_CLUSTER_MAX_PAGES; + clt->compressedpages += clt->pcl->pclusterpages; return 0; } @@ -530,9 +591,8 @@ static void z_erofs_rcu_callback(struct rcu_head *head) struct z_erofs_collection *const cl = container_of(head, struct z_erofs_collection, rcu); - kmem_cache_free(pcluster_cachep, - container_of(cl, struct z_erofs_pcluster, - primary_collection)); + z_erofs_free_pcluster(container_of(cl, struct z_erofs_pcluster, + primary_collection)); } void erofs_workgroup_free_rcu(struct erofs_workgroup *grp) @@ -784,9 +844,8 @@ static int z_erofs_decompress_pcluster(struct super_block *sb, struct list_head *pagepool) { struct erofs_sb_info *const sbi = EROFS_SB(sb); - const unsigned int clusterpages = BIT(pcl->clusterbits); struct z_erofs_pagevec_ctor ctor; - unsigned int i, outputsize, llen, nr_pages; + unsigned int i, inputsize, outputsize, llen, nr_pages; struct page *pages_onstack[Z_EROFS_VMAP_ONSTACK_PAGES]; struct page **pages, **compressed_pages, *page; @@ -866,7 +925,7 @@ static int z_erofs_decompress_pcluster(struct super_block *sb, overlapped = false; compressed_pages = pcl->compressed_pages; - for (i = 0; i < clusterpages; ++i) { + for (i = 0; i < pcl->pclusterpages; ++i) { unsigned int pagenr; page = compressed_pages[i]; @@ -919,12 +978,13 @@ static int z_erofs_decompress_pcluster(struct super_block *sb, partial = true; } + inputsize = pcl->pclusterpages * PAGE_SIZE; err = z_erofs_decompress(&(struct z_erofs_decompress_req) { .sb = sb, .in = compressed_pages, .out = pages, .pageofs_out = cl->pageofs, - .inputsize = PAGE_SIZE, + .inputsize = inputsize, .outputsize = outputsize, .alg = pcl->algorithmformat, .inplace_io = overlapped, @@ -933,7 +993,7 @@ static int z_erofs_decompress_pcluster(struct super_block *sb, out: /* must handle all compressed pages before ending pages */ - for (i = 0; i < clusterpages; ++i) { + for (i = 0; i < pcl->pclusterpages; ++i) { page = compressed_pages[i]; if (erofs_page_is_managed(sbi, page)) @@ -1236,7 +1296,7 @@ static void z_erofs_submit_queue(struct super_block *sb, pcl = container_of(owned_head, struct z_erofs_pcluster, next); cur = pcl->obj.index; - end = cur + BIT(pcl->clusterbits); + end = cur + pcl->pclusterpages; /* close the main owned chain at first */ owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL, diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h index b503b353d4ab..942ee69dff6a 100644 --- a/fs/erofs/zdata.h +++ b/fs/erofs/zdata.h @@ -10,6 +10,7 @@ #include "internal.h" #include "zpvec.h" +#define Z_EROFS_PCLUSTER_MAX_PAGES (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE) #define Z_EROFS_NR_INLINE_PAGEVECS 3 /* @@ -59,16 +60,17 @@ struct z_erofs_pcluster { /* A: point to next chained pcluster or TAILs */ z_erofs_next_pcluster_t next; - /* A: compressed pages (including multi-usage pages) */ - struct page *compressed_pages[Z_EROFS_CLUSTER_MAX_PAGES]; - /* A: lower limit of decompressed length and if full length or not */ unsigned int length; + /* I: physical cluster size in pages */ + unsigned short pclusterpages; + /* I: compression algorithm format */ 
unsigned char algorithmformat; - /* I: bit shift of physical cluster size */ - unsigned char clusterbits; + + /* A: compressed pages (can be cached or inplaced pages) */ + struct page *compressed_pages[]; }; #define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection) @@ -82,8 +84,6 @@ struct z_erofs_pcluster { #define Z_EROFS_PCLUSTER_NIL (NULL) -#define Z_EROFS_WORKGROUP_SIZE sizeof(struct z_erofs_pcluster) - struct z_erofs_decompressqueue { struct super_block *sb; atomic_t pending_bios; From 81382f5f5cb0c9c5694c19d36460f757a8c96841 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 7 Apr 2021 12:39:21 +0800 Subject: [PATCH 15/21] erofs: fix up inplace I/O pointer for big pcluster When picking up inplace I/O pages, it should be traversed in reverse order in aligned with the traversal order of file-backed online pages. Also, index should be updated together when preloading compressed pages. Previously, only page-sized pclustersize was supported so no problem at all. Also rename `compressedpages' to `icpage_ptr' to reflect its functionality. Link: https://lore.kernel.org/r/20210407043927.10623-5-xiang@kernel.org Acked-by: Chao Yu Signed-off-by: Gao Xiang --- fs/erofs/zdata.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index db296d324333..78e4b598ecca 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -204,7 +204,8 @@ struct z_erofs_collector { struct z_erofs_pcluster *pcl, *tailpcl; struct z_erofs_collection *cl; - struct page **compressedpages; + /* a pointer used to pick up inplace I/O pages */ + struct page **icpage_ptr; z_erofs_next_pcluster_t owned_head; enum z_erofs_collectmode mode; @@ -238,17 +239,19 @@ static void preload_compressed_pages(struct z_erofs_collector *clt, enum z_erofs_cache_alloctype type, struct list_head *pagepool) { - const struct z_erofs_pcluster *pcl = clt->pcl; - struct page **pages = clt->compressedpages; - pgoff_t index = pcl->obj.index + (pages - pcl->compressed_pages); + struct z_erofs_pcluster *pcl = clt->pcl; bool standalone = true; gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN; + struct page **pages; + pgoff_t index; if (clt->mode < COLLECT_PRIMARY_FOLLOWED) return; - for (; pages < pcl->compressed_pages + pcl->pclusterpages; ++pages) { + pages = pcl->compressed_pages; + index = pcl->obj.index; + for (; index < pcl->obj.index + pcl->pclusterpages; ++index, ++pages) { struct page *page; compressed_page_t t; struct page *newpage = NULL; @@ -360,16 +363,14 @@ int erofs_try_to_free_cached_page(struct address_space *mapping, } /* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */ -static inline bool z_erofs_try_inplace_io(struct z_erofs_collector *clt, - struct page *page) +static bool z_erofs_try_inplace_io(struct z_erofs_collector *clt, + struct page *page) { struct z_erofs_pcluster *const pcl = clt->pcl; - while (clt->compressedpages < - pcl->compressed_pages + pcl->pclusterpages) { - if (!cmpxchg(clt->compressedpages++, NULL, page)) + while (clt->icpage_ptr > pcl->compressed_pages) + if (!cmpxchg(--clt->icpage_ptr, NULL, page)) return true; - } return false; } @@ -576,9 +577,8 @@ out: z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS, clt->cl->pagevec, clt->cl->vcnt); - clt->compressedpages = clt->pcl->compressed_pages; - if (clt->mode <= COLLECT_PRIMARY) /* cannot do in-place I/O */ - clt->compressedpages += clt->pcl->pclusterpages; + /* since file-backed online pages 
are traversed in reverse order */ + clt->icpage_ptr = clt->pcl->compressed_pages + clt->pcl->pclusterpages; return 0; } From 5404c33010cb8ee063c05376d4a2eba129872281 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 7 Apr 2021 12:39:22 +0800 Subject: [PATCH 16/21] erofs: add big physical cluster definition Big pcluster indicates the size of compressed data for each physical pcluster is no longer fixed as block size, but could be more than 1 block (more accurately, 1 logical pcluster) When big pcluster feature is enabled for head0/1, delta0 of the 1st non-head lcluster index will keep block count of this pcluster in lcluster size instead of 1. Or, the compressed size of pcluster should be 1 lcluster if pcluster has no non-head lcluster index. Also note that BIG_PCLUSTER feature reuses COMPR_CFGS feature since it depends on COMPR_CFGS and will be released together. Link: https://lore.kernel.org/r/20210407043927.10623-6-xiang@kernel.org Acked-by: Chao Yu Signed-off-by: Gao Xiang --- fs/erofs/erofs_fs.h | 19 +++++++++++++++---- fs/erofs/internal.h | 1 + 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h index 76777673eb63..ecc3a0ea0bc4 100644 --- a/fs/erofs/erofs_fs.h +++ b/fs/erofs/erofs_fs.h @@ -19,6 +19,7 @@ */ #define EROFS_FEATURE_INCOMPAT_LZ4_0PADDING 0x00000001 #define EROFS_FEATURE_INCOMPAT_COMPR_CFGS 0x00000002 +#define EROFS_FEATURE_INCOMPAT_BIG_PCLUSTER 0x00000002 #define EROFS_ALL_FEATURE_INCOMPAT EROFS_FEATURE_INCOMPAT_LZ4_0PADDING #define EROFS_SB_EXTSLOT_SIZE 16 @@ -214,17 +215,20 @@ enum { /* 14 bytes (+ length field = 16 bytes) */ struct z_erofs_lz4_cfgs { __le16 max_distance; - u8 reserved[12]; + __le16 max_pclusterblks; + u8 reserved[10]; } __packed; /* * bit 0 : COMPACTED_2B indexes (0 - off; 1 - on) * e.g. for 4k logical cluster size, 4B if compacted 2B is off; * (4B) + 2B + (4B) if compacted 2B is on. + * bit 1 : HEAD1 big pcluster (0 - off; 1 - on) + * bit 2 : HEAD2 big pcluster (0 - off; 1 - on) */ -#define Z_EROFS_ADVISE_COMPACTED_2B_BIT 0 - -#define Z_EROFS_ADVISE_COMPACTED_2B (1 << Z_EROFS_ADVISE_COMPACTED_2B_BIT) +#define Z_EROFS_ADVISE_COMPACTED_2B 0x0001 +#define Z_EROFS_ADVISE_BIG_PCLUSTER_1 0x0002 +#define Z_EROFS_ADVISE_BIG_PCLUSTER_2 0x0004 struct z_erofs_map_header { __le32 h_reserved1; @@ -279,6 +283,13 @@ enum { #define Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS 2 #define Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT 0 +/* + * D0_CBLKCNT will be marked _only_ at the 1st non-head lcluster to store the + * compressed block count of a compressed extent (in logical clusters, aka. + * block count of a pcluster). 
+ */ +#define Z_EROFS_VLE_DI_D0_CBLKCNT (1 << 11) + struct z_erofs_vle_decompressed_index { __le16 di_advise; /* where to decompress in the head cluster */ diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 898838e1965e..c130e3848322 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -230,6 +230,7 @@ static inline bool erofs_sb_has_##name(struct erofs_sb_info *sbi) \ EROFS_FEATURE_FUNCS(lz4_0padding, incompat, INCOMPAT_LZ4_0PADDING) EROFS_FEATURE_FUNCS(compr_cfgs, incompat, INCOMPAT_COMPR_CFGS) +EROFS_FEATURE_FUNCS(big_pcluster, incompat, INCOMPAT_BIG_PCLUSTER) EROFS_FEATURE_FUNCS(sb_chksum, compat, COMPAT_SB_CHKSUM) /* atomic flag definitions */ From 4fea63f7d76e425965033938bab6488e48579e3f Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 7 Apr 2021 12:39:23 +0800 Subject: [PATCH 17/21] erofs: adjust per-CPU buffers according to max_pclusterblks Adjust per-CPU buffers on demand since big pcluster definition is available. Also, bail out unsupported pcluster size according to Z_EROFS_PCLUSTER_MAX_SIZE. Link: https://lore.kernel.org/r/20210407043927.10623-7-xiang@kernel.org Acked-by: Chao Yu Signed-off-by: Gao Xiang --- fs/erofs/decompressor.c | 20 ++++++++++++++++---- fs/erofs/internal.h | 2 ++ 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index fb4838c0f0df..900de4725d35 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -32,6 +32,7 @@ int z_erofs_load_lz4_config(struct super_block *sb, struct erofs_super_block *dsb, struct z_erofs_lz4_cfgs *lz4, int size) { + struct erofs_sb_info *sbi = EROFS_SB(sb); u16 distance; if (lz4) { @@ -40,16 +41,27 @@ int z_erofs_load_lz4_config(struct super_block *sb, return -EINVAL; } distance = le16_to_cpu(lz4->max_distance); + + sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks); + if (!sbi->lz4.max_pclusterblks) { + sbi->lz4.max_pclusterblks = 1; /* reserved case */ + } else if (sbi->lz4.max_pclusterblks > + Z_EROFS_PCLUSTER_MAX_SIZE / EROFS_BLKSIZ) { + erofs_err(sb, "too large lz4 pclusterblks %u", + sbi->lz4.max_pclusterblks); + return -EINVAL; + } else if (sbi->lz4.max_pclusterblks >= 2) { + erofs_info(sb, "EXPERIMENTAL big pcluster feature in use. Use at your own risk!"); + } } else { distance = le16_to_cpu(dsb->u1.lz4_max_distance); + sbi->lz4.max_pclusterblks = 1; } - EROFS_SB(sb)->lz4.max_distance_pages = distance ? + sbi->lz4.max_distance_pages = distance ? 
DIV_ROUND_UP(distance, PAGE_SIZE) + 1 : LZ4_MAX_DISTANCE_PAGES; - - /* TODO: use max pclusterblks after bigpcluster is enabled */ - return erofs_pcpubuf_growsize(1); + return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks); } static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq, diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index c130e3848322..5c2388daee6e 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -63,6 +63,8 @@ struct erofs_fs_context { struct erofs_sb_lz4_info { /* # of pages needed for EROFS lz4 rolling decompression */ u16 max_distance_pages; + /* maximum possible blocks for pclusters in the filesystem */ + u16 max_pclusterblks; }; struct erofs_sb_info { From cec6e93beadfd145758af2c0854fcc2abb8170cb Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 7 Apr 2021 12:39:24 +0800 Subject: [PATCH 18/21] erofs: support parsing big pcluster compress indexes When INCOMPAT_BIG_PCLUSTER sb feature is enabled, legacy compress indexes will also have the same on-disk header compact indexes to keep per-file configurations instead of leaving it zeroed. If ADVISE_BIG_PCLUSTER is set for a file, CBLKCNT will be loaded for each pcluster in this file by parsing 1st non-head lcluster. Link: https://lore.kernel.org/r/20210407043927.10623-8-xiang@kernel.org Acked-by: Chao Yu Signed-off-by: Gao Xiang --- fs/erofs/zmap.c | 79 +++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 73 insertions(+), 6 deletions(-) diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index 7fd6bd843471..6c0c47f68b75 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -11,8 +11,10 @@ int z_erofs_fill_inode(struct inode *inode) { struct erofs_inode *const vi = EROFS_I(inode); + struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb); - if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) { + if (!erofs_sb_has_big_pcluster(sbi) && + vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) { vi->z_advise = 0; vi->z_algorithmtype[0] = 0; vi->z_algorithmtype[1] = 0; @@ -49,7 +51,8 @@ static int z_erofs_fill_inode_lazy(struct inode *inode) if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) goto out_unlock; - DBG_BUGON(vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY); + DBG_BUGON(!erofs_sb_has_big_pcluster(EROFS_SB(sb)) && + vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY); pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize + vi->xattr_isize, 8); @@ -96,7 +99,7 @@ struct z_erofs_maprecorder { u8 type; u16 clusterofs; u16 delta[2]; - erofs_blk_t pblk; + erofs_blk_t pblk, compressedlcs; }; static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m, @@ -159,6 +162,15 @@ static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m, case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: m->clusterofs = 1 << vi->z_logical_clusterbits; m->delta[0] = le16_to_cpu(di->di_u.delta[0]); + if (m->delta[0] & Z_EROFS_VLE_DI_D0_CBLKCNT) { + if (!(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) { + DBG_BUGON(1); + return -EFSCORRUPTED; + } + m->compressedlcs = m->delta[0] & + ~Z_EROFS_VLE_DI_D0_CBLKCNT; + m->delta[0] = 1; + } m->delta[1] = le16_to_cpu(di->di_u.delta[1]); break; case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: @@ -366,6 +378,58 @@ static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m, return 0; } +static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m, + unsigned int initial_lcn) +{ + struct erofs_inode *const vi = EROFS_I(m->inode); + struct erofs_map_blocks *const map = m->map; + const unsigned int lclusterbits = vi->z_logical_clusterbits; + 
unsigned long lcn; + int err; + + DBG_BUGON(m->type != Z_EROFS_VLE_CLUSTER_TYPE_PLAIN && + m->type != Z_EROFS_VLE_CLUSTER_TYPE_HEAD); + if (!(map->m_flags & EROFS_MAP_ZIPPED) || + !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) { + map->m_plen = 1 << lclusterbits; + return 0; + } + + lcn = m->lcn + 1; + if (m->compressedlcs) + goto out; + if (lcn == initial_lcn) + goto err_bonus_cblkcnt; + + err = z_erofs_load_cluster_from_disk(m, lcn); + if (err) + return err; + + switch (m->type) { + case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: + if (m->delta[0] != 1) + goto err_bonus_cblkcnt; + if (m->compressedlcs) + break; + fallthrough; + default: + erofs_err(m->inode->i_sb, + "cannot found CBLKCNT @ lcn %lu of nid %llu", + lcn, vi->nid); + DBG_BUGON(1); + return -EFSCORRUPTED; + } +out: + map->m_plen = m->compressedlcs << lclusterbits; + return 0; +err_bonus_cblkcnt: + erofs_err(m->inode->i_sb, + "bogus CBLKCNT @ lcn %lu of nid %llu", + lcn, vi->nid); + DBG_BUGON(1); + return -EFSCORRUPTED; +} + int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map, int flags) @@ -377,6 +441,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, }; int err = 0; unsigned int lclusterbits, endoff; + unsigned long initial_lcn; unsigned long long ofs, end; trace_z_erofs_map_blocks_iter_enter(inode, map, flags); @@ -395,10 +460,10 @@ int z_erofs_map_blocks_iter(struct inode *inode, lclusterbits = vi->z_logical_clusterbits; ofs = map->m_la; - m.lcn = ofs >> lclusterbits; + initial_lcn = ofs >> lclusterbits; endoff = ofs & ((1 << lclusterbits) - 1); - err = z_erofs_load_cluster_from_disk(&m, m.lcn); + err = z_erofs_load_cluster_from_disk(&m, initial_lcn); if (err) goto unmap_out; @@ -442,10 +507,12 @@ int z_erofs_map_blocks_iter(struct inode *inode, } map->m_llen = end - map->m_la; - map->m_plen = 1 << lclusterbits; map->m_pa = blknr_to_addr(m.pblk); map->m_flags |= EROFS_MAP_MAPPED; + err = z_erofs_get_extent_compressedlen(&m, initial_lcn); + if (err) + goto out; unmap_out: if (m.kaddr) kunmap_atomic(m.kaddr); From b86269f43892316ef5a177d7180d09d101a46f22 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 7 Apr 2021 12:39:25 +0800 Subject: [PATCH 19/21] erofs: support parsing big pcluster compact indexes Different from non-compact indexes, several lclusters are packed as the compact form at once and an unique base blkaddr is stored for each pack, so each lcluster index would take less space on avarage (e.g. 2 bytes for COMPACT_2B.) btw, that is also why BIG_PCLUSTER switch should be consistent for compact head0/1. Prior to big pcluster, the size of all pclusters was 1 lcluster. Therefore, when a new HEAD lcluster was scanned, blkaddr would be bumped by 1 lcluster. However, that way doesn't work anymore for big pcluster since we actually don't know the compressed size of pclusters in advance (before reading CBLKCNT lcluster). So, instead, let blkaddr of each pack be the first pcluster blkaddr with a valid CBLKCNT, in detail, 1) if CBLKCNT starts at the pack, this first valid pcluster is itself, e.g. _____________________________________________________________ |_CBLKCNT0_|_NONHEAD_| .. |_HEAD_|_CBLKCNT1_| ... |_HEAD_| ... ^ = blkaddr base ^ += CBLKCNT0 ^ += CBLKCNT1 2) if CBLKCNT doesn't start at the pack, the first valid pcluster is the next pcluster, e.g. _________________________________________________________ | NONHEAD_| .. |_HEAD_|_CBLKCNT0_| ... |_HEAD_|_HEAD_| ... 
^ = blkaddr base ^ += CBLKCNT0 ^ += 1 When a CBLKCNT is found, blkaddr will be increased by CBLKCNT lclusters, or a new HEAD is found immediately, bump blkaddr by 1 instead (see the picture above.) Also noted if CBLKCNT is the end of the pack, instead of storing delta1 (distance of the next HEAD lcluster) as normal NONHEADs, it still uses the compressed block count (delta0) since delta1 can be calculated indirectly but the block count can't. Adjust decoding logic to fit big pcluster compact indexes as well. Link: https://lore.kernel.org/r/20210407043927.10623-9-xiang@kernel.org Acked-by: Chao Yu Signed-off-by: Gao Xiang --- fs/erofs/zmap.c | 70 ++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 61 insertions(+), 9 deletions(-) diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index 6c0c47f68b75..e62d813756f2 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -77,6 +77,22 @@ static int z_erofs_fill_inode_lazy(struct inode *inode) } vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7); + if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) && + vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 | + Z_EROFS_ADVISE_BIG_PCLUSTER_2)) { + erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu", + vi->nid); + err = -EFSCORRUPTED; + goto unmap_done; + } + if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION && + !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^ + !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) { + erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu", + vi->nid); + err = -EFSCORRUPTED; + goto unmap_done; + } /* paired with smp_mb() at the beginning of the function */ smp_mb(); set_bit(EROFS_I_Z_INITED_BIT, &vi->flags); @@ -207,6 +223,7 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, unsigned int vcnt, base, lo, encodebits, nblk; int i; u8 *in, type; + bool big_pcluster; if (1 << amortizedshift == 4) vcnt = 2; @@ -215,6 +232,7 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, else return -EOPNOTSUPP; + big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1; encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt; base = round_down(eofs, vcnt << amortizedshift); in = m->kaddr + base; @@ -226,7 +244,15 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, m->type = type; if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) { m->clusterofs = 1 << lclusterbits; - if (i + 1 != vcnt) { + if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) { + if (!big_pcluster) { + DBG_BUGON(1); + return -EFSCORRUPTED; + } + m->compressedlcs = lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT; + m->delta[0] = 1; + return 0; + } else if (i + 1 != (int)vcnt) { m->delta[0] = lo; return 0; } @@ -239,22 +265,48 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, in, encodebits * (i - 1), &type); if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) lo = 0; + else if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) + lo = 1; m->delta[0] = lo + 1; return 0; } m->clusterofs = lo; m->delta[0] = 0; /* figout out blkaddr (pblk) for HEAD lclusters */ - nblk = 1; - while (i > 0) { - --i; - lo = decode_compactedbits(lclusterbits, lomask, - in, encodebits * i, &type); - if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) - i -= lo; + if (!big_pcluster) { + nblk = 1; + while (i > 0) { + --i; + lo = decode_compactedbits(lclusterbits, lomask, + in, encodebits * i, &type); + if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) + i -= lo; - if (i >= 0) + if (i >= 0) + ++nblk; + } + } else { + nblk = 0; + while (i > 0) { + --i; + lo = 
decode_compactedbits(lclusterbits, lomask, + in, encodebits * i, &type); + if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) { + if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) { + --i; + nblk += lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT; + continue; + } + /* bigpcluster shouldn't have plain d0 == 1 */ + if (lo <= 1) { + DBG_BUGON(1); + return -EFSCORRUPTED; + } + i -= lo - 2; + continue; + } ++nblk; + } } in += (vcnt << amortizedshift) - sizeof(__le32); m->pblk = le32_to_cpu(*(__le32 *)in) + nblk; From 598162d050801e556750defff4ddab499e5d76ed Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 7 Apr 2021 12:39:26 +0800 Subject: [PATCH 20/21] erofs: support decompress big pcluster for lz4 backend Prior to big pcluster, there was only one compressed page so it'd easy to map this. However, when big pcluster is enabled, more work needs to be done to handle multiple compressed pages. In detail, - (maptype 0) if there is only one compressed page + no need to copy inplace I/O, just map it directly what we did before; - (maptype 1) if there are more compressed pages + no need to copy inplace I/O, vmap such compressed pages instead; - (maptype 2) if inplace I/O needs to be copied, use per-CPU buffers for decompression then. Another thing is how to detect inplace decompression is feasable or not (it's still quite easy for non big pclusters), apart from the inplace margin calculation, inplace I/O page reusing order is also needed to be considered for each compressed page. Currently, if the compressed page is the xth page, it shouldn't be reused as [0 ... nrpages_out - nrpages_in + x], otherwise a full copy will be triggered. Although there are some extra optimization ideas for this, I'd like to make big pcluster work correctly first and obviously it can be further optimized later since it has nothing with the on-disk format at all. Link: https://lore.kernel.org/r/20210407043927.10623-10-xiang@kernel.org Acked-by: Chao Yu Signed-off-by: Gao Xiang --- fs/erofs/decompressor.c | 220 ++++++++++++++++++++++------------------ fs/erofs/internal.h | 15 +++ 2 files changed, 139 insertions(+), 96 deletions(-) diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index 900de4725d35..88e33addf229 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -120,44 +120,85 @@ static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq, return kaddr ? 1 : 0; } -static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq, - u8 *src, unsigned int pageofs_in) +static void *z_erofs_handle_inplace_io(struct z_erofs_decompress_req *rq, + void *inpage, unsigned int *inputmargin, int *maptype, + bool support_0padding) { - /* - * if in-place decompression is ongoing, those decompressed - * pages should be copied in order to avoid being overlapped. 
- */ - struct page **in = rq->in; - u8 *const tmp = erofs_get_pcpubuf(1); - u8 *tmpp = tmp; - unsigned int inlen = rq->inputsize - pageofs_in; - unsigned int count = min_t(uint, inlen, PAGE_SIZE - pageofs_in); + unsigned int nrpages_in, nrpages_out; + unsigned int ofull, oend, inputsize, total, i, j; + struct page **in; + void *src, *tmp; - while (tmpp < tmp + inlen) { - if (!src) - src = kmap_atomic(*in); - memcpy(tmpp, src + pageofs_in, count); - kunmap_atomic(src); - src = NULL; - tmpp += count; - pageofs_in = 0; - count = PAGE_SIZE; - ++in; + inputsize = rq->inputsize; + nrpages_in = PAGE_ALIGN(inputsize) >> PAGE_SHIFT; + oend = rq->pageofs_out + rq->outputsize; + ofull = PAGE_ALIGN(oend); + nrpages_out = ofull >> PAGE_SHIFT; + + if (rq->inplace_io) { + if (rq->partial_decoding || !support_0padding || + ofull - oend < LZ4_DECOMPRESS_INPLACE_MARGIN(inputsize)) + goto docopy; + + for (i = 0; i < nrpages_in; ++i) { + DBG_BUGON(rq->in[i] == NULL); + for (j = 0; j < nrpages_out - nrpages_in + i; ++j) + if (rq->out[j] == rq->in[i]) + goto docopy; + } } - return tmp; + + if (nrpages_in <= 1) { + *maptype = 0; + return inpage; + } + kunmap_atomic(inpage); + might_sleep(); + src = erofs_vm_map_ram(rq->in, nrpages_in); + if (!src) + return ERR_PTR(-ENOMEM); + *maptype = 1; + return src; + +docopy: + /* Or copy compressed data which can be overlapped to per-CPU buffer */ + in = rq->in; + src = erofs_get_pcpubuf(nrpages_in); + if (!src) { + DBG_BUGON(1); + kunmap_atomic(inpage); + return ERR_PTR(-EFAULT); + } + + tmp = src; + total = rq->inputsize; + while (total) { + unsigned int page_copycnt = + min_t(unsigned int, total, PAGE_SIZE - *inputmargin); + + if (!inpage) + inpage = kmap_atomic(*in); + memcpy(tmp, inpage + *inputmargin, page_copycnt); + kunmap_atomic(inpage); + inpage = NULL; + tmp += page_copycnt; + total -= page_copycnt; + ++in; + *inputmargin = 0; + } + *maptype = 2; + return src; } static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out) { - unsigned int inputmargin, inlen; - u8 *src; - bool copied, support_0padding; - int ret; + unsigned int inputmargin; + u8 *headpage, *src; + bool support_0padding; + int ret, maptype; - if (rq->inputsize > PAGE_SIZE) - return -EOPNOTSUPP; - - src = kmap_atomic(*rq->in); + DBG_BUGON(*rq->in == NULL); + headpage = kmap_atomic(*rq->in); inputmargin = 0; support_0padding = false; @@ -165,50 +206,37 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out) if (erofs_sb_has_lz4_0padding(EROFS_SB(rq->sb))) { support_0padding = true; - while (!src[inputmargin & ~PAGE_MASK]) + while (!headpage[inputmargin & ~PAGE_MASK]) if (!(++inputmargin & ~PAGE_MASK)) break; if (inputmargin >= rq->inputsize) { - kunmap_atomic(src); + kunmap_atomic(headpage); return -EIO; } } - copied = false; - inlen = rq->inputsize - inputmargin; - if (rq->inplace_io) { - const uint oend = (rq->pageofs_out + - rq->outputsize) & ~PAGE_MASK; - const uint nr = PAGE_ALIGN(rq->pageofs_out + - rq->outputsize) >> PAGE_SHIFT; - - if (rq->partial_decoding || !support_0padding || - rq->out[nr - 1] != rq->in[0] || - rq->inputsize - oend < - LZ4_DECOMPRESS_INPLACE_MARGIN(inlen)) { - src = generic_copy_inplace_data(rq, src, inputmargin); - inputmargin = 0; - copied = true; - } - } + rq->inputsize -= inputmargin; + src = z_erofs_handle_inplace_io(rq, headpage, &inputmargin, &maptype, + support_0padding); + if (IS_ERR(src)) + return PTR_ERR(src); /* legacy format could compress extra data in a pcluster. 
*/ if (rq->partial_decoding || !support_0padding) ret = LZ4_decompress_safe_partial(src + inputmargin, out, - inlen, rq->outputsize, - rq->outputsize); + rq->inputsize, rq->outputsize, rq->outputsize); else ret = LZ4_decompress_safe(src + inputmargin, out, - inlen, rq->outputsize); + rq->inputsize, rq->outputsize); if (ret != rq->outputsize) { erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]", - ret, inlen, inputmargin, rq->outputsize); + ret, rq->inputsize, inputmargin, rq->outputsize); WARN_ON(1); print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET, - 16, 1, src + inputmargin, inlen, true); + 16, 1, src + inputmargin, rq->inputsize, true); print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET, 16, 1, out, rq->outputsize, true); @@ -217,10 +245,16 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out) ret = -EIO; } - if (copied) - erofs_put_pcpubuf(src); - else + if (maptype == 0) { kunmap_atomic(src); + } else if (maptype == 1) { + vm_unmap_ram(src, PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT); + } else if (maptype == 2) { + erofs_put_pcpubuf(src); + } else { + DBG_BUGON(1); + return -EFAULT; + } return ret; } @@ -270,57 +304,51 @@ static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq, const struct z_erofs_decompressor *alg = decompressors + rq->alg; unsigned int dst_maptype; void *dst; - int ret, i; + int ret; - if (nrpages_out == 1 && !rq->inplace_io) { - DBG_BUGON(!*rq->out); - dst = kmap_atomic(*rq->out); - dst_maptype = 0; - goto dstmap_out; - } - - /* - * For the case of small output size (especially much less - * than PAGE_SIZE), memcpy the decompressed data rather than - * compressed data is preferred. - */ - if (rq->outputsize <= PAGE_SIZE * 7 / 8) { - dst = erofs_get_pcpubuf(1); - if (IS_ERR(dst)) - return PTR_ERR(dst); - - rq->inplace_io = false; - ret = alg->decompress(rq, dst); - if (!ret) - copy_from_pcpubuf(rq->out, dst, rq->pageofs_out, - rq->outputsize); - - erofs_put_pcpubuf(dst); - return ret; + /* two optimized fast paths only for non bigpcluster cases yet */ + if (rq->inputsize <= PAGE_SIZE) { + if (nrpages_out == 1 && !rq->inplace_io) { + DBG_BUGON(!*rq->out); + dst = kmap_atomic(*rq->out); + dst_maptype = 0; + goto dstmap_out; + } + + /* + * For the case of small output size (especially much less + * than PAGE_SIZE), memcpy the decompressed data rather than + * compressed data is preferred. 
+ */ + if (rq->outputsize <= PAGE_SIZE * 7 / 8) { + dst = erofs_get_pcpubuf(1); + if (IS_ERR(dst)) + return PTR_ERR(dst); + + rq->inplace_io = false; + ret = alg->decompress(rq, dst); + if (!ret) + copy_from_pcpubuf(rq->out, dst, rq->pageofs_out, + rq->outputsize); + + erofs_put_pcpubuf(dst); + return ret; + } } + /* general decoding path which can be used for all cases */ ret = alg->prepare_destpages(rq, pagepool); - if (ret < 0) { + if (ret < 0) return ret; - } else if (ret) { + if (ret) { dst = page_address(*rq->out); dst_maptype = 1; goto dstmap_out; } - i = 0; - while (1) { - dst = vm_map_ram(rq->out, nrpages_out, -1); - - /* retry two more times (totally 3 times) */ - if (dst || ++i >= 3) - break; - vm_unmap_aliases(); - } - + dst = erofs_vm_map_ram(rq->out, nrpages_out); if (!dst) return -ENOMEM; - dst_maptype = 2; dstmap_out: diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 5c2388daee6e..f92e3e32b9f4 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -402,6 +402,21 @@ int erofs_namei(struct inode *dir, struct qstr *name, /* dir.c */ extern const struct file_operations erofs_dir_fops; +static inline void *erofs_vm_map_ram(struct page **pages, unsigned int count) +{ + int retried = 0; + + while (1) { + void *p = vm_map_ram(pages, count, -1); + + /* retry two more times (totally 3 times) */ + if (p || ++retried >= 3) + return p; + vm_unmap_aliases(); + } + return NULL; +} + /* pcpubuf.c */ void *erofs_get_pcpubuf(unsigned int requiredpages); void erofs_put_pcpubuf(void *ptr); From 8e6c8fa9f2e95c88a642521a5da19a8e31748846 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 7 Apr 2021 12:39:27 +0800 Subject: [PATCH 21/21] erofs: enable big pcluster feature Enable COMPR_CFGS and BIG_PCLUSTER since the implementations are all settled properly. Link: https://lore.kernel.org/r/20210407043927.10623-11-xiang@kernel.org Acked-by: Chao Yu Signed-off-by: Gao Xiang --- fs/erofs/erofs_fs.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h index ecc3a0ea0bc4..8739d3adf51f 100644 --- a/fs/erofs/erofs_fs.h +++ b/fs/erofs/erofs_fs.h @@ -20,7 +20,10 @@ #define EROFS_FEATURE_INCOMPAT_LZ4_0PADDING 0x00000001 #define EROFS_FEATURE_INCOMPAT_COMPR_CFGS 0x00000002 #define EROFS_FEATURE_INCOMPAT_BIG_PCLUSTER 0x00000002 -#define EROFS_ALL_FEATURE_INCOMPAT EROFS_FEATURE_INCOMPAT_LZ4_0PADDING +#define EROFS_ALL_FEATURE_INCOMPAT \ + (EROFS_FEATURE_INCOMPAT_LZ4_0PADDING | \ + EROFS_FEATURE_INCOMPAT_COMPR_CFGS | \ + EROFS_FEATURE_INCOMPAT_BIG_PCLUSTER) #define EROFS_SB_EXTSLOT_SIZE 16
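
For reference, the expanded incompatible-feature mask above is what the mount path validates against: any on-disk incompat bit not covered by EROFS_ALL_FEATURE_INCOMPAT must fail the mount, so adding BIG_PCLUSTER here is what actually exposes the new on-disk format to users. A minimal sketch of that gate follows; it is based on the superblock check in fs/erofs/super.c of this era, and the exact function name and wording may differ slightly from the tree these patches apply to:

	static bool check_layout_compatibility(struct super_block *sb,
					       struct erofs_super_block *dsb)
	{
		const unsigned int feature = le32_to_cpu(dsb->feature_incompat);

		EROFS_SB(sb)->feature_incompat = feature;

		/* reject images that need features this kernel doesn't implement */
		if (feature & ~EROFS_ALL_FEATURE_INCOMPAT) {
			erofs_err(sb, "unidentified incompatible feature %x, please upgrade kernel version",
				  feature & ~EROFS_ALL_FEATURE_INCOMPAT);
			return false;
		}
		return true;
	}

With BIG_PCLUSTER included in the mask, images whose pclusters span more than one block (for example, ones generated by a sufficiently recent erofs-utils with a physical cluster size larger than one block) are accepted at this point instead of being rejected as unknown-feature images.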