Merge tag 'erofs-for-6.5-rc2-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs

Pull erofs fixes from Gao Xiang:
 "Three patches address regressions related to post-EOF unexpected
  behaviors and fsdax unavailability of chunk-based regular files.

  The other two patches mainly get rid of kmap_atomic() and simplify
  z_erofs_transform_plain().

   - Fix two unexpected loop cases when reading beyond EOF

   - Fix fsdax unavailability for chunk-based regular files

   - Get rid of the remaining kmap_atomic()

   - Minor cleanups"

* tag 'erofs-for-6.5-rc2-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: fix fsdax unavailability for chunk-based regular files
  erofs: avoid infinite loop in z_erofs_do_read_page() when reading beyond EOF
  erofs: avoid useless loops in z_erofs_pcluster_readmore() when reading beyond EOF
  erofs: simplify z_erofs_transform_plain()
  erofs: get rid of the remaining kmap_atomic()
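
The kmap conversions in the decompressor diff below all follow the same pattern: kmap_atomic()/kunmap_atomic() are deprecated in favour of kmap_local_page()/kunmap_local(), which create short-lived, CPU-local mappings without disabling preemption or page faults. A minimal sketch of the replacement pattern (copy_one_page() is a hypothetical helper used only for illustration, not erofs code):

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper: copy the contents of one page into another. */
static void copy_one_page(struct page *dst_page, struct page *src_page)
{
	void *src = kmap_local_page(src_page);
	void *dst = kmap_local_page(dst_page);

	memcpy(dst, src, PAGE_SIZE);
	/* local mappings must be released in reverse (stack) order */
	kunmap_local(dst);
	kunmap_local(src);
}
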
commit 4b810bf037
Author: Linus Torvalds
Date:   2023-07-13 14:35:02 -07:00
3 changed files with 21 additions and 23 deletions

fs/erofs/decompressor.c

@@ -148,7 +148,7 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
 		*maptype = 0;
 		return inpage;
 	}
-	kunmap_atomic(inpage);
+	kunmap_local(inpage);
 	might_sleep();
 	src = erofs_vm_map_ram(rq->in, ctx->inpages);
 	if (!src)
@@ -162,7 +162,7 @@ docopy:
 	src = erofs_get_pcpubuf(ctx->inpages);
 	if (!src) {
 		DBG_BUGON(1);
-		kunmap_atomic(inpage);
+		kunmap_local(inpage);
 		return ERR_PTR(-EFAULT);
 	}
 
@@ -173,9 +173,9 @@ docopy:
 			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);
 
 		if (!inpage)
-			inpage = kmap_atomic(*in);
+			inpage = kmap_local_page(*in);
 		memcpy(tmp, inpage + *inputmargin, page_copycnt);
-		kunmap_atomic(inpage);
+		kunmap_local(inpage);
 		inpage = NULL;
 		tmp += page_copycnt;
 		total -= page_copycnt;
@@ -214,7 +214,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
 	int ret, maptype;
 
 	DBG_BUGON(*rq->in == NULL);
-	headpage = kmap_atomic(*rq->in);
+	headpage = kmap_local_page(*rq->in);
 
 	/* LZ4 decompression inplace is only safe if zero_padding is enabled */
 	if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
@@ -223,7 +223,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
 				min_t(unsigned int, rq->inputsize,
 				      rq->sb->s_blocksize - rq->pageofs_in));
 		if (ret) {
-			kunmap_atomic(headpage);
+			kunmap_local(headpage);
 			return ret;
 		}
 		may_inplace = !((rq->pageofs_in + rq->inputsize) &
@@ -261,7 +261,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
 	}
 
 	if (maptype == 0) {
-		kunmap_atomic(headpage);
+		kunmap_local(headpage);
 	} else if (maptype == 1) {
 		vm_unmap_ram(src, ctx->inpages);
 	} else if (maptype == 2) {
@@ -289,7 +289,7 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
 	/* one optimized fast path only for non bigpcluster cases yet */
 	if (ctx.inpages == 1 && ctx.outpages == 1 && !rq->inplace_io) {
 		DBG_BUGON(!*rq->out);
-		dst = kmap_atomic(*rq->out);
+		dst = kmap_local_page(*rq->out);
 		dst_maptype = 0;
 		goto dstmap_out;
 	}
@@ -311,7 +311,7 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
 dstmap_out:
 	ret = z_erofs_lz4_decompress_mem(&ctx, dst + rq->pageofs_out);
 	if (!dst_maptype)
-		kunmap_atomic(dst);
+		kunmap_local(dst);
 	else if (dst_maptype == 2)
 		vm_unmap_ram(dst, ctx.outpages);
 	return ret;
@@ -328,7 +328,7 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
 	const unsigned int lefthalf = rq->outputsize - righthalf;
 	const unsigned int interlaced_offset =
 		rq->alg == Z_EROFS_COMPRESSION_SHIFTED ? 0 : rq->pageofs_out;
-	unsigned char *src, *dst;
+	u8 *src;
 
 	if (outpages > 2 && rq->alg == Z_EROFS_COMPRESSION_SHIFTED) {
 		DBG_BUGON(1);
@@ -341,22 +341,19 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
 	}
 	src = kmap_local_page(rq->in[inpages - 1]) + rq->pageofs_in;
-	if (rq->out[0]) {
-		dst = kmap_local_page(rq->out[0]);
-		memcpy(dst + rq->pageofs_out, src + interlaced_offset,
-		       righthalf);
-		kunmap_local(dst);
-	}
+	if (rq->out[0])
+		memcpy_to_page(rq->out[0], rq->pageofs_out,
+			       src + interlaced_offset, righthalf);
 
 	if (outpages > inpages) {
 		DBG_BUGON(!rq->out[outpages - 1]);
 		if (rq->out[outpages - 1] != rq->in[inpages - 1]) {
-			dst = kmap_local_page(rq->out[outpages - 1]);
-			memcpy(dst, interlaced_offset ? src :
-					(src + righthalf), lefthalf);
-			kunmap_local(dst);
+			memcpy_to_page(rq->out[outpages - 1], 0, src +
+					(interlaced_offset ? 0 : righthalf),
+					lefthalf);
 		} else if (!interlaced_offset) {
 			memmove(src, src + righthalf, lefthalf);
 			flush_dcache_page(rq->in[inpages - 1]);
 		}
 	}
 	kunmap_local(src);
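
For reference, memcpy_to_page() bundles the map/copy/flush/unmap sequence that the removed lines above open-coded. Roughly, the helper from include/linux/highmem.h behaves like this simplified sketch (not a verbatim copy of the kernel source):

static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);

	memcpy(to + offset, from, len);
	flush_dcache_page(page);	/* keep the destination page's D-cache coherent */
	kunmap_local(to);
}

With this, z_erofs_transform_plain() no longer needs a separate dst mapping, which is why the dst variable disappears from the declarations above.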

fs/erofs/inode.c

@@ -183,7 +183,8 @@ static void *erofs_read_inode(struct erofs_buf *buf,
 
 	inode->i_flags &= ~S_DAX;
 	if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
-	    vi->datalayout == EROFS_INODE_FLAT_PLAIN)
+	    (vi->datalayout == EROFS_INODE_FLAT_PLAIN ||
+	     vi->datalayout == EROFS_INODE_CHUNK_BASED))
 		inode->i_flags |= S_DAX;
 
 	if (!nblks)
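
Setting S_DAX here is what lets chunk-based regular files reach the fsdax I/O path at all; previously they silently fell back to buffered I/O even with dax=always. As a rough, generic illustration of what S_DAX enables (example_file_read_iter and example_iomap_ops are made-up names, not the actual erofs code), a filesystem's read_iter typically dispatches like this:

#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/uio.h>

extern const struct iomap_ops example_iomap_ops;	/* assumed to be defined elsewhere */

static ssize_t example_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (IS_DAX(inode))
		/* fsdax: access the backing memory directly, bypassing the page cache */
		return dax_iomap_rw(iocb, to, &example_iomap_ops);
	return generic_file_read_iter(iocb, to);
}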

fs/erofs/zdata.c

@@ -1035,7 +1035,7 @@ hitted:
 	 */
 	tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
 
-	cur = end - min_t(unsigned int, offset + end - map->m_la, end);
+	cur = end - min_t(erofs_off_t, offset + end - map->m_la, end);
 	if (!(map->m_flags & EROFS_MAP_MAPPED)) {
 		zero_user_segment(page, cur, end);
 		goto next_part;
@@ -1841,7 +1841,7 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
 	}
 
 	cur = map->m_la + map->m_llen - 1;
-	while (cur >= end) {
+	while ((cur >= end) && (cur < i_size_read(inode))) {
 		pgoff_t index = cur >> PAGE_SHIFT;
 		struct page *page;
 
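
Both zdata.c hunks address reads past EOF. In z_erofs_do_read_page(), the byte distance offset + end - map->m_la is a 64-bit quantity that can exceed 4 GiB once a read lands far beyond the last mapped extent; evaluating min_t() with a 32-bit type truncates it and corrupts cur, which led to the infinite loop, hence the switch to erofs_off_t. In z_erofs_pcluster_readmore(), the loop is now additionally bounded by i_size_read(inode) so it stops grabbing page indices that lie entirely beyond EOF. A standalone sketch of the truncation problem (values are illustrative only, not taken from the original report):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t distance = (1ULL << 32) + 16;		/* e.g. a span far past EOF */
	uint32_t truncated = (uint32_t)distance;	/* what a 32-bit min_t operand keeps */

	/* prints "4294967312 -> 16": the truncated value is wildly too small */
	printf("%llu -> %u\n", (unsigned long long)distance, truncated);
	return 0;
}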