Revert "btrfs: compression: drop kmap/kunmap from lzo"
This reverts commit 8c945d32e6. The kmaps in compression code are still
needed and cause crashes on 32bit machines (ARM, x86). Reproducible eg.
by running fstest btrfs/004 with enabled LZO or ZSTD compression.

The revert does not apply cleanly due to changes in a6e66e6f8c ("btrfs:
rework lzo_decompress_bio() to make it subpage compatible") that
reworked the page iteration so the revert is done to be equivalent to
the original code.

Link: https://lore.kernel.org/all/CAJCQCtT+OuemovPO7GZk8Y8=qtOObr0XTDp8jh4OHD6y84AFxw@mail.gmail.com/
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=214839
Tested-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 55276e14df
commit ccaa66c8dd
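For background on the crash: on 32-bit kernels with highmem enabled, pages allocated from the highmem zone have no permanent kernel virtual address, so page_address() can return NULL for them and dereferencing the result faults. The compression code works on pages that may come from highmem, which is why the kmap()/kunmap() calls are still required. A minimal sketch of the pattern the revert restores follows; the helper name is illustrative, not taken from the patch.

```c
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/*
 * Illustrative only: copy out of a page that may live in highmem.
 * For a lowmem page page_address() would be enough, but for a highmem
 * page it can return NULL, so the page has to be temporarily mapped
 * first. kmap() may sleep waiting for a free slot, so it must not be
 * used from atomic context.
 */
static void copy_from_maybe_highmem(struct page *page, void *dst, size_t len)
{
        char *kaddr = kmap(page);       /* create a temporary kernel mapping */

        memcpy(dst, kaddr, len);
        kunmap(page);                   /* drop the mapping again */
}
```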
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -141,7 +141,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
         *total_in = 0;
 
         in_page = find_get_page(mapping, start >> PAGE_SHIFT);
-        data_in = page_address(in_page);
+        data_in = kmap(in_page);
 
         /*
          * store the size of all chunks of compressed data in
@@ -152,7 +152,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
                 ret = -ENOMEM;
                 goto out;
         }
-        cpage_out = page_address(out_page);
+        cpage_out = kmap(out_page);
         out_offset = LZO_LEN;
         tot_out = LZO_LEN;
         pages[0] = out_page;
@@ -210,6 +210,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
                         if (out_len == 0 && tot_in >= len)
                                 break;
 
+                        kunmap(out_page);
                         if (nr_pages == nr_dest_pages) {
                                 out_page = NULL;
                                 ret = -E2BIG;
@@ -221,7 +222,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
                                 ret = -ENOMEM;
                                 goto out;
                         }
-                        cpage_out = page_address(out_page);
+                        cpage_out = kmap(out_page);
                         pages[nr_pages++] = out_page;
 
                         pg_bytes_left = PAGE_SIZE;
@@ -243,11 +244,12 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
                         break;
 
                 bytes_left = len - tot_in;
+                kunmap(in_page);
                 put_page(in_page);
 
                 start += PAGE_SIZE;
                 in_page = find_get_page(mapping, start >> PAGE_SHIFT);
-                data_in = page_address(in_page);
+                data_in = kmap(in_page);
                 in_len = min(bytes_left, PAGE_SIZE);
         }
 
@@ -257,17 +259,22 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
         }
 
         /* store the size of all chunks of compressed data */
-        sizes_ptr = page_address(pages[0]);
+        sizes_ptr = kmap_local_page(pages[0]);
         write_compress_length(sizes_ptr, tot_out);
+        kunmap_local(sizes_ptr);
 
         ret = 0;
         *total_out = tot_out;
         *total_in = tot_in;
 out:
         *out_pages = nr_pages;
+        if (out_page)
+                kunmap(out_page);
 
-        if (in_page)
+        if (in_page) {
+                kunmap(in_page);
                 put_page(in_page);
+        }
 
         return ret;
 }
@@ -283,6 +290,7 @@ static void copy_compressed_segment(struct compressed_bio *cb,
         u32 orig_in = *cur_in;
 
         while (*cur_in < orig_in + len) {
+                char *kaddr;
                 struct page *cur_page;
                 u32 copy_len = min_t(u32, PAGE_SIZE - offset_in_page(*cur_in),
                                           orig_in + len - *cur_in);
@@ -290,9 +298,11 @@ static void copy_compressed_segment(struct compressed_bio *cb,
                 ASSERT(copy_len);
                 cur_page = cb->compressed_pages[*cur_in / PAGE_SIZE];
 
+                kaddr = kmap(cur_page);
                 memcpy(dest + *cur_in - orig_in,
-                       page_address(cur_page) + offset_in_page(*cur_in),
+                       kaddr + offset_in_page(*cur_in),
                        copy_len);
+                kunmap(cur_page);
 
                 *cur_in += copy_len;
         }
@@ -303,6 +313,7 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
         struct workspace *workspace = list_entry(ws, struct workspace, list);
         const struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
         const u32 sectorsize = fs_info->sectorsize;
+        char *kaddr;
         int ret;
         /* Compressed data length, can be unaligned */
         u32 len_in;
@@ -311,7 +322,9 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
         /* Bytes decompressed so far */
         u32 cur_out = 0;
 
-        len_in = read_compress_length(page_address(cb->compressed_pages[0]));
+        kaddr = kmap(cb->compressed_pages[0]);
+        len_in = read_compress_length(kaddr);
+        kunmap(cb->compressed_pages[0]);
         cur_in += LZO_LEN;
 
         /*
@@ -344,9 +357,9 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
                 ASSERT(cur_in / sectorsize ==
                        (cur_in + LZO_LEN - 1) / sectorsize);
                 cur_page = cb->compressed_pages[cur_in / PAGE_SIZE];
+                kaddr = kmap(cur_page);
                 ASSERT(cur_page);
-                seg_len = read_compress_length(page_address(cur_page) +
-                                               offset_in_page(cur_in));
+                seg_len = read_compress_length(kaddr + offset_in_page(cur_in));
                 cur_in += LZO_LEN;
 
                 /* Copy the compressed segment payload into workspace */
@@ -431,7 +444,7 @@ int lzo_decompress(struct list_head *ws, unsigned char *data_in,
         destlen = min_t(unsigned long, destlen, PAGE_SIZE);
         bytes = min_t(unsigned long, destlen, out_len - start_byte);
 
-        kaddr = page_address(dest_page);
+        kaddr = kmap_local_page(dest_page);
         memcpy(kaddr, workspace->buf + start_byte, bytes);
 
         /*
@@ -441,6 +454,7 @@ int lzo_decompress(struct list_head *ws, unsigned char *data_in,
          */
         if (bytes < destlen)
                 memset(kaddr+bytes, 0, destlen-bytes);
+        kunmap_local(kaddr);
 out:
         return ret;
 }
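Worth noting in the restored code: the revert brings back the original mix of mapping interfaces. Mappings held across loop iterations (data_in, cpage_out) and the per-page mappings in copy_compressed_segment() and lzo_decompress_bio() use kmap()/kunmap(), while the two short, strictly nested accesses (sizes_ptr in lzo_compress_pages() and kaddr in lzo_decompress()) use kmap_local_page()/kunmap_local(), which is thread-local and must be released in reverse (LIFO) order. A hedged sketch of the short-lived variant; the helper name and put_unaligned_le32() standing in for write_compress_length() are illustrative, not from the patch.

```c
#include <linux/highmem.h>
#include <asm/unaligned.h>

/*
 * Sketch of the kmap_local_page() pattern: map, touch the page, unmap,
 * all within one short code section of the same thread, unmapping in
 * LIFO order if several local mappings are nested.
 */
static void store_total_length(struct page *first_page, u32 tot_out)
{
        char *kaddr = kmap_local_page(first_page);

        put_unaligned_le32(tot_out, kaddr);     /* header: total compressed size */
        kunmap_local(kaddr);
}
```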