btrfs: compression: migrate compression/decompression paths to folios
For both the compression and decompression paths we always carry a "struct page **pages" together with an "unsigned long nr_pages". That touches quite a few parts of the btrfs compression code:

- all the compression entry points
- the compressed_bio structure (this affects both compression and decompression)
- the async_extent structure

Unfortunately, with all those parts involved there is no good way to split the conversion into smaller patches that still compile, so do the whole conversion in one go.

Please note this is a direct page->folio conversion; there is no change to the page-sized folio requirement yet.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
[ minor style fixups ]
Signed-off-by: David Sterba <dsterba@suse.com>
parent 11e03f2f4b
commit 400b172b8c
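The diff below applies one mechanical pattern over and over: a "struct page **" array walked in PAGE_SIZE steps becomes a "struct folio **" array of page-sized (order-0) folios, and the page helpers become their folio counterparts (alloc_page -> folio_alloc, kmap_local_page -> kmap_local_folio, put_page -> folio_put). As a hedged illustration only — the helper below is made up for this note and is not part of the patch — a minimal sketch of that shape:

/*
 * Illustrative sketch, not btrfs code: fill an array of order-0 folios
 * from a flat buffer, walking in PAGE_SIZE steps the same way the
 * converted btrfs helpers walk their folio arrays.
 */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/string.h>

static int copy_buf_to_folios(const char *buf, size_t len,
			      struct folio **folios, unsigned long nr_folios)
{
	unsigned long i;

	for (i = 0; i < nr_folios && len; i++) {
		size_t cur = min_t(size_t, len, PAGE_SIZE);
		char *kaddr;

		/* Order-0 folio, i.e. still exactly one page of data. */
		folios[i] = folio_alloc(GFP_NOFS, 0);
		if (!folios[i])
			return -ENOMEM;

		/* Map the single-page folio and copy one chunk. */
		kaddr = kmap_local_folio(folios[i], 0);
		memcpy(kaddr, buf, cur);
		kunmap_local(kaddr);

		buf += cur;
		len -= cur;
	}
	return 0;
}

A caller would release the result with folio_put() on each entry and then free the array itself, which mirrors what btrfs_free_compressed_folios() and free_async_extent_pages() do after this patch.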
fs/btrfs/compression.c

@@ -90,20 +90,20 @@ bool btrfs_compress_is_valid_type(const char *str, size_t len)
 }
 
 static int compression_compress_pages(int type, struct list_head *ws,
-struct address_space *mapping, u64 start, struct page **pages,
-unsigned long *out_pages, unsigned long *total_in,
-unsigned long *total_out)
+struct address_space *mapping, u64 start,
+struct folio **folios, unsigned long *out_folios,
+unsigned long *total_in, unsigned long *total_out)
 {
 switch (type) {
 case BTRFS_COMPRESS_ZLIB:
-return zlib_compress_pages(ws, mapping, start, pages,
-out_pages, total_in, total_out);
+return zlib_compress_folios(ws, mapping, start, folios,
+out_folios, total_in, total_out);
 case BTRFS_COMPRESS_LZO:
-return lzo_compress_pages(ws, mapping, start, pages,
-out_pages, total_in, total_out);
+return lzo_compress_folios(ws, mapping, start, folios,
+out_folios, total_in, total_out);
 case BTRFS_COMPRESS_ZSTD:
-return zstd_compress_pages(ws, mapping, start, pages,
-out_pages, total_in, total_out);
+return zstd_compress_folios(ws, mapping, start, folios,
+out_folios, total_in, total_out);
 case BTRFS_COMPRESS_NONE:
 default:
 /*
@@ -115,7 +115,7 @@ static int compression_compress_pages(int type, struct list_head *ws,
 * Not a big deal, just need to inform caller that we
 * haven't allocated any pages yet.
 */
-*out_pages = 0;
+*out_folios = 0;
 return -E2BIG;
 }
 }
@@ -158,11 +158,11 @@ static int compression_decompress(int type, struct list_head *ws,
 }
 }
 
-static void btrfs_free_compressed_pages(struct compressed_bio *cb)
+static void btrfs_free_compressed_folios(struct compressed_bio *cb)
 {
-for (unsigned int i = 0; i < cb->nr_pages; i++)
-btrfs_free_compr_folio(page_folio(cb->compressed_pages[i]));
-kfree(cb->compressed_pages);
+for (unsigned int i = 0; i < cb->nr_folios; i++)
+btrfs_free_compr_folio(cb->compressed_folios[i]);
+kfree(cb->compressed_folios);
 }
 
 static int btrfs_decompress_bio(struct compressed_bio *cb);
@@ -269,7 +269,7 @@ static void end_bbio_comprssed_read(struct btrfs_bio *bbio)
 if (!status)
 status = errno_to_blk_status(btrfs_decompress_bio(cb));
 
-btrfs_free_compressed_pages(cb);
+btrfs_free_compressed_folios(cb);
 btrfs_bio_end_io(cb->orig_bbio, status);
 bio_put(&bbio->bio);
 }
@@ -323,7 +323,7 @@ static void btrfs_finish_compressed_write_work(struct work_struct *work)
 end_compressed_writeback(cb);
 /* Note, our inode could be gone now */
 
-btrfs_free_compressed_pages(cb);
+btrfs_free_compressed_folios(cb);
 bio_put(&cb->bbio.bio);
 }
 
@@ -342,17 +342,19 @@ static void end_bbio_comprssed_write(struct btrfs_bio *bbio)
 queue_work(fs_info->compressed_write_workers, &cb->write_end_work);
 }
 
-static void btrfs_add_compressed_bio_pages(struct compressed_bio *cb)
+static void btrfs_add_compressed_bio_folios(struct compressed_bio *cb)
 {
 struct bio *bio = &cb->bbio.bio;
 u32 offset = 0;
 
 while (offset < cb->compressed_len) {
+int ret;
 u32 len = min_t(u32, cb->compressed_len - offset, PAGE_SIZE);
 
 /* Maximum compressed extent is smaller than bio size limit. */
-__bio_add_page(bio, cb->compressed_pages[offset >> PAGE_SHIFT],
+ret = bio_add_folio(bio, cb->compressed_folios[offset >> PAGE_SHIFT],
 len, 0);
+ASSERT(ret);
 offset += len;
 }
 }
@@ -367,8 +369,8 @@ static void btrfs_add_compressed_bio_pages(struct compressed_bio *cb)
 * the end io hooks.
 */
 void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
-struct page **compressed_pages,
-unsigned int nr_pages,
+struct folio **compressed_folios,
+unsigned int nr_folios,
 blk_opf_t write_flags,
 bool writeback)
 {
@@ -384,14 +386,14 @@ void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
 end_bbio_comprssed_write);
 cb->start = ordered->file_offset;
 cb->len = ordered->num_bytes;
-cb->compressed_pages = compressed_pages;
+cb->compressed_folios = compressed_folios;
 cb->compressed_len = ordered->disk_num_bytes;
 cb->writeback = writeback;
 INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work);
-cb->nr_pages = nr_pages;
+cb->nr_folios = nr_folios;
 cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
 cb->bbio.ordered = ordered;
-btrfs_add_compressed_bio_pages(cb);
+btrfs_add_compressed_bio_folios(cb);
 
 btrfs_submit_bio(&cb->bbio, 0);
 }
@@ -599,14 +601,14 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
 
 free_extent_map(em);
 
-cb->nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
-cb->compressed_pages = kcalloc(cb->nr_pages, sizeof(struct page *), GFP_NOFS);
-if (!cb->compressed_pages) {
+cb->nr_folios = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
+cb->compressed_folios = kcalloc(cb->nr_folios, sizeof(struct page *), GFP_NOFS);
+if (!cb->compressed_folios) {
 ret = BLK_STS_RESOURCE;
 goto out_free_bio;
 }
 
-ret2 = btrfs_alloc_page_array(cb->nr_pages, cb->compressed_pages, 0);
+ret2 = btrfs_alloc_folio_array(cb->nr_folios, cb->compressed_folios, 0);
 if (ret2) {
 ret = BLK_STS_RESOURCE;
 goto out_free_compressed_pages;
@@ -618,7 +620,7 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
 /* include any pages we added in add_ra-bio_pages */
 cb->len = bbio->bio.bi_iter.bi_size;
 cb->bbio.bio.bi_iter.bi_sector = bbio->bio.bi_iter.bi_sector;
-btrfs_add_compressed_bio_pages(cb);
+btrfs_add_compressed_bio_folios(cb);
 
 if (memstall)
 psi_memstall_leave(&pflags);
@@ -627,7 +629,7 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
 return;
 
 out_free_compressed_pages:
-kfree(cb->compressed_pages);
+kfree(cb->compressed_folios);
 out_free_bio:
 bio_put(&cb->bbio.bio);
 out:
@@ -975,17 +977,17 @@ static unsigned int btrfs_compress_set_level(int type, unsigned level)
 }
 
 /* Wrapper around find_get_page(), with extra error message. */
-int btrfs_compress_find_get_page(struct address_space *mapping, u64 start,
-struct page **in_page_ret)
+int btrfs_compress_filemap_get_folio(struct address_space *mapping, u64 start,
+struct folio **in_folio_ret)
 {
-struct page *in_page;
+struct folio *in_folio;
 
 /*
-* The compressed write path should have the page locked already, thus
-* we only need to grab one reference of the page cache.
+* The compressed write path should have the folio locked already, thus
+* we only need to grab one reference.
 */
-in_page = find_get_page(mapping, start >> PAGE_SHIFT);
-if (unlikely(!in_page)) {
+in_folio = filemap_get_folio(mapping, start >> PAGE_SHIFT);
+if (IS_ERR(in_folio)) {
 struct btrfs_inode *inode = BTRFS_I(mapping->host);
 
 btrfs_crit(inode->root->fs_info,
@@ -993,7 +995,7 @@ int btrfs_compress_find_get_page(struct address_space *mapping, u64 start,
 inode->root->root_key.objectid, btrfs_ino(inode), start);
 return -ENOENT;
 }
-*in_page_ret = in_page;
+*in_folio_ret = in_folio;
 return 0;
 }
 
@@ -1017,11 +1019,9 @@ int btrfs_compress_find_get_page(struct address_space *mapping, u64 start,
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 */
-int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
-u64 start, struct page **pages,
-unsigned long *out_pages,
-unsigned long *total_in,
-unsigned long *total_out)
+int btrfs_compress_folios(unsigned int type_level, struct address_space *mapping,
+u64 start, struct folio **folios, unsigned long *out_folios,
+unsigned long *total_in, unsigned long *total_out)
 {
 int type = btrfs_compress_type(type_level);
 int level = btrfs_compress_level(type_level);
@@ -1030,8 +1030,8 @@ int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
 
 level = btrfs_compress_set_level(type, level);
 workspace = get_workspace(type, level);
-ret = compression_compress_pages(type, workspace, mapping, start, pages,
-out_pages, total_in, total_out);
+ret = compression_compress_pages(type, workspace, mapping, start, folios,
+out_folios, total_in, total_out);
 put_workspace(type, workspace);
 return ret;
 }
fs/btrfs/compression.h

@@ -41,11 +41,11 @@ static_assert((BTRFS_MAX_COMPRESSED % PAGE_SIZE) == 0);
 #define BTRFS_ZLIB_DEFAULT_LEVEL 3
 
 struct compressed_bio {
-/* Number of compressed pages in the array */
-unsigned int nr_pages;
+/* Number of compressed folios in the array. */
+unsigned int nr_folios;
 
-/* the pages with the compressed data on them */
-struct page **compressed_pages;
+/* The folios with the compressed data on them. */
+struct folio **compressed_folios;
 
 /* starting offset in the inode for our pages */
 u64 start;
@@ -85,20 +85,17 @@ static inline unsigned int btrfs_compress_level(unsigned int type_level)
 int __init btrfs_init_compress(void);
 void __cold btrfs_exit_compress(void);
 
-int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
-u64 start, struct page **pages,
-unsigned long *out_pages,
-unsigned long *total_in,
-unsigned long *total_out);
+int btrfs_compress_folios(unsigned int type_level, struct address_space *mapping,
+u64 start, struct folio **folios, unsigned long *out_folios,
+unsigned long *total_in, unsigned long *total_out);
 int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page,
 unsigned long start_byte, size_t srclen, size_t destlen);
 int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
 struct compressed_bio *cb, u32 decompressed);
 
 void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
-struct page **compressed_pages,
-unsigned int nr_pages,
-blk_opf_t write_flags,
+struct folio **compressed_folios,
+unsigned int nr_folios, blk_opf_t write_flags,
 bool writeback);
 void btrfs_submit_compressed_read(struct btrfs_bio *bbio);
 
@@ -149,11 +146,11 @@ bool btrfs_compress_is_valid_type(const char *str, size_t len);
 
 int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end);
 
-int btrfs_compress_find_get_page(struct address_space *mapping, u64 start,
-struct page **in_page_ret);
+int btrfs_compress_filemap_get_folio(struct address_space *mapping, u64 start,
+struct folio **in_folio_ret);
 
-int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
-u64 start, struct page **pages, unsigned long *out_pages,
+int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
+u64 start, struct folio **folios, unsigned long *out_folios,
 unsigned long *total_in, unsigned long *total_out);
 int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
 int zlib_decompress(struct list_head *ws, const u8 *data_in,
@@ -163,8 +160,8 @@ struct list_head *zlib_alloc_workspace(unsigned int level);
 void zlib_free_workspace(struct list_head *ws);
 struct list_head *zlib_get_workspace(unsigned int level);
 
-int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
-u64 start, struct page **pages, unsigned long *out_pages,
+int lzo_compress_folios(struct list_head *ws, struct address_space *mapping,
+u64 start, struct folio **folios, unsigned long *out_folios,
 unsigned long *total_in, unsigned long *total_out);
 int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
 int lzo_decompress(struct list_head *ws, const u8 *data_in,
@@ -173,8 +170,8 @@ int lzo_decompress(struct list_head *ws, const u8 *data_in,
 struct list_head *lzo_alloc_workspace(unsigned int level);
 void lzo_free_workspace(struct list_head *ws);
 
-int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
-u64 start, struct page **pages, unsigned long *out_pages,
+int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
+u64 start, struct folio **folios, unsigned long *out_folios,
 unsigned long *total_in, unsigned long *total_out);
 int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
 int zstd_decompress(struct list_head *ws, const u8 *data_in,
fs/btrfs/inode.c | 108

@@ -708,8 +708,8 @@ struct async_extent {
 u64 start;
 u64 ram_size;
 u64 compressed_size;
-struct page **pages;
-unsigned long nr_pages;
+struct folio **folios;
+unsigned long nr_folios;
 int compress_type;
 struct list_head list;
 };
@@ -734,8 +734,8 @@ struct async_cow {
 static noinline int add_async_extent(struct async_chunk *cow,
 u64 start, u64 ram_size,
 u64 compressed_size,
-struct page **pages,
-unsigned long nr_pages,
+struct folio **folios,
+unsigned long nr_folios,
 int compress_type)
 {
 struct async_extent *async_extent;
@@ -746,8 +746,8 @@ static noinline int add_async_extent(struct async_chunk *cow,
 async_extent->start = start;
 async_extent->ram_size = ram_size;
 async_extent->compressed_size = compressed_size;
-async_extent->pages = pages;
-async_extent->nr_pages = nr_pages;
+async_extent->folios = folios;
+async_extent->nr_folios = nr_folios;
 async_extent->compress_type = compress_type;
 list_add_tail(&async_extent->list, &cow->extents);
 return 0;
@@ -851,8 +851,8 @@ static void compress_file_range(struct btrfs_work *work)
 u64 actual_end;
 u64 i_size;
 int ret = 0;
-struct page **pages;
-unsigned long nr_pages;
+struct folio **folios;
+unsigned long nr_folios;
 unsigned long total_compressed = 0;
 unsigned long total_in = 0;
 unsigned int poff;
@@ -882,9 +882,9 @@ static void compress_file_range(struct btrfs_work *work)
 barrier();
 actual_end = min_t(u64, i_size, end + 1);
 again:
-pages = NULL;
-nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
-nr_pages = min_t(unsigned long, nr_pages, BTRFS_MAX_COMPRESSED_PAGES);
+folios = NULL;
+nr_folios = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
+nr_folios = min_t(unsigned long, nr_folios, BTRFS_MAX_COMPRESSED_PAGES);
 
 /*
 * we don't want to send crud past the end of i_size through
@@ -933,8 +933,8 @@ again:
 if (!inode_need_compress(inode, start, end))
 goto cleanup_and_bail_uncompressed;
 
-pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
-if (!pages) {
+folios = kcalloc(nr_folios, sizeof(struct folio *), GFP_NOFS);
+if (!folios) {
 /*
 * Memory allocation failure is not a fatal error, we can fall
 * back to uncompressed code.
@@ -948,8 +948,8 @@ again:
 compress_type = inode->prop_compress;
 
 /* Compression level is applied here. */
-ret = btrfs_compress_pages(compress_type | (fs_info->compress_level << 4),
-mapping, start, pages, &nr_pages, &total_in,
+ret = btrfs_compress_folios(compress_type | (fs_info->compress_level << 4),
+mapping, start, folios, &nr_folios, &total_in,
 &total_compressed);
 if (ret)
 goto mark_incompressible;
@@ -960,7 +960,7 @@ again:
 */
 poff = offset_in_page(total_compressed);
 if (poff)
-memzero_page(pages[nr_pages - 1], poff, PAGE_SIZE - poff);
+folio_zero_range(folios[nr_folios - 1], poff, PAGE_SIZE - poff);
 
 /*
 * Try to create an inline extent.
@@ -979,8 +979,7 @@ again:
 } else {
 ret = cow_file_range_inline(inode, actual_end,
 total_compressed,
-compress_type,
-page_folio(pages[0]),
+compress_type, folios[0],
 false);
 }
 if (ret <= 0) {
@@ -1030,8 +1029,8 @@ again:
 * The async work queues will take care of doing actual allocation on
 * disk for these compressed pages, and will submit the bios.
 */
-ret = add_async_extent(async_chunk, start, total_in, total_compressed, pages,
-nr_pages, compress_type);
+ret = add_async_extent(async_chunk, start, total_in, total_compressed, folios,
+nr_folios, compress_type);
 BUG_ON(ret);
 if (start + total_in < end) {
 start += total_in;
@@ -1048,12 +1047,12 @@ cleanup_and_bail_uncompressed:
 BTRFS_COMPRESS_NONE);
 BUG_ON(ret);
 free_pages:
-if (pages) {
-for (i = 0; i < nr_pages; i++) {
-WARN_ON(pages[i]->mapping);
-btrfs_free_compr_folio(page_folio(pages[i]));
+if (folios) {
+for (i = 0; i < nr_folios; i++) {
+WARN_ON(folios[i]->mapping);
+btrfs_free_compr_folio(folios[i]);
 }
-kfree(pages);
+kfree(folios);
 }
 }
 
@@ -1061,16 +1060,16 @@ static void free_async_extent_pages(struct async_extent *async_extent)
 {
 int i;
 
-if (!async_extent->pages)
+if (!async_extent->folios)
 return;
 
-for (i = 0; i < async_extent->nr_pages; i++) {
-WARN_ON(async_extent->pages[i]->mapping);
-btrfs_free_compr_folio(page_folio(async_extent->pages[i]));
+for (i = 0; i < async_extent->nr_folios; i++) {
+WARN_ON(async_extent->folios[i]->mapping);
+btrfs_free_compr_folio(async_extent->folios[i]);
 }
-kfree(async_extent->pages);
-async_extent->nr_pages = 0;
-async_extent->pages = NULL;
+kfree(async_extent->folios);
+async_extent->nr_folios = 0;
+async_extent->folios = NULL;
 }
 
 static void submit_uncompressed_range(struct btrfs_inode *inode,
@@ -1194,8 +1193,8 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
 NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
 PAGE_UNLOCK | PAGE_START_WRITEBACK);
 btrfs_submit_compressed_write(ordered,
-async_extent->pages, /* compressed_pages */
-async_extent->nr_pages,
+async_extent->folios, /* compressed_folios */
+async_extent->nr_folios,
 async_chunk->write_flags, true);
 *alloc_hint = ins.objectid + ins.offset;
 done:
@@ -10309,8 +10308,8 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
 size_t orig_count;
 u64 start, end;
 u64 num_bytes, ram_bytes, disk_num_bytes;
-unsigned long nr_pages, i;
-struct page **pages;
+unsigned long nr_folios, i;
+struct folio **folios;
 struct btrfs_key ins;
 bool extent_reserved = false;
 struct extent_map *em;
@@ -10399,24 +10398,24 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
 * isn't.
 */
 disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
-nr_pages = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE);
-pages = kvcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL_ACCOUNT);
-if (!pages)
+nr_folios = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE);
+folios = kvcalloc(nr_folios, sizeof(struct page *), GFP_KERNEL_ACCOUNT);
+if (!folios)
 return -ENOMEM;
-for (i = 0; i < nr_pages; i++) {
+for (i = 0; i < nr_folios; i++) {
 size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from));
 char *kaddr;
 
-pages[i] = alloc_page(GFP_KERNEL_ACCOUNT);
-if (!pages[i]) {
+folios[i] = folio_alloc(GFP_KERNEL_ACCOUNT, 0);
+if (!folios[i]) {
 ret = -ENOMEM;
-goto out_pages;
+goto out_folios;
 }
-kaddr = kmap_local_page(pages[i]);
+kaddr = kmap_local_folio(folios[i], 0);
 if (copy_from_iter(kaddr, bytes, from) != bytes) {
 kunmap_local(kaddr);
 ret = -EFAULT;
-goto out_pages;
+goto out_folios;
 }
 if (bytes < PAGE_SIZE)
 memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
@@ -10428,12 +10427,12 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
 
 ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, num_bytes);
 if (ret)
-goto out_pages;
+goto out_folios;
 ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
 start >> PAGE_SHIFT,
 end >> PAGE_SHIFT);
 if (ret)
-goto out_pages;
+goto out_folios;
 lock_extent(io_tree, start, end, &cached_state);
 ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
 if (!ordered &&
@@ -10464,8 +10463,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
 if (start == 0 && encoded->unencoded_len == encoded->len &&
 encoded->unencoded_offset == 0) {
 ret = cow_file_range_inline(inode, encoded->len, orig_count,
-compression, page_folio(pages[0]),
-true);
+compression, folios[0], true);
 if (ret <= 0) {
 if (ret == 0)
 ret = orig_count;
@@ -10509,7 +10507,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
 
 btrfs_delalloc_release_extents(inode, num_bytes);
 
-btrfs_submit_compressed_write(ordered, pages, nr_pages, 0, false);
+btrfs_submit_compressed_write(ordered, folios, nr_folios, 0, false);
 ret = orig_count;
 goto out;
 
@@ -10531,12 +10529,12 @@ out_free_data_space:
 btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes);
 out_unlock:
 unlock_extent(io_tree, start, end, &cached_state);
-out_pages:
-for (i = 0; i < nr_pages; i++) {
-if (pages[i])
-__free_page(pages[i]);
+out_folios:
+for (i = 0; i < nr_folios; i++) {
+if (folios[i])
+__folio_put(folios[i]);
 }
-kvfree(pages);
+kvfree(folios);
 out:
 if (ret >= 0)
 iocb->ki_pos += encoded->len;
fs/btrfs/lzo.c

@@ -130,17 +130,17 @@ static inline size_t read_compress_length(const char *buf)
 */
 static int copy_compressed_data_to_page(char *compressed_data,
 size_t compressed_size,
-struct page **out_pages,
-unsigned long max_nr_page,
+struct folio **out_folios,
+unsigned long max_nr_folio,
 u32 *cur_out,
 const u32 sectorsize)
 {
 u32 sector_bytes_left;
 u32 orig_out;
-struct page *cur_page;
+struct folio *cur_folio;
 char *kaddr;
 
-if ((*cur_out / PAGE_SIZE) >= max_nr_page)
+if ((*cur_out / PAGE_SIZE) >= max_nr_folio)
 return -E2BIG;
 
 /*
@@ -149,16 +149,16 @@ static int copy_compressed_data_to_page(char *compressed_data,
 */
 ASSERT((*cur_out / sectorsize) == (*cur_out + LZO_LEN - 1) / sectorsize);
 
-cur_page = out_pages[*cur_out / PAGE_SIZE];
+cur_folio = out_folios[*cur_out / PAGE_SIZE];
 /* Allocate a new page */
-if (!cur_page) {
-cur_page = folio_page(btrfs_alloc_compr_folio(), 0);
-if (!cur_page)
+if (!cur_folio) {
+cur_folio = btrfs_alloc_compr_folio();
+if (!cur_folio)
 return -ENOMEM;
-out_pages[*cur_out / PAGE_SIZE] = cur_page;
+out_folios[*cur_out / PAGE_SIZE] = cur_folio;
 }
 
-kaddr = kmap_local_page(cur_page);
+kaddr = kmap_local_folio(cur_folio, 0);
 write_compress_length(kaddr + offset_in_page(*cur_out),
 compressed_size);
 *cur_out += LZO_LEN;
@@ -172,18 +172,18 @@ static int copy_compressed_data_to_page(char *compressed_data,
 
 kunmap_local(kaddr);
 
-if ((*cur_out / PAGE_SIZE) >= max_nr_page)
+if ((*cur_out / PAGE_SIZE) >= max_nr_folio)
 return -E2BIG;
 
-cur_page = out_pages[*cur_out / PAGE_SIZE];
+cur_folio = out_folios[*cur_out / PAGE_SIZE];
 /* Allocate a new page */
-if (!cur_page) {
-cur_page = folio_page(btrfs_alloc_compr_folio(), 0);
-if (!cur_page)
+if (!cur_folio) {
+cur_folio = btrfs_alloc_compr_folio();
+if (!cur_folio)
 return -ENOMEM;
-out_pages[*cur_out / PAGE_SIZE] = cur_page;
+out_folios[*cur_out / PAGE_SIZE] = cur_folio;
 }
-kaddr = kmap_local_page(cur_page);
+kaddr = kmap_local_folio(cur_folio, 0);
 
 memcpy(kaddr + offset_in_page(*cur_out),
 compressed_data + *cur_out - orig_out, copy_len);
@@ -209,15 +209,15 @@ out:
 return 0;
 }
 
-int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
-u64 start, struct page **pages, unsigned long *out_pages,
+int lzo_compress_folios(struct list_head *ws, struct address_space *mapping,
+u64 start, struct folio **folios, unsigned long *out_folios,
 unsigned long *total_in, unsigned long *total_out)
 {
 struct workspace *workspace = list_entry(ws, struct workspace, list);
 const u32 sectorsize = inode_to_fs_info(mapping->host)->sectorsize;
-struct page *page_in = NULL;
+struct folio *folio_in = NULL;
 char *sizes_ptr;
-const unsigned long max_nr_page = *out_pages;
+const unsigned long max_nr_folio = *out_folios;
 int ret = 0;
 /* Points to the file offset of input data */
 u64 cur_in = start;
@@ -225,8 +225,8 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
 u32 cur_out = 0;
 u32 len = *total_out;
 
-ASSERT(max_nr_page > 0);
-*out_pages = 0;
+ASSERT(max_nr_folio > 0);
+*out_folios = 0;
 *total_out = 0;
 *total_in = 0;
 
@@ -243,8 +243,8 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
 size_t out_len;
 
 /* Get the input page first */
-if (!page_in) {
-ret = btrfs_compress_find_get_page(mapping, cur_in, &page_in);
+if (!folio_in) {
+ret = btrfs_compress_filemap_get_folio(mapping, cur_in, &folio_in);
 if (ret < 0)
 goto out;
 }
@@ -252,7 +252,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
 /* Compress at most one sector of data each time */
 in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off);
 ASSERT(in_len);
-data_in = kmap_local_page(page_in);
+data_in = kmap_local_folio(folio_in, 0);
 ret = lzo1x_1_compress(data_in +
 offset_in_page(cur_in), in_len,
 workspace->cbuf, &out_len,
@@ -265,7 +265,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
 }
 
 ret = copy_compressed_data_to_page(workspace->cbuf, out_len,
-pages, max_nr_page,
+folios, max_nr_folio,
 &cur_out, sectorsize);
 if (ret < 0)
 goto out;
@@ -283,13 +283,13 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
 
 /* Check if we have reached page boundary */
 if (PAGE_ALIGNED(cur_in)) {
-put_page(page_in);
-page_in = NULL;
+folio_put(folio_in);
+folio_in = NULL;
 }
 }
 
 /* Store the size of all chunks of compressed data */
-sizes_ptr = kmap_local_page(pages[0]);
+sizes_ptr = kmap_local_folio(folios[0], 0);
 write_compress_length(sizes_ptr, cur_out);
 kunmap_local(sizes_ptr);
 
@@ -297,9 +297,9 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
 *total_out = cur_out;
 *total_in = cur_in - start;
 out:
-if (page_in)
-put_page(page_in);
-*out_pages = DIV_ROUND_UP(cur_out, PAGE_SIZE);
+if (folio_in)
+folio_put(folio_in);
+*out_folios = DIV_ROUND_UP(cur_out, PAGE_SIZE);
 return ret;
 }
 
@@ -314,15 +314,15 @@ static void copy_compressed_segment(struct compressed_bio *cb,
 u32 orig_in = *cur_in;
 
 while (*cur_in < orig_in + len) {
-struct page *cur_page;
+struct folio *cur_folio;
 u32 copy_len = min_t(u32, PAGE_SIZE - offset_in_page(*cur_in),
 orig_in + len - *cur_in);
 
 ASSERT(copy_len);
-cur_page = cb->compressed_pages[*cur_in / PAGE_SIZE];
+cur_folio = cb->compressed_folios[*cur_in / PAGE_SIZE];
 
-memcpy_from_page(dest + *cur_in - orig_in, cur_page,
-offset_in_page(*cur_in), copy_len);
+memcpy_from_folio(dest + *cur_in - orig_in, cur_folio,
+offset_in_folio(cur_folio, *cur_in), copy_len);
 
 *cur_in += copy_len;
 }
@@ -342,7 +342,7 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 /* Bytes decompressed so far */
 u32 cur_out = 0;
 
-kaddr = kmap_local_page(cb->compressed_pages[0]);
+kaddr = kmap_local_folio(cb->compressed_folios[0], 0);
 len_in = read_compress_length(kaddr);
 kunmap_local(kaddr);
 cur_in += LZO_LEN;
@@ -364,7 +364,7 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 
 /* Go through each lzo segment */
 while (cur_in < len_in) {
-struct page *cur_page;
+struct folio *cur_folio;
 /* Length of the compressed segment */
 u32 seg_len;
 u32 sector_bytes_left;
@@ -376,9 +376,9 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 */
 ASSERT(cur_in / sectorsize ==
 (cur_in + LZO_LEN - 1) / sectorsize);
-cur_page = cb->compressed_pages[cur_in / PAGE_SIZE];
-ASSERT(cur_page);
-kaddr = kmap_local_page(cur_page);
+cur_folio = cb->compressed_folios[cur_in / PAGE_SIZE];
+ASSERT(cur_folio);
+kaddr = kmap_local_folio(cur_folio, 0);
 seg_len = read_compress_length(kaddr + offset_in_page(cur_in));
 kunmap_local(kaddr);
 cur_in += LZO_LEN;
fs/btrfs/zlib.c | 102

@@ -91,24 +91,24 @@ fail:
 return ERR_PTR(-ENOMEM);
 }
 
-int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
-u64 start, struct page **pages, unsigned long *out_pages,
+int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
+u64 start, struct folio **folios, unsigned long *out_folios,
 unsigned long *total_in, unsigned long *total_out)
 {
 struct workspace *workspace = list_entry(ws, struct workspace, list);
 int ret;
 char *data_in = NULL;
-char *cpage_out;
-int nr_pages = 0;
-struct page *in_page = NULL;
-struct page *out_page = NULL;
+char *cfolio_out;
+int nr_folios = 0;
+struct folio *in_folio = NULL;
+struct folio *out_folio = NULL;
 unsigned long bytes_left;
-unsigned int in_buf_pages;
+unsigned int in_buf_folios;
 unsigned long len = *total_out;
-unsigned long nr_dest_pages = *out_pages;
-const unsigned long max_out = nr_dest_pages * PAGE_SIZE;
+unsigned long nr_dest_folios = *out_folios;
+const unsigned long max_out = nr_dest_folios * PAGE_SIZE;
 
-*out_pages = 0;
+*out_folios = 0;
 *total_out = 0;
 *total_in = 0;
 
@@ -121,18 +121,18 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 workspace->strm.total_in = 0;
 workspace->strm.total_out = 0;
 
-out_page = folio_page(btrfs_alloc_compr_folio(), 0);
-if (out_page == NULL) {
+out_folio = btrfs_alloc_compr_folio();
+if (out_folio == NULL) {
 ret = -ENOMEM;
 goto out;
 }
-cpage_out = page_address(out_page);
-pages[0] = out_page;
-nr_pages = 1;
+cfolio_out = folio_address(out_folio);
+folios[0] = out_folio;
+nr_folios = 1;
 
 workspace->strm.next_in = workspace->buf;
 workspace->strm.avail_in = 0;
-workspace->strm.next_out = cpage_out;
+workspace->strm.next_out = cfolio_out;
 workspace->strm.avail_out = PAGE_SIZE;
 
 while (workspace->strm.total_in < len) {
@@ -142,22 +142,22 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 */
 if (workspace->strm.avail_in == 0) {
 bytes_left = len - workspace->strm.total_in;
-in_buf_pages = min(DIV_ROUND_UP(bytes_left, PAGE_SIZE),
+in_buf_folios = min(DIV_ROUND_UP(bytes_left, PAGE_SIZE),
 workspace->buf_size / PAGE_SIZE);
-if (in_buf_pages > 1) {
+if (in_buf_folios > 1) {
 int i;
 
-for (i = 0; i < in_buf_pages; i++) {
+for (i = 0; i < in_buf_folios; i++) {
 if (data_in) {
 kunmap_local(data_in);
-put_page(in_page);
+folio_put(in_folio);
 data_in = NULL;
 }
-ret = btrfs_compress_find_get_page(mapping,
-start, &in_page);
+ret = btrfs_compress_filemap_get_folio(mapping,
+start, &in_folio);
 if (ret < 0)
 goto out;
-data_in = kmap_local_page(in_page);
+data_in = kmap_local_folio(in_folio, 0);
 copy_page(workspace->buf + i * PAGE_SIZE,
 data_in);
 start += PAGE_SIZE;
@@ -166,14 +166,14 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 } else {
 if (data_in) {
 kunmap_local(data_in);
-put_page(in_page);
+folio_put(in_folio);
 data_in = NULL;
 }
-ret = btrfs_compress_find_get_page(mapping,
-start, &in_page);
+ret = btrfs_compress_filemap_get_folio(mapping,
+start, &in_folio);
 if (ret < 0)
 goto out;
-data_in = kmap_local_page(in_page);
+data_in = kmap_local_folio(in_folio, 0);
 start += PAGE_SIZE;
 workspace->strm.next_in = data_in;
 }
@@ -202,20 +202,20 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 * the stream end if required
 */
 if (workspace->strm.avail_out == 0) {
-if (nr_pages == nr_dest_pages) {
+if (nr_folios == nr_dest_folios) {
 ret = -E2BIG;
 goto out;
 }
-out_page = folio_page(btrfs_alloc_compr_folio(), 0);
-if (out_page == NULL) {
+out_folio = btrfs_alloc_compr_folio();
+if (out_folio == NULL) {
 ret = -ENOMEM;
 goto out;
 }
-cpage_out = page_address(out_page);
-pages[nr_pages] = out_page;
-nr_pages++;
+cfolio_out = folio_address(out_folio);
+folios[nr_folios] = out_folio;
+nr_folios++;
 workspace->strm.avail_out = PAGE_SIZE;
-workspace->strm.next_out = cpage_out;
+workspace->strm.next_out = cfolio_out;
 }
 /* we're all done */
 if (workspace->strm.total_in >= len)
@@ -237,21 +237,21 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 ret = -EIO;
 goto out;
 } else if (workspace->strm.avail_out == 0) {
-/* get another page for the stream end */
-if (nr_pages == nr_dest_pages) {
+/* Get another folio for the stream end. */
+if (nr_folios == nr_dest_folios) {
 ret = -E2BIG;
 goto out;
 }
-out_page = folio_page(btrfs_alloc_compr_folio(), 0);
-if (out_page == NULL) {
+out_folio = btrfs_alloc_compr_folio();
+if (out_folio == NULL) {
 ret = -ENOMEM;
 goto out;
 }
-cpage_out = page_address(out_page);
-pages[nr_pages] = out_page;
-nr_pages++;
+cfolio_out = folio_address(out_folio);
+folios[nr_folios] = out_folio;
+nr_folios++;
 workspace->strm.avail_out = PAGE_SIZE;
-workspace->strm.next_out = cpage_out;
+workspace->strm.next_out = cfolio_out;
 }
 }
 zlib_deflateEnd(&workspace->strm);
@@ -265,10 +265,10 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 *total_out = workspace->strm.total_out;
 *total_in = workspace->strm.total_in;
 out:
-*out_pages = nr_pages;
+*out_folios = nr_folios;
 if (data_in) {
 kunmap_local(data_in);
-put_page(in_page);
+folio_put(in_folio);
 }
 
 return ret;
@@ -281,13 +281,13 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 int wbits = MAX_WBITS;
 char *data_in;
 size_t total_out = 0;
-unsigned long page_in_index = 0;
+unsigned long folio_in_index = 0;
 size_t srclen = cb->compressed_len;
-unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
+unsigned long total_folios_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
 unsigned long buf_start;
-struct page **pages_in = cb->compressed_pages;
+struct folio **folios_in = cb->compressed_folios;
 
-data_in = kmap_local_page(pages_in[page_in_index]);
+data_in = kmap_local_folio(folios_in[folio_in_index], 0);
 workspace->strm.next_in = data_in;
 workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
 workspace->strm.total_in = 0;
@@ -337,12 +337,12 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 if (workspace->strm.avail_in == 0) {
 unsigned long tmp;
 kunmap_local(data_in);
-page_in_index++;
-if (page_in_index >= total_pages_in) {
+folio_in_index++;
+if (folio_in_index >= total_folios_in) {
 data_in = NULL;
 break;
 }
-data_in = kmap_local_page(pages_in[page_in_index]);
+data_in = kmap_local_folio(folios_in[folio_in_index], 0);
 workspace->strm.next_in = data_in;
 tmp = srclen - workspace->strm.total_in;
 workspace->strm.avail_in = min(tmp, PAGE_SIZE);
fs/btrfs/zstd.c

@@ -374,25 +374,25 @@ fail:
 return ERR_PTR(-ENOMEM);
 }
 
-int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
-u64 start, struct page **pages, unsigned long *out_pages,
+int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
+u64 start, struct folio **folios, unsigned long *out_folios,
 unsigned long *total_in, unsigned long *total_out)
 {
 struct workspace *workspace = list_entry(ws, struct workspace, list);
 zstd_cstream *stream;
 int ret = 0;
-int nr_pages = 0;
-struct page *in_page = NULL; /* The current page to read */
-struct page *out_page = NULL; /* The current page to write to */
+int nr_folios = 0;
+struct folio *in_folio = NULL; /* The current folio to read. */
+struct folio *out_folio = NULL; /* The current folio to write to. */
 unsigned long tot_in = 0;
 unsigned long tot_out = 0;
 unsigned long len = *total_out;
-const unsigned long nr_dest_pages = *out_pages;
-unsigned long max_out = nr_dest_pages * PAGE_SIZE;
+const unsigned long nr_dest_folios = *out_folios;
+unsigned long max_out = nr_dest_folios * PAGE_SIZE;
 zstd_parameters params = zstd_get_btrfs_parameters(workspace->req_level,
 len);
 
-*out_pages = 0;
+*out_folios = 0;
 *total_out = 0;
 *total_in = 0;
 
@@ -406,21 +406,21 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
 }
 
 /* map in the first page of input data */
-ret = btrfs_compress_find_get_page(mapping, start, &in_page);
+ret = btrfs_compress_filemap_get_folio(mapping, start, &in_folio);
 if (ret < 0)
 goto out;
-workspace->in_buf.src = kmap_local_page(in_page);
+workspace->in_buf.src = kmap_local_folio(in_folio, 0);
 workspace->in_buf.pos = 0;
 workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
 
 /* Allocate and map in the output buffer */
-out_page = folio_page(btrfs_alloc_compr_folio(), 0);
-if (out_page == NULL) {
+out_folio = btrfs_alloc_compr_folio();
+if (out_folio == NULL) {
 ret = -ENOMEM;
 goto out;
 }
-pages[nr_pages++] = out_page;
-workspace->out_buf.dst = page_address(out_page);
+folios[nr_folios++] = out_folio;
+workspace->out_buf.dst = folio_address(out_folio);
 workspace->out_buf.pos = 0;
 workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
 
@@ -455,17 +455,17 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
 if (workspace->out_buf.pos == workspace->out_buf.size) {
 tot_out += PAGE_SIZE;
 max_out -= PAGE_SIZE;
-if (nr_pages == nr_dest_pages) {
+if (nr_folios == nr_dest_folios) {
 ret = -E2BIG;
 goto out;
 }
-out_page = folio_page(btrfs_alloc_compr_folio(), 0);
-if (out_page == NULL) {
+out_folio = btrfs_alloc_compr_folio();
+if (out_folio == NULL) {
 ret = -ENOMEM;
 goto out;
 }
-pages[nr_pages++] = out_page;
-workspace->out_buf.dst = page_address(out_page);
+folios[nr_folios++] = out_folio;
+workspace->out_buf.dst = folio_address(out_folio);
 workspace->out_buf.pos = 0;
 workspace->out_buf.size = min_t(size_t, max_out,
 PAGE_SIZE);
@@ -482,13 +482,13 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
 tot_in += PAGE_SIZE;
 kunmap_local(workspace->in_buf.src);
 workspace->in_buf.src = NULL;
-put_page(in_page);
+folio_put(in_folio);
 start += PAGE_SIZE;
 len -= PAGE_SIZE;
-ret = btrfs_compress_find_get_page(mapping, start, &in_page);
+ret = btrfs_compress_filemap_get_folio(mapping, start, &in_folio);
 if (ret < 0)
 goto out;
-workspace->in_buf.src = kmap_local_page(in_page);
+workspace->in_buf.src = kmap_local_folio(in_folio, 0);
 workspace->in_buf.pos = 0;
 workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
 }
@@ -515,17 +515,17 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
 
 tot_out += PAGE_SIZE;
 max_out -= PAGE_SIZE;
-if (nr_pages == nr_dest_pages) {
+if (nr_folios == nr_dest_folios) {
 ret = -E2BIG;
 goto out;
 }
-out_page = folio_page(btrfs_alloc_compr_folio(), 0);
-if (out_page == NULL) {
+out_folio = btrfs_alloc_compr_folio();
+if (out_folio == NULL) {
 ret = -ENOMEM;
 goto out;
 }
-pages[nr_pages++] = out_page;
-workspace->out_buf.dst = page_address(out_page);
+folios[nr_folios++] = out_folio;
+workspace->out_buf.dst = folio_address(out_folio);
 workspace->out_buf.pos = 0;
 workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
 }
@@ -539,10 +539,10 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
 *total_in = tot_in;
 *total_out = tot_out;
 out:
-*out_pages = nr_pages;
+*out_folios = nr_folios;
 if (workspace->in_buf.src) {
 kunmap_local(workspace->in_buf.src);
-put_page(in_page);
+folio_put(in_folio);
 }
 return ret;
 }
@@ -550,12 +550,12 @@ out:
 int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 {
 struct workspace *workspace = list_entry(ws, struct workspace, list);
-struct page **pages_in = cb->compressed_pages;
+struct folio **folios_in = cb->compressed_folios;
 size_t srclen = cb->compressed_len;
 zstd_dstream *stream;
 int ret = 0;
-unsigned long page_in_index = 0;
-unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
+unsigned long folio_in_index = 0;
+unsigned long total_folios_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
 unsigned long buf_start;
 unsigned long total_out = 0;
 
@@ -567,7 +567,7 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 goto done;
 }
 
-workspace->in_buf.src = kmap_local_page(pages_in[page_in_index]);
+workspace->in_buf.src = kmap_local_folio(folios_in[folio_in_index], 0);
 workspace->in_buf.pos = 0;
 workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
 
@@ -604,14 +604,15 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 
 if (workspace->in_buf.pos == workspace->in_buf.size) {
 kunmap_local(workspace->in_buf.src);
-page_in_index++;
-if (page_in_index >= total_pages_in) {
+folio_in_index++;
+if (folio_in_index >= total_folios_in) {
 workspace->in_buf.src = NULL;
 ret = -EIO;
 goto done;
 }
 srclen -= PAGE_SIZE;
-workspace->in_buf.src = kmap_local_page(pages_in[page_in_index]);
+workspace->in_buf.src =
+kmap_local_folio(folios_in[folio_in_index], 0);
 workspace->in_buf.pos = 0;
 workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
 }