btrfs: use a folio array throughout the defrag process

Remove more hidden calls to compound_head() by using an array of folios
instead of pages.  Also neaten the error path in defrag_one_range() by
adjusting the length of the array instead of checking for NULL.

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Author: Matthew Wilcox (Oracle) <willy@infradead.org>
Date: 2023-12-14 16:13:31 +00:00
Committed-by: David Sterba <dsterba@suse.com>
Parent: 03fbf77a2c
Commit: fae9cd252f
@@ -861,7 +861,7 @@ out:
  * NOTE: Caller should also wait for page writeback after the cluster is
  * prepared, here we don't do writeback wait for each page.
  */
-static struct page *defrag_prepare_one_page(struct btrfs_inode *inode, pgoff_t index)
+static struct folio *defrag_prepare_one_folio(struct btrfs_inode *inode, pgoff_t index)
 {
 	struct address_space *mapping = inode->vfs_inode.i_mapping;
 	gfp_t mask = btrfs_alloc_write_mask(mapping);
@@ -875,7 +875,7 @@ again:
 	folio = __filemap_get_folio(mapping, index,
 			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
 	if (IS_ERR(folio))
-		return &folio->page;
+		return folio;

 	/*
 	 * Since we can defragment files opened read-only, we can encounter
@@ -942,7 +942,7 @@ again:
 			return ERR_PTR(-EIO);
 		}
 	}
-	return &folio->page;
+	return folio;
 }

 struct defrag_target_range {
@@ -1163,7 +1163,7 @@ static_assert(PAGE_ALIGNED(CLUSTER_SIZE));
  */
 static int defrag_one_locked_target(struct btrfs_inode *inode,
 				    struct defrag_target_range *target,
-				    struct page **pages, int nr_pages,
+				    struct folio **folios, int nr_pages,
 				    struct extent_state **cached_state)
 {
 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
@@ -1172,7 +1172,7 @@ static int defrag_one_locked_target(struct btrfs_inode *inode,
 	const u64 len = target->len;
 	unsigned long last_index = (start + len - 1) >> PAGE_SHIFT;
 	unsigned long start_index = start >> PAGE_SHIFT;
-	unsigned long first_index = page_index(pages[0]);
+	unsigned long first_index = folios[0]->index;
 	int ret = 0;
 	int i;
@@ -1189,8 +1189,8 @@ static int defrag_one_locked_target(struct btrfs_inode *inode,

 	/* Update the page status */
 	for (i = start_index - first_index; i <= last_index - first_index; i++) {
-		ClearPageChecked(pages[i]);
-		btrfs_folio_clamp_set_dirty(fs_info, page_folio(pages[i]), start, len);
+		folio_clear_checked(folios[i]);
+		btrfs_folio_clamp_set_dirty(fs_info, folios[i], start, len);
 	}
 	btrfs_delalloc_release_extents(inode, len);
 	extent_changeset_free(data_reserved);
@@ -1206,7 +1206,7 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
 	struct defrag_target_range *entry;
 	struct defrag_target_range *tmp;
 	LIST_HEAD(target_list);
-	struct page **pages;
+	struct folio **folios;
 	const u32 sectorsize = inode->root->fs_info->sectorsize;
 	u64 last_index = (start + len - 1) >> PAGE_SHIFT;
 	u64 start_index = start >> PAGE_SHIFT;
@@ -1217,21 +1217,21 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
 	ASSERT(nr_pages <= CLUSTER_SIZE / PAGE_SIZE);
 	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(len, sectorsize));

-	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
-	if (!pages)
+	folios = kcalloc(nr_pages, sizeof(struct folio *), GFP_NOFS);
+	if (!folios)
 		return -ENOMEM;

 	/* Prepare all pages */
 	for (i = 0; i < nr_pages; i++) {
-		pages[i] = defrag_prepare_one_page(inode, start_index + i);
-		if (IS_ERR(pages[i])) {
-			ret = PTR_ERR(pages[i]);
-			pages[i] = NULL;
-			goto free_pages;
+		folios[i] = defrag_prepare_one_folio(inode, start_index + i);
+		if (IS_ERR(folios[i])) {
+			ret = PTR_ERR(folios[i]);
+			nr_pages = i;
+			goto free_folios;
 		}
 	}
 	for (i = 0; i < nr_pages; i++)
-		wait_on_page_writeback(pages[i]);
+		folio_wait_writeback(folios[i]);

 	/* Lock the pages range */
 	lock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
@@ -1251,7 +1251,7 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
 		goto unlock_extent;

 	list_for_each_entry(entry, &target_list, list) {
-		ret = defrag_one_locked_target(inode, entry, pages, nr_pages,
+		ret = defrag_one_locked_target(inode, entry, folios, nr_pages,
 				&cached_state);
 		if (ret < 0)
 			break;
@@ -1265,14 +1265,12 @@ unlock_extent:
 	unlock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
 		      (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
 		      &cached_state);
-free_pages:
+free_folios:
 	for (i = 0; i < nr_pages; i++) {
-		if (pages[i]) {
-			unlock_page(pages[i]);
-			put_page(pages[i]);
-		}
+		folio_unlock(folios[i]);
+		folio_put(folios[i]);
 	}
-	kfree(pages);
+	kfree(folios);
 	return ret;
 }