btrfs: extend btrfs_cleanup_ordered_extents for NULL locked_page
btrfs_cleanup_ordered_extents() assumes locked_page to be non-NULL, so it is not usable for submit_uncompressed_range(), which can have a NULL locked_page. Add support for the locked_page == NULL case. Also, rewrite the redundant "page_offset(locked_page)" calls. Reviewed-by: Filipe Manana <fdmanana@suse.com> Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com> Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
parent
9ce7466f37
commit
99826e4cab
@ -190,11 +190,14 @@ static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
|
||||
{
|
||||
unsigned long index = offset >> PAGE_SHIFT;
|
||||
unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
|
||||
u64 page_start = page_offset(locked_page);
|
||||
u64 page_end = page_start + PAGE_SIZE - 1;
|
||||
|
||||
u64 page_start, page_end;
|
||||
struct page *page;
|
||||
|
||||
if (locked_page) {
|
||||
page_start = page_offset(locked_page);
|
||||
page_end = page_start + PAGE_SIZE - 1;
|
||||
}
|
||||
|
||||
while (index <= end_index) {
|
||||
/*
|
||||
* For locked page, we will call end_extent_writepage() on it
|
||||
@ -207,7 +210,7 @@ static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
|
||||
* btrfs_mark_ordered_io_finished() would skip the accounting
|
||||
* for the page range, and the ordered extent will never finish.
|
||||
*/
|
||||
if (index == (page_offset(locked_page) >> PAGE_SHIFT)) {
|
||||
if (locked_page && index == (page_start >> PAGE_SHIFT)) {
|
||||
index++;
|
||||
continue;
|
||||
}
|
||||
@ -226,17 +229,20 @@ static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
|
||||
put_page(page);
|
||||
}
|
||||
|
||||
/* The locked page covers the full range, nothing needs to be done */
|
||||
if (bytes + offset <= page_offset(locked_page) + PAGE_SIZE)
|
||||
return;
|
||||
/*
|
||||
* In case this page belongs to the delalloc range being instantiated
|
||||
* then skip it, since the first page of a range is going to be
|
||||
* properly cleaned up by the caller of run_delalloc_range
|
||||
*/
|
||||
if (page_start >= offset && page_end <= (offset + bytes - 1)) {
|
||||
bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE;
|
||||
offset = page_offset(locked_page) + PAGE_SIZE;
|
||||
if (locked_page) {
|
||||
/* The locked page covers the full range, nothing needs to be done */
|
||||
if (bytes + offset <= page_start + PAGE_SIZE)
|
||||
return;
|
||||
/*
|
||||
* In case this page belongs to the delalloc range being
|
||||
* instantiated then skip it, since the first page of a range is
|
||||
* going to be properly cleaned up by the caller of
|
||||
* run_delalloc_range
|
||||
*/
|
||||
if (page_start >= offset && page_end <= (offset + bytes - 1)) {
|
||||
bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE;
|
||||
offset = page_offset(locked_page) + PAGE_SIZE;
|
||||
}
|
||||
}
|
||||
|
||||
return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
|
||||
|
Loading…
Reference in New Issue
Block a user