ntfs: convert ntfs_read_block() to use a folio
The caller already has the folio, so pass it in and use the folio API
throughout, saving five hidden calls to compound_head().

Link: https://lkml.kernel.org/r/20231016201114.1928083-18-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Andreas Gruenbacher <agruenba@redhat.com>
Cc: Pankaj Raghav <p.raghav@samsung.com>
Cc: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit a2da3afce9 (parent 922b12eff0)
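The saving comes from the fact that the page-based helpers replaced below (SetPageUptodate(), unlock_page(), PageError() and friends) each re-derive the head page via a hidden compound_head() call, while the folio helpers act on the folio directly. A minimal before/after sketch of that pattern, assuming a kernel build context (the helper names are made up for illustration; this is not code from the patch):

        #include <linux/pagemap.h>

        /* Old style: each helper below hides a compound_head() lookup. */
        static void finish_read_page(struct page *page)
        {
                SetPageUptodate(page);          /* compound_head() inside */
                unlock_page(page);              /* compound_head() inside */
        }

        /* New style: the folio is already the head, so nothing is re-derived. */
        static void finish_read_folio(struct folio *folio)
        {
                folio_mark_uptodate(folio);
                folio_unlock(folio);
        }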
@@ -145,13 +145,12 @@ still_busy:
 }
 
 /**
- * ntfs_read_block - fill a @page of an address space with data
- * @page: page cache page to fill with data
+ * ntfs_read_block - fill a @folio of an address space with data
+ * @folio: page cache folio to fill with data
  *
- * Fill the page @page of the address space belonging to the @page->host inode.
  * We read each buffer asynchronously and when all buffers are read in, our io
  * completion handler ntfs_end_buffer_read_async(), if required, automatically
- * applies the mst fixups to the page before finally marking it uptodate and
+ * applies the mst fixups to the folio before finally marking it uptodate and
  * unlocking it.
  *
  * We only enforce allocated_size limit because i_size is checked for in
@@ -161,7 +160,7 @@ still_busy:
  *
  * Contains an adapted version of fs/buffer.c::block_read_full_folio().
  */
-static int ntfs_read_block(struct page *page)
+static int ntfs_read_block(struct folio *folio)
 {
         loff_t i_size;
         VCN vcn;
@@ -178,7 +177,7 @@ static int ntfs_read_block(struct page *page)
         int i, nr;
         unsigned char blocksize_bits;
 
-        vi = page->mapping->host;
+        vi = folio->mapping->host;
         ni = NTFS_I(vi);
         vol = ni->vol;
 
@@ -188,15 +187,10 @@ static int ntfs_read_block(struct page *page)
         blocksize = vol->sb->s_blocksize;
         blocksize_bits = vol->sb->s_blocksize_bits;
 
-        if (!page_has_buffers(page)) {
-                create_empty_buffers(page, blocksize, 0);
-                if (unlikely(!page_has_buffers(page))) {
-                        unlock_page(page);
-                        return -ENOMEM;
-                }
-        }
-        bh = head = page_buffers(page);
-        BUG_ON(!bh);
+        head = folio_buffers(folio);
+        if (!head)
+                head = folio_create_empty_buffers(folio, blocksize, 0);
+        bh = head;
 
         /*
          * We may be racing with truncate. To avoid some of the problems we
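The hunk above replaces the open-coded page_has_buffers() / create_empty_buffers() / page_buffers() sequence, together with its -ENOMEM fallback, by folio_buffers() plus folio_create_empty_buffers(), which hands back the head buffer directly, so the old allocation-failure branch goes away. A condensed sketch of that idiom outside the NTFS code (the helper name is invented; blocksize would come from the superblock as in the function above):

        #include <linux/buffer_head.h>

        /* Illustrative only: reuse or attach buffer heads on a folio. */
        static struct buffer_head *get_folio_buffers(struct folio *folio,
                                                     unsigned long blocksize)
        {
                struct buffer_head *head = folio_buffers(folio);

                if (!head)
                        head = folio_create_empty_buffers(folio, blocksize, 0);
                return head;
        }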
@@ -205,11 +199,11 @@ static int ntfs_read_block(struct page *page)
          * may leave some buffers unmapped which are now allocated. This is
          * not a problem since these buffers will just get mapped when a write
          * occurs. In case of a shrinking truncate, we will detect this later
-         * on due to the runlist being incomplete and if the page is being
+         * on due to the runlist being incomplete and if the folio is being
          * fully truncated, truncate will throw it away as soon as we unlock
          * it so no need to worry what we do with it.
          */
-        iblock = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
+        iblock = (s64)folio->index << (PAGE_SHIFT - blocksize_bits);
         read_lock_irqsave(&ni->size_lock, flags);
         lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
         init_size = ni->initialized_size;
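The iblock arithmetic is unchanged apart from reading folio->index: it converts the folio's page-cache index into the first file-system block the folio covers. As a worked example with assumed values (4KiB pages and 512-byte blocks, so blocksize_bits = 9 and PAGE_SHIFT - blocksize_bits = 3), folio index 2 maps to iblock 2 << 3 = 16:

        /* Assumed example values, not taken from the patch. */
        unsigned char blocksize_bits = 9;       /* 512-byte blocks */
        pgoff_t index = 2;                      /* third folio in the file */
        s64 iblock = (s64)index << (PAGE_SHIFT - blocksize_bits);
        /* PAGE_SHIFT == 12 with 4KiB pages, so iblock == 16 here. */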
@@ -221,7 +215,7 @@ static int ntfs_read_block(struct page *page)
         }
         zblock = (init_size + blocksize - 1) >> blocksize_bits;
 
-        /* Loop through all the buffers in the page. */
+        /* Loop through all the buffers in the folio. */
         rl = NULL;
         nr = i = 0;
         do {
@@ -299,7 +293,7 @@ lock_retry_remap:
                         if (!err)
                                 err = -EIO;
                         bh->b_blocknr = -1;
-                        SetPageError(page);
+                        folio_set_error(folio);
                         ntfs_error(vol->sb, "Failed to read from inode 0x%lx, "
                                         "attribute type 0x%x, vcn 0x%llx, "
                                         "offset 0x%x because its location on "
@@ -312,13 +306,13 @@ lock_retry_remap:
                 /*
                  * Either iblock was outside lblock limits or
                  * ntfs_rl_vcn_to_lcn() returned error. Just zero that portion
-                 * of the page and set the buffer uptodate.
+                 * of the folio and set the buffer uptodate.
                  */
 handle_hole:
                 bh->b_blocknr = -1UL;
                 clear_buffer_mapped(bh);
 handle_zblock:
-                zero_user(page, i * blocksize, blocksize);
+                folio_zero_range(folio, i * blocksize, blocksize);
                 if (likely(!err))
                         set_buffer_uptodate(bh);
         } while (i++, iblock++, (bh = bh->b_this_page) != head);
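folio_zero_range() is the folio counterpart of zero_user(): it zeroes a byte range inside the folio (mapping and flushing highmem pages as needed), and the (i * blocksize, blocksize) range used for a hole or a not-yet-initialised block stays the same. A hedged usage sketch (the helper name is invented):

        #include <linux/highmem.h>

        /* Illustrative only: zero the i-th block-sized chunk of a folio. */
        static void zero_folio_block(struct folio *folio, int i,
                                     unsigned int blocksize)
        {
                folio_zero_range(folio, i * blocksize, blocksize);
        }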
@@ -349,11 +343,11 @@ handle_zblock:
                 return 0;
         }
         /* No i/o was scheduled on any of the buffers. */
-        if (likely(!PageError(page)))
-                SetPageUptodate(page);
+        if (likely(!folio_test_error(folio)))
+                folio_mark_uptodate(folio);
         else /* Signal synchronous i/o error. */
                 nr = -EIO;
-        unlock_page(page);
+        folio_unlock(folio);
         return nr;
 }
 
@@ -433,7 +427,7 @@ retry_readpage:
         /* NInoNonResident() == NInoIndexAllocPresent() */
         if (NInoNonResident(ni)) {
                 /* Normal, non-resident data stream. */
-                return ntfs_read_block(page);
+                return ntfs_read_block(folio);
         }
         /*
          * Attribute is resident, implying it is not compressed or encrypted.
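The last hunk is the call site in the ->read_folio path, which already receives a struct folio from the VFS, so it can now hand the folio straight to ntfs_read_block() with no conversion back to a page. For orientation, the general shape of such an entry point looks roughly like this (a generic sketch with invented names, not the NTFS implementation):

        #include <linux/fs.h>
        #include <linux/pagemap.h>
        #include <linux/highmem.h>

        /* Generic sketch of a ->read_folio implementation (names invented). */
        static int example_read_folio(struct file *file, struct folio *folio)
        {
                /*
                 * A real filesystem would read data into the folio here; this
                 * sketch just zeroes it so the example stays self-contained.
                 */
                folio_zero_range(folio, 0, folio_size(folio));
                folio_mark_uptodate(folio);
                folio_unlock(folio);
                return 0;
        }

        static const struct address_space_operations example_aops = {
                .read_folio     = example_read_folio,
        };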