ext4: Convert invalidatepage to invalidate_folio
Extensive changes, but fairly mechanical.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Tested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Tested-by: Mike Marshall <hubcap@omnibond.com> # orangefs
Tested-by: David Howells <dhowells@redhat.com> # afs
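The conversion follows one mechanical pattern throughout: an address_space_operations ->invalidatepage hook that takes a struct page and unsigned int byte offsets becomes an ->invalidate_folio hook that takes a struct folio and size_t offsets, with PAGE_SIZE comparisons replaced by folio_size() so the logic stays correct for multi-page folios. Below is a minimal sketch of that shape; the example_* filesystem name and hooks are hypothetical and only illustrate the pattern, not ext4's actual code (which is in the diff that follows).

/*
 * Illustrative only: a hypothetical filesystem's hooks, showing the shape of
 * the invalidatepage -> invalidate_folio conversion.  Not part of this commit.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>

/* Old-style hook: operates on a single page, byte range as unsigned int. */
static void example_invalidatepage(struct page *page, unsigned int offset,
				   unsigned int length)
{
	/* Whole page invalidated: drop the "checked" state used by journalling. */
	if (offset == 0 && length == PAGE_SIZE)
		ClearPageChecked(page);
}

/* New-style hook: operates on a (possibly multi-page) folio, range as size_t. */
static void example_invalidate_folio(struct folio *folio, size_t offset,
				     size_t length)
{
	/* Compare against folio_size(), not PAGE_SIZE, to cover large folios. */
	if (offset == 0 && length == folio_size(folio))
		folio_clear_checked(folio);
}

/* The aops wiring changes from .invalidatepage to .invalidate_folio. */
static const struct address_space_operations example_aops = {
	.invalidate_folio	= example_invalidate_folio,
};

Swapping PAGE_SIZE for folio_size() (and the page helpers for their folio equivalents) is the substantive part of the change; the rest is renaming.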
@@ -184,7 +184,7 @@ void ext4_evict_inode(struct inode *inode)
 	 * journal. So although mm thinks everything is clean and
 	 * ready for reaping the inode might still have some pages to
 	 * write in the running transaction or waiting to be
-	 * checkpointed. Thus calling jbd2_journal_invalidatepage()
+	 * checkpointed. Thus calling jbd2_journal_invalidate_folio()
 	 * (via truncate_inode_pages()) to discard these buffers can
 	 * cause data loss. Also even if we did not discard these
 	 * buffers, we would have no way to find them after the inode
@@ -3186,7 +3186,7 @@ static void ext4_readahead(struct readahead_control *rac)
 static void ext4_invalidate_folio(struct folio *folio, size_t offset,
 				size_t length)
 {
-	trace_ext4_invalidatepage(&folio->page, offset, length);
+	trace_ext4_invalidate_folio(folio, offset, length);
 
 	/* No journalling happens on data buffers when this function is used */
 	WARN_ON(folio_buffers(folio) && buffer_jbd(folio_buffers(folio)));
@@ -3194,29 +3194,28 @@ static void ext4_invalidate_folio(struct folio *folio, size_t offset,
 	block_invalidate_folio(folio, offset, length);
 }
 
-static int __ext4_journalled_invalidatepage(struct page *page,
-					    unsigned int offset,
-					    unsigned int length)
+static int __ext4_journalled_invalidate_folio(struct folio *folio,
+					      size_t offset, size_t length)
 {
-	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
+	journal_t *journal = EXT4_JOURNAL(folio->mapping->host);
 
-	trace_ext4_journalled_invalidatepage(page, offset, length);
+	trace_ext4_journalled_invalidate_folio(folio, offset, length);
 
 	/*
 	 * If it's a full truncate we just forget about the pending dirtying
 	 */
-	if (offset == 0 && length == PAGE_SIZE)
-		ClearPageChecked(page);
+	if (offset == 0 && length == folio_size(folio))
+		folio_clear_checked(folio);
 
-	return jbd2_journal_invalidatepage(journal, page, offset, length);
+	return jbd2_journal_invalidate_folio(journal, folio, offset, length);
 }
 
 /* Wrapper for aops... */
-static void ext4_journalled_invalidatepage(struct page *page,
-					   unsigned int offset,
-					   unsigned int length)
+static void ext4_journalled_invalidate_folio(struct folio *folio,
+					     size_t offset,
+					     size_t length)
 {
-	WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0);
+	WARN_ON(__ext4_journalled_invalidate_folio(folio, offset, length) < 0);
 }
 
 static int ext4_releasepage(struct page *page, gfp_t wait)
@@ -3601,7 +3600,7 @@ static const struct address_space_operations ext4_journalled_aops = {
 	.write_end	= ext4_journalled_write_end,
 	.set_page_dirty	= ext4_journalled_set_page_dirty,
 	.bmap		= ext4_bmap,
-	.invalidatepage	= ext4_journalled_invalidatepage,
+	.invalidate_folio	= ext4_journalled_invalidate_folio,
 	.releasepage	= ext4_releasepage,
 	.direct_IO	= noop_direct_IO,
 	.is_partially_uptodate  = block_is_partially_uptodate,
@@ -5204,13 +5203,12 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
 }
 
 /*
- * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
- * buffers that are attached to a page stradding i_size and are undergoing
+ * In data=journal mode ext4_journalled_invalidate_folio() may fail to invalidate
+ * buffers that are attached to a folio straddling i_size and are undergoing
 * commit. In that case we have to wait for commit to finish and try again.
 */
 static void ext4_wait_for_tail_page_commit(struct inode *inode)
 {
-	struct page *page;
 	unsigned offset;
 	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
 	tid_t commit_tid = 0;
@@ -5218,25 +5216,25 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
 
 	offset = inode->i_size & (PAGE_SIZE - 1);
 	/*
-	 * If the page is fully truncated, we don't need to wait for any commit
-	 * (and we even should not as __ext4_journalled_invalidatepage() may
-	 * strip all buffers from the page but keep the page dirty which can then
-	 * confuse e.g. concurrent ext4_writepage() seeing dirty page without
+	 * If the folio is fully truncated, we don't need to wait for any commit
+	 * (and we even should not as __ext4_journalled_invalidate_folio() may
+	 * strip all buffers from the folio but keep the folio dirty which can then
+	 * confuse e.g. concurrent ext4_writepage() seeing dirty folio without
 	 * buffers). Also we don't need to wait for any commit if all buffers in
-	 * the page remain valid. This is most beneficial for the common case of
+	 * the folio remain valid. This is most beneficial for the common case of
 	 * blocksize == PAGESIZE.
 	 */
 	if (!offset || offset > (PAGE_SIZE - i_blocksize(inode)))
 		return;
 	while (1) {
-		page = find_lock_page(inode->i_mapping,
+		struct folio *folio = filemap_lock_folio(inode->i_mapping,
 				      inode->i_size >> PAGE_SHIFT);
-		if (!page)
+		if (!folio)
 			return;
-		ret = __ext4_journalled_invalidatepage(page, offset,
-						PAGE_SIZE - offset);
-		unlock_page(page);
-		put_page(page);
+		ret = __ext4_journalled_invalidate_folio(folio, offset,
+						folio_size(folio) - offset);
+		folio_unlock(folio);
+		folio_put(folio);
 		if (ret != -EBUSY)
 			return;
 		commit_tid = 0;