nilfs2: convert nilfs_page_mkwrite() to use a folio
Using the new folio APIs saves seven hidden calls to compound_head().

Link: https://lkml.kernel.org/r/20231114084436.2755-12-konishi.ryusuke@gmail.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
ff5710c3f3
commit
5a5cad8cb2
@@ -45,34 +45,36 @@ int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
|
||||
static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)
|
||||
{
|
||||
struct vm_area_struct *vma = vmf->vma;
|
||||
struct page *page = vmf->page;
|
||||
struct folio *folio = page_folio(vmf->page);
|
||||
struct inode *inode = file_inode(vma->vm_file);
|
||||
struct nilfs_transaction_info ti;
|
||||
struct buffer_head *bh, *head;
|
||||
int ret = 0;
|
||||
|
||||
if (unlikely(nilfs_near_disk_full(inode->i_sb->s_fs_info)))
|
||||
return VM_FAULT_SIGBUS; /* -ENOSPC */
|
||||
|
||||
sb_start_pagefault(inode->i_sb);
|
||||
lock_page(page);
|
||||
if (page->mapping != inode->i_mapping ||
|
||||
page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) {
|
||||
unlock_page(page);
|
||||
folio_lock(folio);
|
||||
if (folio->mapping != inode->i_mapping ||
|
||||
folio_pos(folio) >= i_size_read(inode) ||
|
||||
!folio_test_uptodate(folio)) {
|
||||
folio_unlock(folio);
|
||||
ret = -EFAULT; /* make the VM retry the fault */
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* check to see if the page is mapped already (no holes)
|
||||
* check to see if the folio is mapped already (no holes)
|
||||
*/
|
||||
if (PageMappedToDisk(page))
|
||||
if (folio_test_mappedtodisk(folio))
|
||||
goto mapped;
|
||||
|
||||
if (page_has_buffers(page)) {
|
||||
struct buffer_head *bh, *head;
|
||||
head = folio_buffers(folio);
|
||||
if (head) {
|
||||
int fully_mapped = 1;
|
||||
|
||||
bh = head = page_buffers(page);
|
||||
bh = head;
|
||||
do {
|
||||
if (!buffer_mapped(bh)) {
|
||||
fully_mapped = 0;
|
||||
@@ -81,11 +83,11 @@ static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)
|
||||
} while (bh = bh->b_this_page, bh != head);
|
||||
|
||||
if (fully_mapped) {
|
||||
SetPageMappedToDisk(page);
|
||||
folio_set_mappedtodisk(folio);
|
||||
goto mapped;
|
||||
}
|
||||
}
|
||||
unlock_page(page);
|
||||
folio_unlock(folio);
|
||||
|
||||
/*
|
||||
* fill hole blocks
|
||||
@@ -105,7 +107,7 @@ static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)
|
||||
nilfs_transaction_commit(inode->i_sb);
|
||||
|
||||
mapped:
|
||||
wait_for_stable_page(page);
|
||||
folio_wait_stable(folio);
|
||||
out:
|
||||
sb_end_pagefault(inode->i_sb);
|
||||
return vmf_fs_error(ret);
|
||||
|
Loading…
x
Reference in New Issue
Block a user