netfs: Allow buffered shared-writeable mmap through netfs_page_mkwrite()
Provide an entry point to delegate a filesystem's ->page_mkwrite() to. This checks for conflicting writes, then attaches any netfs-specific group marking (e.g. ceph snap) to the page to be considered dirty.

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org
commit 102a7e2c59 (parent 938e13a73b)
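Before the diff, a minimal sketch of how a network filesystem might route its ->page_mkwrite() through this new entry point. The myfs_* names are hypothetical; a filesystem with no netfs-specific write grouping passes NULL for the group:

	/* Hypothetical glue for a filesystem "myfs" that does no write
	 * grouping; read-side faults are served by the generic page
	 * cache helpers.
	 */
	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
	{
		return netfs_page_mkwrite(vmf, NULL);
	}

	static const struct vm_operations_struct myfs_vm_ops = {
		.fault		= filemap_fault,
		.map_pages	= filemap_map_pages,
		.page_mkwrite	= myfs_page_mkwrite,
	};

	static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
	{
		file_accessed(file);
		vma->vm_ops = &myfs_vm_ops;
		return 0;
	}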
fs/netfs/buffered_write.c:
@@ -416,3 +416,62 @@ ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	return ret;
 }
 EXPORT_SYMBOL(netfs_file_write_iter);
+
+/*
+ * Notification that a previously read-only page is about to become writable.
+ * Note that the caller indicates a single page of a multipage folio.
+ */
+vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group)
+{
+	struct folio *folio = page_folio(vmf->page);
+	struct file *file = vmf->vma->vm_file;
+	struct inode *inode = file_inode(file);
+	vm_fault_t ret = VM_FAULT_RETRY;
+	int err;
+
+	_enter("%lx", folio->index);
+
+	sb_start_pagefault(inode->i_sb);
+
+	if (folio_wait_writeback_killable(folio))
+		goto out;
+
+	if (folio_lock_killable(folio) < 0)
+		goto out;
+
+	/* Can we see a streaming write here? */
+	if (WARN_ON(!folio_test_uptodate(folio))) {
+		ret = VM_FAULT_SIGBUS | VM_FAULT_LOCKED;
+		goto out;
+	}
+
+	if (netfs_folio_group(folio) != netfs_group) {
+		folio_unlock(folio);
+		err = filemap_fdatawait_range(inode->i_mapping,
+					      folio_pos(folio),
+					      folio_pos(folio) + folio_size(folio));
+		switch (err) {
+		case 0:
+			ret = VM_FAULT_RETRY;
+			goto out;
+		case -ENOMEM:
+			ret = VM_FAULT_OOM;
+			goto out;
+		default:
+			ret = VM_FAULT_SIGBUS;
+			goto out;
+		}
+	}
+
+	if (folio_test_dirty(folio))
+		trace_netfs_folio(folio, netfs_folio_trace_mkwrite_plus);
+	else
+		trace_netfs_folio(folio, netfs_folio_trace_mkwrite);
+	netfs_set_group(folio, netfs_group);
+	file_update_time(file);
+	ret = VM_FAULT_LOCKED;
+out:
+	sb_end_pagefault(inode->i_sb);
+	return ret;
+}
+EXPORT_SYMBOL(netfs_page_mkwrite);
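Where the filesystem does use write grouping (the ceph snap case the commit message mentions), the group object embeds a struct netfs_group, and a pointer to that embedded struct is what gets compared against netfs_folio_group() above. A hedged sketch with hypothetical myfs names; myfs_current_group() is an invented helper:

	/* Hypothetical write-group wrapper; the embedded struct netfs_group
	 * carries the refcounting for the grouping object.
	 */
	struct myfs_write_group {
		struct netfs_group	group;
		u64			generation;	/* e.g. a snapshot id */
	};

	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
	{
		struct inode *inode = file_inode(vmf->vma->vm_file);
		struct myfs_write_group *wg = myfs_current_group(inode);

		/* If the folio is still dirty under an older group,
		 * netfs_page_mkwrite() waits for it to flush and returns
		 * VM_FAULT_RETRY so the fault is replayed on a clean folio.
		 */
		return netfs_page_mkwrite(vmf, &wg->group);
	}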
include/linux/netfs.h:
@@ -400,6 +400,10 @@ void netfs_clear_inode_writeback(struct inode *inode, const void *aux);
 void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
 bool netfs_release_folio(struct folio *folio, gfp_t gfp);
+
+/* VMA operations API. */
+vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group);
+
 /* (Sub)request management API. */
 void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool);
 void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
 			  enum netfs_sreq_ref_trace what);
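Seen from userspace, the path this commit enables: the first store into a clean MAP_SHARED mapping of a file on such a filesystem faults the page writable and lands in netfs_page_mkwrite(). A minimal sketch (the mount path is made up):

	#include <fcntl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/mnt/netfs/file", O_RDWR);	/* hypothetical mount */
		char *p;

		if (fd < 0)
			return 1;
		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (p == MAP_FAILED)
			return 1;

		/* The first write to the clean shared page triggers
		 * ->page_mkwrite(), which dirties the folio and tags it
		 * with the netfs group.
		 */
		p[0] = 'x';

		munmap(p, 4096);
		close(fd);
		return 0;
	}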