// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains vfs address (mmap) ops for 9P2000.
 *
 * Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/netfs.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
#include "fid.h"

/**
 * v9fs_issue_read - Issue a read from 9P
 * @subreq: The read to make
 */
static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct p9_fid *fid = rreq->netfs_priv;
	struct iov_iter to;
	loff_t pos = subreq->start + subreq->transferred;
	size_t len = subreq->len - subreq->transferred;
	int total, err;

	iov_iter_xarray(&to, ITER_DEST, &rreq->mapping->i_pages, pos, len);

	total = p9_client_read(fid, pos, &to, &err);

	/* if we just extended the file size, any portion not in
	 * cache won't be on server and is zeroes */
	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	netfs_subreq_terminated(subreq, err ?: total, false);
}
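
/*
 * Note: in the netfs model, a read request (rreq) is split into one or
 * more subrequests, and ->issue_read() is invoked for each one.  The
 * destination above is an ITER_XARRAY iov_iter over the inode's page
 * cache, so p9_client_read() copies straight into the pagecache pages.
 * NETFS_SREQ_CLEAR_TAIL asks netfs to zero-fill whatever part of the
 * subrequest the server did not return.  "err ?: total" is the GNU
 * conditional with omitted middle operand, i.e. "err ? err : total".
 */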

/**
 * v9fs_init_request - Initialise a read request
 * @rreq: The read request
 * @file: The file being read from
 */
static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct p9_fid *fid = file->private_data;

	BUG_ON(!fid);

	/* we might need to read from a fid that was opened write-only
	 * for read-modify-write of page cache, use the writeback fid
	 * for that */
	WARN_ON(rreq->origin == NETFS_READ_FOR_WRITE &&
		!(fid->mode & P9_ORDWR));

	p9_fid_get(fid);
	rreq->netfs_priv = fid;
	return 0;
}
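
/*
 * The reference taken with p9_fid_get() here is what keeps the fid
 * alive for the lifetime of the request; v9fs_free_request() below
 * drops it with p9_fid_put() once netfs is done with the request.
 */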

/**
 * v9fs_free_request - Cleanup request initialized by v9fs_init_request
 * @rreq: The I/O request to clean up
 */
static void v9fs_free_request(struct netfs_io_request *rreq)
{
	struct p9_fid *fid = rreq->netfs_priv;

	p9_fid_put(fid);
}

const struct netfs_request_ops v9fs_req_ops = {
	.init_request		= v9fs_init_request,
	.free_request		= v9fs_free_request,
	.issue_read		= v9fs_issue_read,
};
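
/*
 * These ops are consumed by the generic netfs read helpers:
 * v9fs_addr_operations at the bottom of this file points ->read_folio
 * and ->readahead at the netfs entry points, which handle the
 * page-cache bookkeeping and call back into v9fs_issue_read() only for
 * the actual wire I/O.
 */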

#ifdef CONFIG_9P_FSCACHE
static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
				     bool was_async)
{
	struct v9fs_inode *v9inode = priv;
	__le32 version;

	if (IS_ERR_VALUE(transferred_or_error) &&
	    transferred_or_error != -ENOBUFS) {
		version = cpu_to_le32(v9inode->qid.version);
		fscache_invalidate(v9fs_inode_cookie(v9inode), &version,
				   i_size_read(&v9inode->netfs.inode), 0);
	}
}
#endif

static int v9fs_vfs_write_folio_locked(struct folio *folio)
{
	struct inode *inode = folio_inode(folio);
	loff_t start = folio_pos(folio);
	loff_t i_size = i_size_read(inode);
	struct iov_iter from;
	size_t len = folio_size(folio);
	struct p9_fid *writeback_fid;
	int err;
	struct v9fs_inode __maybe_unused *v9inode = V9FS_I(inode);
	struct fscache_cookie __maybe_unused *cookie = v9fs_inode_cookie(v9inode);

	if (start >= i_size)
		return 0; /* Simultaneous truncation occurred */

	len = min_t(loff_t, i_size - start, len);

	iov_iter_xarray(&from, ITER_SOURCE, &folio_mapping(folio)->i_pages, start, len);

	writeback_fid = v9fs_fid_find_inode(inode, true, INVALID_UID, true);
	if (!writeback_fid) {
		WARN_ONCE(1, "folio expected an open fid inode->i_private=%p\n",
			  inode->i_private);
		return -EINVAL;
	}

	folio_wait_fscache(folio);
	folio_start_writeback(folio);

	p9_client_write(writeback_fid, start, &from, &err);

#ifdef CONFIG_9P_FSCACHE
	if (err == 0 &&
	    fscache_cookie_enabled(cookie) &&
	    test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) {
		folio_start_fscache(folio);
		fscache_write_to_cache(v9fs_inode_cookie(v9inode),
				       folio_mapping(folio), start, len, i_size,
				       v9fs_write_to_cache_done, v9inode,
				       true);
	}
#endif

	folio_end_writeback(folio);
	p9_fid_put(writeback_fid);

	return err;
}
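
/*
 * Note: writeback cannot rely on the file that dirtied the folio still
 * being open, so a writeable fid is looked up from the inode at write
 * time via v9fs_fid_find_inode().  After a successful server write,
 * the fscache branch mirrors the data into the local cache; both paths
 * are bracketed by folio_start_writeback()/folio_end_writeback().
 */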

static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int retval;

	p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);

	retval = v9fs_vfs_write_folio_locked(folio);
	if (retval < 0) {
		if (retval == -EAGAIN) {
			folio_redirty_for_writepage(wbc, folio);
			retval = 0;
		} else {
			mapping_set_error(folio_mapping(folio), retval);
		}
	} else
		retval = 0;

	folio_unlock(folio);
	return retval;
}

static int v9fs_launder_folio(struct folio *folio)
{
	int retval;

	if (folio_clear_dirty_for_io(folio)) {
		retval = v9fs_vfs_write_folio_locked(folio);
		if (retval)
			return retval;
	}
	folio_wait_fscache(folio);
	return 0;
}
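
/*
 * Roughly: ->launder_folio is called when a dirty folio is about to be
 * invalidated and must be written back synchronously first; the
 * folio_wait_fscache() call additionally waits for any in-flight write
 * to the local cache before the folio can be released.
 */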

/**
 * v9fs_direct_IO - 9P address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: The data/buffer to use
 *
 * The presence of v9fs_direct_IO() in the address space ops vector
 * allows open() O_DIRECT flags which would have failed otherwise.
 *
 * In the non-cached mode, we shunt off direct read and write requests before
 * the VFS gets them, so this method should never be called.
 *
 * Direct IO is not 'yet' supported in the cached mode. Hence when
 * this routine is called through generic_file_aio_read(), the read/write fails
 * with an error.
 *
 */
static ssize_t
v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	ssize_t n;
	int err = 0;

	if (iov_iter_rw(iter) == WRITE) {
		n = p9_client_write(file->private_data, pos, iter, &err);
		if (n) {
			struct inode *inode = file_inode(file);
			loff_t i_size = i_size_read(inode);

			if (pos + n > i_size)
				inode_add_bytes(inode, pos + n - i_size);
		}
	} else {
		n = p9_client_read(file->private_data, pos, iter, &err);
	}
	return n ? n : err;
}
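
/*
 * p9_client_read()/p9_client_write() return the number of bytes
 * transferred and report failures through *err, so "n ? n : err"
 * returns a byte count whenever any data moved and the error code only
 * on a complete failure (a short transfer is not an error here).
 */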

static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
			    loff_t pos, unsigned int len,
			    struct page **subpagep, void **fsdata)
{
	int retval;
	struct folio *folio;
	struct v9fs_inode *v9inode = V9FS_I(mapping->host);

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	/* Prefetch area to be written into the cache if we're caching this
	 * file.  We need to do this before we get a lock on the page in case
	 * there's more than one writer competing for the same cache block.
	 */
	retval = netfs_write_begin(&v9inode->netfs, filp, mapping, pos, len, &folio, fsdata);
	if (retval < 0)
		return retval;

	*subpagep = &folio->page;
	return retval;
}

static int v9fs_write_end(struct file *filp, struct address_space *mapping,
			  loff_t pos, unsigned int len, unsigned int copied,
			  struct page *subpage, void *fsdata)
{
	loff_t last_pos = pos + copied;
	struct folio *folio = page_folio(subpage);
	struct inode *inode = mapping->host;

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	if (!folio_test_uptodate(folio)) {
		if (unlikely(copied < len)) {
			copied = 0;
			goto out;
		}

		folio_mark_uptodate(folio);
	}

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size) {
		inode_add_bytes(inode, last_pos - inode->i_size);
		i_size_write(inode, last_pos);
#ifdef CONFIG_9P_FSCACHE
		fscache_update_cookie(v9fs_inode_cookie(V9FS_I(inode)), NULL,
				      &last_pos);
#endif
	}

	folio_mark_dirty(folio);
out:
	folio_unlock(folio);
	folio_put(folio);

	return copied;
}
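
/*
 * Note: write_begin/write_end pair up in the buffered write path:
 * netfs_write_begin() hands back a locked, up-to-date folio (reading
 * it from the server or cache first if only part of it will be
 * overwritten), and write_end publishes the new i_size and marks the
 * folio dirty so that writeback picks it up later.
 */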

const struct address_space_operations v9fs_addr_operations = {
	.read_folio = netfs_read_folio,
	.readahead = netfs_readahead,
	.dirty_folio = netfs_dirty_folio,
	.writepage = v9fs_vfs_writepage,
	.write_begin = v9fs_write_begin,
	.write_end = v9fs_write_end,
	.release_folio = netfs_release_folio,
	.invalidate_folio = netfs_invalidate_folio,
	.launder_folio = v9fs_launder_folio,
	.direct_IO = v9fs_direct_IO,
};