2019-05-28 09:57:16 -07:00
// SPDX-License-Identifier: GPL-2.0-only
2006-01-18 17:43:02 -08:00
/*
 * This file contains vfs address (mmap) ops for 9P2000.
 *
 * Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 */
# include <linux/module.h>
# include <linux/errno.h>
# include <linux/fs.h>
# include <linux/file.h>
# include <linux/stat.h>
# include <linux/string.h>
# include <linux/pagemap.h>
Detach sched.h from mm.h
First thing mm.h does is including sched.h solely for can_do_mlock() inline
function which has "current" dereference inside. By dealing with can_do_mlock()
mm.h can be detached from sched.h which is good. See below, why.
This patch
a) removes unconditional inclusion of sched.h from mm.h
b) makes can_do_mlock() normal function in mm/mlock.c
c) exports can_do_mlock() to not break compilation
d) adds sched.h inclusions back to files that were getting it indirectly.
e) adds less bloated headers to some files (asm/signal.h, jiffies.h) that were
getting them indirectly
Net result is:
a) mm.h users would get less code to open, read, preprocess, parse, ... if
they don't need sched.h
b) sched.h stops being dependency for significant number of files:
on x86_64 allmodconfig touching sched.h results in recompile of 4083 files,
after patch it's only 3744 (-8.3%).
Cross-compile tested on
all arm defconfigs, all mips defconfigs, all powerpc defconfigs,
alpha alpha-up
arm
i386 i386-up i386-defconfig i386-allnoconfig
ia64 ia64-up
m68k
mips
parisc parisc-up
powerpc powerpc-up
s390 s390-up
sparc sparc-up
sparc64 sparc64-up
um-x86_64
x86_64 x86_64-up x86_64-defconfig x86_64-allnoconfig
as well as my two usual configs.
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-21 01:22:52 +04:00
# include <linux/sched.h>
2021-12-22 17:21:04 +00:00
# include <linux/swap.h>
2015-02-22 08:58:50 -08:00
# include <linux/uio.h>
2021-11-02 08:29:55 +00:00
# include <linux/netfs.h>
2007-07-10 17:57:28 -05:00
# include <net/9p/9p.h>
# include <net/9p/client.h>
2023-12-06 12:48:56 +00:00
# include <trace/events/netfs.h>
2006-01-18 17:43:02 -08:00
# include "v9fs.h"
# include "v9fs_vfs.h"
2009-09-23 13:00:27 -05:00
# include "cache.h"
2011-02-28 17:03:58 +05:30
# include "fid.h"
2006-01-18 17:43:02 -08:00
2024-03-18 20:29:53 +00:00
/*
* Writeback calls this when it finds a folio that needs uploading . This isn ' t
* called if writeback only has copy - to - cache to deal with .
*/
static void v9fs_begin_writeback ( struct netfs_io_request * wreq )
{
struct p9_fid * fid ;
fid = v9fs_fid_find_inode ( wreq - > inode , true , INVALID_UID , true ) ;
if ( ! fid ) {
WARN_ONCE ( 1 , " folio expected an open fid inode->i_ino=%lx \n " ,
wreq - > inode - > i_ino ) ;
return ;
}
wreq - > wsize = fid - > clnt - > msize - P9_IOHDRSZ ;
if ( fid - > iounit )
wreq - > wsize = min ( wreq - > wsize , fid - > iounit ) ;
wreq - > netfs_priv = fid ;
wreq - > io_streams [ 0 ] . avail = true ;
}
/*
* Issue a subrequest to write to the server .
*/
static void v9fs_issue_write ( struct netfs_io_subrequest * subreq )
{
struct p9_fid * fid = subreq - > rreq - > netfs_priv ;
int err , len ;
len = p9_client_write ( fid , subreq - > start , & subreq - > io_iter , & err ) ;
netfs_write_subrequest_terminated ( subreq , len ? : err , false ) ;
}
2006-01-18 17:43:02 -08:00
/**
2022-02-17 10:14:32 +00:00
* v9fs_issue_read - Issue a read from 9 P
2021-11-02 08:29:55 +00:00
* @ subreq : The read to make
2006-01-18 17:43:02 -08:00
*/
2022-02-17 10:14:32 +00:00
static void v9fs_issue_read ( struct netfs_io_subrequest * subreq )
2006-01-18 17:43:02 -08:00
{
2022-02-17 10:01:23 +00:00
struct netfs_io_request * rreq = subreq - > rreq ;
2021-11-02 08:29:55 +00:00
struct p9_fid * fid = rreq - > netfs_priv ;
int total , err ;
2007-02-11 13:21:39 -06:00
2023-12-06 12:48:56 +00:00
total = p9_client_read ( fid , subreq - > start + subreq - > transferred ,
& subreq - > io_iter , & err ) ;
2022-01-10 20:10:31 +09:00
/* if we just extended the file size, any portion not in
* cache won ' t be on server and is zeroes */
__set_bit ( NETFS_SREQ_CLEAR_TAIL , & subreq - > flags ) ;
2021-11-02 08:29:55 +00:00
netfs_subreq_terminated ( subreq , err ? : total , false ) ;
}
2009-09-23 13:00:27 -05:00
2021-11-02 08:29:55 +00:00
/**
2023-12-06 12:48:56 +00:00
* v9fs_init_request - Initialise a request
2021-11-02 08:29:55 +00:00
* @ rreq : The read request
* @ file : The file being read from
*/
2022-01-20 21:55:46 +00:00
static int v9fs_init_request ( struct netfs_io_request * rreq , struct file * file )
2021-11-02 08:29:55 +00:00
{
2023-12-06 12:48:56 +00:00
struct p9_fid * fid ;
bool writing = ( rreq - > origin = = NETFS_READ_FOR_WRITE | |
rreq - > origin = = NETFS_WRITETHROUGH | |
rreq - > origin = = NETFS_UNBUFFERED_WRITE | |
rreq - > origin = = NETFS_DIO_WRITE ) ;
2024-03-18 20:29:53 +00:00
if ( rreq - > origin = = NETFS_WRITEBACK )
return 0 ; /* We don't get the write handle until we find we
* have actually dirty data and not just
* copy - to - cache data .
*/
2023-12-06 12:48:56 +00:00
if ( file ) {
fid = file - > private_data ;
2024-01-03 12:08:46 +00:00
if ( ! fid )
goto no_fid ;
2023-12-06 12:48:56 +00:00
p9_fid_get ( fid ) ;
} else {
fid = v9fs_fid_find_inode ( rreq - > inode , writing , INVALID_UID , true ) ;
2024-01-03 12:08:46 +00:00
if ( ! fid )
goto no_fid ;
2023-12-06 12:48:56 +00:00
}
2022-06-14 12:19:02 +09:00
2024-03-18 20:29:53 +00:00
rreq - > wsize = fid - > clnt - > msize - P9_IOHDRSZ ;
if ( fid - > iounit )
rreq - > wsize = min ( rreq - > wsize , fid - > iounit ) ;
2022-06-14 12:19:02 +09:00
/* we might need to read from a fid that was opened write-only
* for read - modify - write of page cache , use the writeback fid
* for that */
2023-12-06 12:48:56 +00:00
WARN_ON ( rreq - > origin = = NETFS_READ_FOR_WRITE & & ! ( fid - > mode & P9_ORDWR ) ) ;
2021-11-02 08:29:55 +00:00
rreq - > netfs_priv = fid ;
2022-01-20 21:55:46 +00:00
return 0 ;
2024-01-03 12:08:46 +00:00
no_fid :
WARN_ONCE ( 1 , " folio expected an open fid inode->i_ino=%lx \n " ,
rreq - > inode - > i_ino ) ;
return - EINVAL ;
2021-11-02 08:29:55 +00:00
}
2006-01-18 17:43:02 -08:00
2021-11-02 08:29:55 +00:00
/**
2022-02-25 11:19:14 +00:00
* v9fs_free_request - Cleanup request initialized by v9fs_init_rreq
* @ rreq : The I / O request to clean up
2021-11-02 08:29:55 +00:00
*/
2022-02-25 11:19:14 +00:00
static void v9fs_free_request ( struct netfs_io_request * rreq )
2021-11-02 08:29:55 +00:00
{
2022-02-25 11:19:14 +00:00
struct p9_fid * fid = rreq - > netfs_priv ;
2006-01-18 17:43:02 -08:00
2022-06-12 13:42:32 +09:00
p9_fid_put ( fid ) ;
2021-11-02 08:29:55 +00:00
}
2009-09-23 13:00:27 -05:00
2021-06-29 22:37:05 +01:00
const struct netfs_request_ops v9fs_req_ops = {
2022-02-17 10:01:23 +00:00
. init_request = v9fs_init_request ,
2022-02-25 11:19:14 +00:00
. free_request = v9fs_free_request ,
2022-02-17 10:14:32 +00:00
. issue_read = v9fs_issue_read ,
2024-03-18 20:29:53 +00:00
. begin_writeback = v9fs_begin_writeback ,
. issue_write = v9fs_issue_write ,
2021-11-02 08:29:55 +00:00
} ;
2006-06-28 04:26:44 -07:00
const struct address_space_operations v9fs_addr_operations = {
2023-12-06 12:48:56 +00:00
. read_folio = netfs_read_folio ,
. readahead = netfs_readahead ,
. dirty_folio = netfs_dirty_folio ,
. release_folio = netfs_release_folio ,
. invalidate_folio = netfs_invalidate_folio ,
. direct_IO = noop_direct_IO ,
. writepages = netfs_writepages ,
2006-01-18 17:43:02 -08:00
} ;