/*
 * linux/mm/msync.c
 *
 * Copyright (C) 1994-1999 Linus Torvalds
 */
/*
 * The msync() system call.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/syscalls.h>
/*
 * sched.h must be included explicitly here ("Detach sched.h from mm.h"):
 * mm.h formerly included sched.h unconditionally, solely for the
 * can_do_mlock() inline, which dereferences "current".  That change
 *  a) removed the unconditional inclusion of sched.h from mm.h,
 *  b) made can_do_mlock() a normal function in mm/mlock.c,
 *  c) exported can_do_mlock() so compilation does not break,
 *  d) added sched.h back to files (such as this one) that were getting
 *     it indirectly, and
 *  e) added less bloated headers (asm/signal.h, jiffies.h) to files that
 *     were getting them indirectly.
 * Net result: mm.h users open, read, preprocess, and parse less code if
 * they don't need sched.h, and sched.h stops being a dependency for a
 * significant number of files (on x86_64 allmodconfig, touching sched.h
 * recompiled 4083 files before the change and 3744 after, -8.3%).
 */
#include <linux/sched.h>
/*
 * MS_SYNC syncs the entire file - including mappings.
 *
 * MS_ASYNC does not start I/O (it used to, up to 2.5.67).
 * Nor does it mark the relevant pages dirty (it used to up to 2.6.17).
 * Now it doesn't do anything, since dirty pages are properly tracked.
 *
 * The application may now run fsync() to
 * write out the dirty pages and wait on the writeout and check the result.
 * Or the application may run fadvise(FADV_DONTNEED) against the fd to start
 * async writeout immediately.
 * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
 * applications.
 */
/*
 * sys_msync - synchronize a file-backed mapping range with its backing file.
 * @start: userspace address of the range start (must be page-aligned)
 * @len:   length of the range in bytes (rounded up to a page multiple)
 * @flags: MS_ASYNC, MS_SYNC, MS_INVALIDATE (MS_ASYNC and MS_SYNC are
 *         mutually exclusive)
 *
 * Walks the VMAs covering [start, end).  For each shared file mapping,
 * MS_SYNC triggers vfs_fsync_range() on the corresponding file range.
 * Unmapped holes in the interval are skipped but remembered, so the call
 * still returns -ENOMEM at the end.  MS_INVALIDATE on a VM_LOCKED vma
 * fails with -EBUSY.
 *
 * Returns 0 on success, or -EINVAL / -ENOMEM / -EBUSY as above, or the
 * error from the fsync itself.
 */
SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
{
	unsigned long end;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int unmapped_error = 0;
	int error = -EINVAL;

	if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
		goto out;
	if (start & ~PAGE_MASK)
		goto out;
	if ((flags & MS_ASYNC) && (flags & MS_SYNC))
		goto out;
	error = -ENOMEM;
	len = (len + ~PAGE_MASK) & PAGE_MASK;	/* round len up to a page multiple */
	end = start + len;
	if (end < start)			/* address-space wraparound */
		goto out;
	error = 0;
	if (end == start)			/* zero-length range: trivially done */
		goto out;
	/*
	 * If the interval [start,end) covers some unmapped address ranges,
	 * just ignore them, but return -ENOMEM at the end.
	 */
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, start);
	for (;;) {
		struct file *file;
		loff_t fstart, fend;

		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out_unlock;
		/* Here start < vma->vm_end. */
		if (start < vma->vm_start) {
			/* Hole before this vma: skip it, remember -ENOMEM. */
			start = vma->vm_start;
			if (start >= end)
				goto out_unlock;
			unmapped_error = -ENOMEM;
		}
		/* Here vma->vm_start <= start < vma->vm_end. */
		if ((flags & MS_INVALIDATE) &&
				(vma->vm_flags & VM_LOCKED)) {
			error = -EBUSY;
			goto out_unlock;
		}
		file = vma->vm_file;
		/* Translate the user range into a byte range within the file. */
		fstart = (start - vma->vm_start) +
			 ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
		fend = fstart + (min(end, vma->vm_end) - start) - 1;
		start = vma->vm_end;
		if ((flags & MS_SYNC) && file &&
				(vma->vm_flags & VM_SHARED)) {
			/*
			 * Pin the file and drop mmap_sem across the fsync,
			 * which may block; re-lookup the vma afterwards since
			 * the map may have changed while unlocked.
			 */
			get_file(file);
			up_read(&mm->mmap_sem);
			error = vfs_fsync_range(file, fstart, fend, 1);
			fput(file);
			if (error || start >= end)
				goto out;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, start);
		} else {
			if (start >= end) {
				error = 0;
				goto out_unlock;
			}
			vma = vma->vm_next;
		}
	}
out_unlock:
	up_read(&mm->mmap_sem);
out:
	/* Report a hard error first; otherwise report any skipped holes. */
	return error ? : unmapped_error;
}