/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */
#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/module.h>
int can_do_mlock(void)
{
	if (capable(CAP_IPC_LOCK))
		return 1;
	if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
		return 1;
	return 0;
}
EXPORT_SYMBOL(can_do_mlock);
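
/*
 * Illustrative userspace sketch (not part of this file): the rule above
 * means a process may lock memory if it has CAP_IPC_LOCK or a non-zero
 * RLIMIT_MEMLOCK soft limit. The limit is visible via getrlimit(2):
 *
 *	#include <sys/resource.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct rlimit rl;
 *
 *		if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0)
 *			printf("RLIMIT_MEMLOCK soft limit: %lu bytes\n",
 *			       (unsigned long)rl.rlim_cur);
 *		return 0;
 *	}
 */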
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int pages;
	int ret = 0;

	if (newflags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, make_pages_present below will bring it back.
	 */
	vma->vm_flags = newflags;

	/*
	 * Keep track of amount of locked VM.
	 */
	pages = (end - start) >> PAGE_SHIFT;
	if (newflags & VM_LOCKED) {
		pages = -pages;
		if (!(newflags & VM_IO))
			ret = make_pages_present(start, end);
	}

	mm->locked_vm -= pages;
out:
	if (ret == -ENOMEM)
		ret = -EAGAIN;
	return ret;
}
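
/*
 * Worked example (hypothetical addresses): mlocking [0x2000, 0x3000) out
 * of a VMA spanning [0x1000, 0x4000) first tries vma_merge() with the
 * neighbours; when that fails, split_vma() carves off [0x1000, 0x2000)
 * and [0x3000, 0x4000), leaving only the middle VMA to take VM_LOCKED.
 */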
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct *vma, *prev;
	int error;

	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma_prev(current->mm, start, &prev);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}
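
/*
 * Note (illustrative): the walk above requires the VMAs covering
 * [start, end) to be contiguous. With VMAs at [0x1000, 0x2000) and
 * [0x3000, 0x4000), locking [0x1000, 0x4000) fails with -ENOMEM once
 * the loop reaches the hole at 0x2000.
 */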
asmlinkage long sys_mlock(unsigned long start, size_t len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
	return error;
}
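
/*
 * Userspace usage sketch (illustrative, not part of this file): pinning
 * a buffer so it cannot be paged out, e.g. for key material. The kernel
 * rounds the range outward to page boundaries, as the PAGE_ALIGN and
 * PAGE_MASK arithmetic above shows.
 *
 *	#include <sys/mman.h>
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		char *buf = malloc(4096);
 *
 *		if (buf && mlock(buf, 4096) == 0) {
 *			// buf stays resident until munlock() or exit
 *			munlock(buf, 4096);
 *		}
 *		free(buf);
 *		return 0;
 *	}
 */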
asmlinkage long sys_munlock(unsigned long start, size_t len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
static int do_mlockall(int flags)
{
	struct vm_area_struct *vma, *prev = NULL;
	unsigned int def_flags = 0;

	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;
	if (flags == MCL_FUTURE)
		goto out;

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		unsigned int newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
	}
out:
	return 0;
}
asmlinkage long sys_mlockall(int flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	ret = -EPERM;
	if (!can_do_mlock())
		goto out;

	down_write(&current->mm->mmap_sem);

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = do_mlockall(flags);
	up_write(&current->mm->mmap_sem);
out:
	return ret;
}
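
/*
 * Userspace usage sketch (illustrative): a latency-sensitive process
 * locking its whole address space, both what is mapped now (MCL_CURRENT)
 * and whatever it maps later (MCL_FUTURE, via mm->def_flags above):
 *
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0)
 *			perror("mlockall");
 *		return 0;
 *	}
 */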
asmlinkage long sys_munlockall(void)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);
int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}
void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}
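
/*
 * Illustrative sketch (not part of this file): these helpers back
 * SHM_LOCK requests made through shmctl(2), which charge the segment's
 * pages to user->locked_shm rather than to any single process:
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int main(void)
 *	{
 *		int id = shmget(IPC_PRIVATE, 65536, IPC_CREAT | 0600);
 *
 *		if (id >= 0) {
 *			shmctl(id, SHM_LOCK, NULL);	// -> user_shm_lock()
 *			shmctl(id, IPC_RMID, NULL);
 *		}
 *		return 0;
 *	}
 */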