2005-04-17 02:20:36 +04:00
/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
Detach sched.h from mm.h
The first thing mm.h does is include sched.h, solely for the can_do_mlock()
inline function, which dereferences "current". By dealing with can_do_mlock(),
mm.h can be detached from sched.h, which is good. See below for why.
This patch
a) removes unconditional inclusion of sched.h from mm.h
b) makes can_do_mlock() a normal function in mm/mlock.c
c) exports can_do_mlock() to not break compilation
d) adds sched.h inclusions back to files that were getting it indirectly.
e) adds less bloated headers to some files (asm/signal.h, jiffies.h) that were
getting them indirectly
Net result is:
a) mm.h users would get less code to open, read, preprocess, parse, ... if
they don't need sched.h
b) sched.h stops being a dependency for a significant number of files:
on x86_64 allmodconfig touching sched.h results in recompile of 4083 files,
after patch it's only 3744 (-8.3%).
Cross-compile tested on
all arm defconfigs, all mips defconfigs, all powerpc defconfigs,
alpha alpha-up
arm
i386 i386-up i386-defconfig i386-allnoconfig
ia64 ia64-up
m68k
mips
parisc parisc-up
powerpc powerpc-up
s390 s390-up
sparc sparc-up
sparc64 sparc64-up
um-x86_64
x86_64 x86_64-up x86_64-defconfig x86_64-allnoconfig
as well as my two usual configs.
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
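(For reference, a rough sketch of the helper this patch moves out of line --
not the verbatim mm/mlock.c code, just its shape: mlock is permitted when the
RLIMIT_MEMLOCK soft limit is non-zero or the task has CAP_IPC_LOCK, and both
checks reach through "current":

	int can_do_mlock(void)
	{
		if (capable(CAP_IPC_LOCK))
			return 1;
		if (rlimit(RLIMIT_MEMLOCK) != 0)
			return 1;
		return 0;
	}
)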
2007-05-21 01:22:52 +04:00
#include <linux/sched.h>
2008-09-05 17:08:44 +04:00
#include <linux/io.h>
2011-04-13 07:57:17 +04:00
#include <linux/personality.h>
2010-06-15 05:16:19 +04:00
#include <linux/random.h>
2011-11-22 07:01:06 +04:00
#include <asm/cachetype.h>
2005-04-17 02:20:36 +04:00
#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
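/*
 * Worked example (assuming 4K pages, so SHMLBA = 4 * PAGE_SIZE = 16K on ARM):
 * COLOUR_ALIGN(0x12345000, 3) rounds the hint up to the next 16K boundary,
 * 0x12348000, then adds the colour of page 3 within SHMLBA,
 * (3 << 12) & 0x3fff = 0x3000, giving 0x1234b000.  The result is page
 * aligned and congruent to (pgoff << PAGE_SHIFT) modulo SHMLBA, which is
 * exactly what the VIPT aliasing rules below require.
 */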
2011-11-22 07:01:07 +04:00
/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)
static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}
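/*
 * For the non-legacy (top-down) layout, the mmap base sits one stack-sized
 * gap, clamped to [MIN_GAP, MAX_GAP], plus the ASLR offset below TASK_SIZE.
 * For example, with a typical 8MB RLIMIT_STACK the gap is clamped up to
 * MIN_GAP, so the base ends up 128MB plus the random offset below TASK_SIZE,
 * rounded to a page boundary.
 */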
static unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
2005-04-17 02:20:36 +04:00
/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
2011-11-22 07:01:06 +04:00
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
2012-12-12 04:02:10 +04:00
	struct vm_unmapped_area_info info;
2005-04-17 02:20:36 +04:00
/*
* We only need to do colour alignment if either the I or D
2011-11-22 07:01:06 +04:00
 * caches alias.
2005-04-17 02:20:36 +04:00
*/
2011-11-22 07:01:06 +04:00
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);
2005-04-17 02:20:36 +04:00
/*
2007-05-07 01:50:07 +04:00
 * We enforce the MAP_FIXED case.
2005-04-17 02:20:36 +04:00
*/
	if (flags & MAP_FIXED) {
2009-12-05 23:10:44 +03:00
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA-1))
2005-04-17 02:20:36 +04:00
			return -EINVAL;
		return addr;
	}
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
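	/*
	 * Otherwise search the whole [mmap_base, TASK_SIZE) range bottom-up
	 * for a free gap of the requested length.  When colouring is needed,
	 * align_mask/align_offset make vm_unmapped_area() return an address
	 * congruent to (pgoff << PAGE_SHIFT) modulo SHMLBA.
	 */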
2012-12-12 04:02:10 +04:00
	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
2005-04-17 02:20:36 +04:00
}
2011-11-22 07:01:07 +04:00
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			const unsigned long len, const unsigned long pgoff,
			const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
2012-12-12 04:02:10 +04:00
	struct vm_unmapped_area_info info;
2011-11-22 07:01:07 +04:00
	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);
/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA-1))
			return -EINVAL;
		return addr;
	}
/* requesting a specific address */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
2012-12-12 04:02:10 +04:00
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
2013-11-29 01:43:40 +04:00
	info.low_limit = FIRST_USER_ADDRESS;
2012-12-12 04:02:10 +04:00
	info.high_limit = mm->mmap_base;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
2011-11-22 07:01:07 +04:00
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
2012-12-12 04:02:10 +04:00
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}
2011-11-22 07:01:07 +04:00
	return addr;
}
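/*
 * Per-process mmap layout selection.  With 4K pages the random factor
 * below is (0..255) << 12, i.e. the mmap base is shifted by up to ~1MB
 * in page-sized steps, randomising address bits 12-19 (the "8 bits of
 * randomness in 20 address space bits" noted below).
 */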
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	/* 8 bits of randomness in 20 address space bits */
	if ((current->flags & PF_RANDOMIZE) &&
	    !(current->personality & ADDR_NO_RANDOMIZE))
		random_factor = (get_random_int() % (1 << 8)) << PAGE_SHIFT;

	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
2006-09-16 13:50:22 +04:00
/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
2012-09-12 22:05:58 +04:00
int valid_phys_addr_range(phys_addr_t addr, size_t size)
2006-09-16 13:50:22 +04:00
{
2008-02-26 20:42:10 +03:00
	if (addr < PHYS_OFFSET)
		return 0;
2009-10-02 03:45:28 +04:00
	if (addr + size > __pa(high_memory - 1) + 1)
2006-09-16 13:50:22 +04:00
		return 0;

	return 1;
}
/*
2013-09-24 19:38:00 +04:00
 * Do not allow /dev/mem mappings beyond the supported physical range.
2006-09-16 13:50:22 +04:00
*/
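/*
 * That is, the end of the mapping must not go past the highest pfn the
 * page tables can express: PHYS_MASK covers a 32-bit physical address
 * space (40-bit with LPAE), so the limit is 1 + (PHYS_MASK >> PAGE_SHIFT).
 */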
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
2013-09-24 19:38:00 +04:00
	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
2006-09-16 13:50:22 +04:00
}
2010-09-23 02:34:36 +04:00
#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>
/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif