/*
 *  linux/arch/arm/mm/mmap.c
*/
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/random.h>
#include <asm/cputype.h>
#include <asm/system.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
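/*
 * Worked example, assuming 4 KiB pages and SHMLBA == 4 * PAGE_SIZE
 * (16 KiB): COLOUR_ALIGN(0x40001000, 3) rounds the address up to the
 * next 16 KiB boundary, 0x40004000, then adds the offset's colour,
 * (3 << PAGE_SHIFT) & (SHMLBA - 1) = 0x3000, giving 0x40007000, so
 * page 3 of the object always maps at the same cache colour.
 */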
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
	unsigned int cache_type;
	int do_align = 0, aliasing = 0;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.  This is indicated by bits 9 and 21 of the
	 * cache type register.
	 */
	cache_type = read_cpuid_cachetype();
	if (cache_type != read_cpuid_id()) {
		aliasing = (cache_type | cache_type >> 12) & (1 << 11);
		if (aliasing)
			do_align = filp || flags & MAP_SHARED;
	}
#else
#define do_align 0
#define aliasing 0
#endif

	/*
	 * We enforce the MAP_FIXED case.
	 */
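	/*
	 * A fixed mapping cannot be moved to a correctly coloured address,
	 * so a shared MAP_FIXED request whose address and file offset
	 * disagree on their SHMLBA colour is rejected outright.
	 */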
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
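	/*
	 * free_area_cache remembers where the previous search ended and
	 * cached_hole_size the largest hole seen below it; only requests
	 * too big for that hole resume from the cache, everything else
	 * rescans from TASK_UNMAPPED_BASE.
	 */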
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

	/* 8 bits of randomness in 20 address space bits */
	if (current->flags & PF_RANDOMIZE)
		addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;
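	/*
	 * Assuming 4 KiB pages, this perturbs the search start by 0-255
	 * pages, i.e. up to just under 1 MiB within a 20-bit window.
	 */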

full_search:
	if (do_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}

		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t size)
{
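	/*
	 * Only the kernel's directly mapped RAM is considered valid here:
	 * anything below PHYS_OFFSET or beyond the physical address of
	 * high_memory is refused.
	 */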
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}

/*
 * We don't use supersection mappings for mmap() on /dev/mem, which
 * means that we can't map the memory area above the 4G barrier into
 * userspace.
 */
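/*
 * 0x00100000 page frames at 4 KiB each (assuming 4 KiB pages) is
 * exactly 4 GiB, hence the pfn limit checked below.
 */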
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
}

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}

#endif