/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>

Detach sched.h from mm.h

The first thing mm.h does is include sched.h, solely for the can_do_mlock()
inline function, which dereferences "current" internally.  By dealing with
can_do_mlock(), mm.h can be detached from sched.h, which is good.  See below
for why.

This patch
a) removes the unconditional inclusion of sched.h from mm.h
b) makes can_do_mlock() a normal function in mm/mlock.c (sketched below)
c) exports can_do_mlock() so that compilation doesn't break
d) adds sched.h inclusions back to files that were getting it indirectly
e) adds less bloated headers (asm/signal.h, jiffies.h) to files that were
   getting them indirectly

Net result:
a) mm.h users get less code to open, read, preprocess, parse, ... if
   they don't need sched.h
b) sched.h stops being a dependency for a significant number of files:
   on x86_64 allmodconfig, touching sched.h used to trigger a recompile of
   4083 files; after the patch it's only 3744 (-8.3%).

Cross-compile tested on
all arm defconfigs, all mips defconfigs, all powerpc defconfigs,
alpha alpha-up
arm
i386 i386-up i386-defconfig i386-allnoconfig
ia64 ia64-up
m68k
mips
parisc parisc-up
powerpc powerpc-up
s390 s390-up
sparc sparc-up
sparc64 sparc64-up
um-x86_64
x86_64 x86_64-up x86_64-defconfig x86_64-allnoconfig
as well as my two usual configs.
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
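
For reference, items b) and c) boil down to roughly the following out-of-line,
exported helper.  This is only a sketch of the shape of the change; the exact
body lives in mm/mlock.c and checks CAP_IPC_LOCK and the RLIMIT_MEMLOCK limit:

/* mm/mlock.c (sketch) */
int can_do_mlock(void)
{
        if (capable(CAP_IPC_LOCK))
                return 1;
        if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
                return 1;
        return 0;
}
EXPORT_SYMBOL(can_do_mlock);
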
#include <linux/sched.h>

#include <asm/system.h>

#define COLOUR_ALIGN(addr,pgoff)  \
        ((((addr)+SHMLBA-1)&~(SHMLBA-1)) +  \
         (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
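
/*
 * Worked example, assuming 4 KiB pages (so SHMLBA = 4 * PAGE_SIZE = 16 KiB
 * on ARM): COLOUR_ALIGN(0x12345, 3) first rounds 0x12345 up to the next
 * 16 KiB boundary, 0x14000, then adds the colour offset of page 3,
 * (3 << PAGE_SHIFT) & (SHMLBA - 1) = 0x3000, giving 0x17000.  Every mapping
 * of a given page of the object thus lands at the same offset within a
 * 16 KiB window, which is what the aliasing rules described below require.
 */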

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;
#ifdef CONFIG_CPU_V6
        unsigned int cache_type;
        int do_align = 0, aliasing = 0;

        /*
         * We only need to do colour alignment if either the I or D
         * caches alias.  This is indicated by bits 9 and 21 of the
         * cache type register.
         */
        cache_type = read_cpuid(CPUID_CACHETYPE);
        if (cache_type != read_cpuid(CPUID_ID)) {
                aliasing = (cache_type | cache_type >> 12) & (1 << 11);
                if (aliasing)
                        do_align = filp || flags & MAP_SHARED;
        }
#else
#define do_align 0
#define aliasing 0
#endif

        /*
         * We enforce the MAP_FIXED case.
         */
        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED && addr & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }

        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
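
        /*
         * free_area_cache remembers where the previous search finished and
         * cached_hole_size the largest hole skipped on the way there, so a
         * request bigger than every known hole can resume from the cached
         * address instead of rescanning from TASK_UNMAPPED_BASE.
         */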
        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
                start_addr = addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

full_search:
        if (do_align)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                addr = vma->vm_end;
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}
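
/*
 * Note: the generic VM uses the routine above only because the ARM headers
 * define HAVE_ARCH_UNMAPPED_AREA; without that define, mm/mmap.c would fall
 * back to its generic arch_get_unmapped_area() implementation.
 */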

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t size)
{
        if (addr + size > __pa(high_memory))
                return 0;

        return 1;
}

/*
 * We don't use supersection mappings for mmap() on /dev/mem, which
 * means that we can't map the memory area above the 4G barrier into
 * userspace.
 */
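/*
 * (With 4 KiB pages, 0x00100000 page frames is 0x00100000 << 12 bytes,
 * i.e. exactly 4 GiB, which is the limit the check below enforces.)
 */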
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
}