/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
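
/*
 * Worked example (assuming SHMLBA == 16KB and PAGE_SHIFT == 12, i.e.
 * SHMLBA = 4 * PAGE_SIZE with 4KB pages): COLOUR_ALIGN(0x12345, 3)
 * rounds 0x12345 up to the next SHMLBA boundary (0x14000) and adds the
 * page colour of the file offset ((3 << 12) & 0x3fff == 0x3000),
 * returning 0x17000: an address whose low SHMLBA bits match those of
 * the mapped file offset.
 */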

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

static unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
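
/*
 * Example (assuming TASK_SIZE == 3GB): a typical 8MB RLIMIT_STACK is
 * clamped up to MIN_GAP (128MB), so mmap_base() returns roughly
 * PAGE_ALIGN(3GB - 128MB - rnd). An unlimited stack never reaches here;
 * mmap_is_legacy() sends it to the bottom-up layout instead.
 */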

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/*
	 * We enforce the MAP_FIXED case.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
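
	/*
	 * PAGE_MASK & (SHMLBA - 1) keeps only the colour bits above the
	 * page offset, so vm_unmapped_area() picks an address whose
	 * colour matches that of align_offset (the file offset).
	 */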
	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}
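
/*
 * Userspace-visible effect (an illustrative sketch, not code in this
 * file): on an aliasing VIPT cache, two MAP_SHARED mappings of the same
 * file offset come back with equal low SHMLBA bits, e.g.:
 *
 *	void *a = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *	void *b = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *	assert(((unsigned long)a & (SHMLBA - 1)) ==
 *	       ((unsigned long)b & (SHMLBA - 1)));
 *
 * so both mappings index the same cache colour.
 */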

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			const unsigned long len, const unsigned long pgoff,
			const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
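
	/* Search top-down, from the mmap base down to FIRST_USER_ADDRESS. */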
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = FIRST_USER_ADDRESS;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
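	/* A page-unaligned return value here is an error code, not an address. */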
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

	/* 8 bits of randomness in 20 address space bits */
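	/*
	 * With 4KB pages (PAGE_SHIFT == 12, the usual assumption), this
	 * puts 8 random bits into address bits 12-19: one of 256
	 * page-aligned offsets within a 1MB window.
	 */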
	rnd = (unsigned long)get_random_int() % (1 << 8);

	return rnd << PAGE_SHIFT;
}

void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
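/*
 * PHYS_MASK is the highest supported physical address; e.g. assuming
 * an LPAE configuration with 40-bit physical addressing, pfn + size
 * must stay within the first 1TB.
 */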
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}

#endif