#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/ia32.h>
#include <asm/syscalls.h>

/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 */
static unsigned long get_align_mask(void)
{
	/* handle 32- and 64-bit case with a single conditional */
	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
		return 0;

	if (!(current->flags & PF_RANDOMIZE))
		return 0;

	return va_align.mask;
}

/*
 * To avoid aliasing in the I$ on AMD F15h, the bits defined by the
 * va_align.bits, [12:upper_bit), are set to a random value instead of
 * zeroing them. This random value is computed once per boot. This form
 * of ASLR is known as "per-boot ASLR".
 *
 * To achieve this, the random value is added to the info.align_offset
 * value before calling vm_unmapped_area() or ORed directly to the
 * address.
 */
static unsigned long get_align_bits(void)
{
	return va_align.bits & get_align_mask();
}

unsigned long align_vdso_addr(unsigned long addr)
{
	unsigned long align_mask = get_align_mask();

	addr = (addr + align_mask) & ~align_mask;
	return addr | get_align_bits();
}
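
/*
 * Worked example with hypothetical values (not necessarily the real
 * F15h settings): assume va_align.mask == 0x7000 and a per-boot
 * va_align.bits == 0x3000. A page-aligned addr of 0x7f1234561000 is
 * first rounded up to the next 32K boundary, 0x7f1234568000, and the
 * random slice is then ORed in, yielding 0x7f123456b000. Every vDSO
 * mapping in a given boot thus shares the same value in bits [12:15).
 */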

static int __init control_va_addr_alignment(char *str)
{
	/* guard against enabling this on other CPU families */
	if (va_align.flags < 0)
		return 1;

	if (*str == 0)
		return 1;

	if (*str == '=')
		str++;

	if (!strcmp(str, "32"))
		va_align.flags = ALIGN_VA_32;
	else if (!strcmp(str, "64"))
		va_align.flags = ALIGN_VA_64;
	else if (!strcmp(str, "off"))
		va_align.flags = 0;
	else if (!strcmp(str, "on"))
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
	else
		return 0;

	return 1;
}
__setup("align_va_addr", control_va_addr_alignment);
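
/*
 * Example usage (kernel command line): "align_va_addr=64" applies the
 * alignment to 64-bit tasks only, "align_va_addr=32" to 32-bit tasks
 * only, "align_va_addr=on" to both, and "align_va_addr=off" disables
 * it entirely.
 */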

SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, off)
{
	long error;
	error = -EINVAL;
	if (off & ~PAGE_MASK)
		goto out;

	error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return error;
}
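
/*
 * Note: this entry point takes the file offset in bytes and requires
 * it to be page-aligned. With 4K pages, for example, off == 0x3000
 * becomes pgoff 3, while off == 0x3400 fails with -EINVAL.
 */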

static void find_start_end(unsigned long flags, unsigned long *begin,
			   unsigned long *end)
{
	if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
		/* This is usually needed to map code in the small code
		   model, so it needs to be in the first 31 bits. Limit
		   it to that. This means we need to move the
		   unmapped base down for this case. This can give
		   conflicts with the heap, but we assume that glibc
		   malloc knows how to fall back to mmap. Give it 1GB
		   of playground for now. -AK */
		*begin = 0x40000000;
		*end   = 0x80000000;

		if (current->flags & PF_RANDOMIZE) {
			/* randomize the base within the first 32MB */
			*begin = randomize_page(*begin, 0x02000000);
		}
	} else {
		*begin = current->mm->mmap_legacy_base;
		*end   = TASK_SIZE;
	}
}
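
/*
 * Example (illustrative): a 64-bit task calling
 *
 *	mmap(NULL, len, prot, MAP_PRIVATE|MAP_ANONYMOUS|MAP_32BIT, -1, 0)
 *
 * gets a search window of [0x40000000, 0x80000000), with the base
 * possibly randomized within the first 32MB of that range; without
 * MAP_32BIT the window is [mmap_legacy_base, TASK_SIZE).
 */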

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	unsigned long begin, end;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = begin;
	info.high_limit = end;
	info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	return vm_unmapped_area(&info);
}
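
/*
 * Note: the extra I$-aliasing alignment is applied only to file-backed
 * mappings (filp != NULL); anonymous mappings keep align_mask == 0 and
 * may land on any page boundary within [begin, end).
 */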

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
				(!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}

	/*
	 * vm_unmapped_area() returns either a page-aligned address or a
	 * negative errno; an errno always has non-zero low bits, which is
	 * how the check below tells the two apart.
	 */
	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		return addr;
	VM_BUG_ON(addr != -ENOMEM);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}
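
/*
 * Note: which of the two allocators a process uses is decided at exec
 * time, when arch_pick_mmap_layout() points mm->get_unmapped_area at
 * either the bottom-up or the top-down variant.
 */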