2005-04-16 15:20:36 -07:00
/*
 * flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */
# include <linux/personality.h>
# include <linux/mm.h>
2009-02-22 01:50:01 +00:00
# include <linux/random.h>
2017-02-08 18:51:30 +01:00
# include <linux/sched/signal.h>
2017-02-08 18:51:31 +01:00
# include <linux/sched/mm.h>
2016-01-06 11:45:51 +11:00
# include <linux/elf-randomize.h>
2016-04-29 23:26:11 +10:00
# include <linux/security.h>
# include <linux/mman.h>
2005-04-16 15:20:36 -07:00
/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~128 MB hole on 32 bit applications.
 *
 * On 64 bit applications we randomise the stack by 1GB so we need to
 * space our mmap start address by a further 1GB, otherwise there is a
 * chance the mmap area will end up closer to the stack than our ulimit
 * requires.
 */
#define MIN_GAP32	(128 * 1024 * 1024)
#define MIN_GAP64	((128 + 1024) * 1024 * 1024UL)
#define MIN_GAP		((is_32bit_task()) ? MIN_GAP32 : MIN_GAP64)
/* Never let the stack gap grow beyond 5/6 of the address space. */
#define MAX_GAP		(TASK_SIZE / 6 * 5)
2009-02-22 01:50:00 +00:00
static inline int mmap_is_legacy ( void )
{
if ( current - > personality & ADDR_COMPAT_LAYOUT )
return 1 ;
2010-01-06 05:24:31 +00:00
if ( rlimit ( RLIMIT_STACK ) = = RLIM_INFINITY )
2009-02-22 01:50:00 +00:00
return 1 ;
return sysctl_legacy_va_layout ;
}
2015-04-14 15:48:00 -07:00
unsigned long arch_mmap_rnd ( void )
2009-02-22 01:50:01 +00:00
{
2015-04-14 15:47:54 -07:00
unsigned long rnd ;
/* 8MB for 32bit, 1GB for 64bit */
if ( is_32bit_task ( ) )
2016-02-26 15:19:37 -08:00
rnd = get_random_long ( ) % ( 1 < < ( 23 - PAGE_SHIFT ) ) ;
2015-04-14 15:47:54 -07:00
else
2016-02-26 15:19:37 -08:00
rnd = get_random_long ( ) % ( 1UL < < ( 30 - PAGE_SHIFT ) ) ;
2009-02-22 01:50:01 +00:00
2011-10-17 13:05:23 +00:00
return rnd < < PAGE_SHIFT ;
2009-02-22 01:50:01 +00:00
}
2015-04-14 15:47:54 -07:00
static inline unsigned long mmap_base ( unsigned long rnd )
2005-04-16 15:20:36 -07:00
{
2010-01-06 05:24:31 +00:00
unsigned long gap = rlimit ( RLIMIT_STACK ) ;
2005-04-16 15:20:36 -07:00
if ( gap < MIN_GAP )
gap = MIN_GAP ;
else if ( gap > MAX_GAP )
gap = MAX_GAP ;
2017-03-30 16:35:21 +05:30
return PAGE_ALIGN ( DEFAULT_MAP_WINDOW - gap - rnd ) ;
2005-04-16 15:20:36 -07:00
}
2016-04-29 23:26:11 +10:00
# ifdef CONFIG_PPC_RADIX_MMU
/*
* Same function as generic code used only for radix , because we don ' t need to overload
* the generic one . But we will have to duplicate , because hash select
* HAVE_ARCH_UNMAPPED_AREA
*/
static unsigned long
radix__arch_get_unmapped_area ( struct file * filp , unsigned long addr ,
unsigned long len , unsigned long pgoff ,
unsigned long flags )
{
struct mm_struct * mm = current - > mm ;
struct vm_area_struct * vma ;
struct vm_unmapped_area_info info ;
2017-03-30 16:35:21 +05:30
if ( unlikely ( addr > mm - > context . addr_limit & & addr < TASK_SIZE ) )
mm - > context . addr_limit = TASK_SIZE ;
2017-03-22 09:07:01 +05:30
if ( len > mm - > context . addr_limit - mmap_min_addr )
2016-04-29 23:26:11 +10:00
return - ENOMEM ;
if ( flags & MAP_FIXED )
return addr ;
if ( addr ) {
addr = PAGE_ALIGN ( addr ) ;
vma = find_vma ( mm , addr ) ;
2017-03-22 09:07:01 +05:30
if ( mm - > context . addr_limit - len > = addr & & addr > = mmap_min_addr & &
2016-04-29 23:26:11 +10:00
( ! vma | | addr + len < = vma - > vm_start ) )
return addr ;
}
info . flags = 0 ;
info . length = len ;
info . low_limit = mm - > mmap_base ;
info . align_mask = 0 ;
2017-03-30 16:35:21 +05:30
if ( unlikely ( addr > DEFAULT_MAP_WINDOW ) )
info . high_limit = mm - > context . addr_limit ;
else
info . high_limit = DEFAULT_MAP_WINDOW ;
2016-04-29 23:26:11 +10:00
return vm_unmapped_area ( & info ) ;
}
static unsigned long
radix__arch_get_unmapped_area_topdown ( struct file * filp ,
const unsigned long addr0 ,
const unsigned long len ,
const unsigned long pgoff ,
const unsigned long flags )
{
struct vm_area_struct * vma ;
struct mm_struct * mm = current - > mm ;
unsigned long addr = addr0 ;
struct vm_unmapped_area_info info ;
2017-03-30 16:35:21 +05:30
if ( unlikely ( addr > mm - > context . addr_limit & & addr < TASK_SIZE ) )
mm - > context . addr_limit = TASK_SIZE ;
2016-04-29 23:26:11 +10:00
/* requested length too big for entire address space */
2017-03-22 09:07:01 +05:30
if ( len > mm - > context . addr_limit - mmap_min_addr )
2016-04-29 23:26:11 +10:00
return - ENOMEM ;
if ( flags & MAP_FIXED )
return addr ;
/* requesting a specific address */
if ( addr ) {
addr = PAGE_ALIGN ( addr ) ;
vma = find_vma ( mm , addr ) ;
2017-03-22 09:07:01 +05:30
if ( mm - > context . addr_limit - len > = addr & & addr > = mmap_min_addr & &
2016-04-29 23:26:11 +10:00
( ! vma | | addr + len < = vma - > vm_start ) )
return addr ;
}
info . flags = VM_UNMAPPED_AREA_TOPDOWN ;
info . length = len ;
info . low_limit = max ( PAGE_SIZE , mmap_min_addr ) ;
info . high_limit = mm - > mmap_base ;
info . align_mask = 0 ;
2017-03-30 16:35:21 +05:30
if ( addr > DEFAULT_MAP_WINDOW )
info . high_limit + = mm - > context . addr_limit - DEFAULT_MAP_WINDOW ;
2016-04-29 23:26:11 +10:00
addr = vm_unmapped_area ( & info ) ;
2017-03-30 16:35:21 +05:30
if ( ! ( addr & ~ PAGE_MASK ) )
return addr ;
VM_BUG_ON ( addr ! = - ENOMEM ) ;
2016-04-29 23:26:11 +10:00
/*
* A failed mmap ( ) very likely causes application failure ,
* so fall back to the bottom - up function here . This scenario
* can happen with large stack limits and large mmap ( )
* allocations .
*/
2017-03-30 16:35:21 +05:30
return radix__arch_get_unmapped_area ( filp , addr0 , len , pgoff , flags ) ;
2016-04-29 23:26:11 +10:00
}
static void radix__arch_pick_mmap_layout ( struct mm_struct * mm ,
unsigned long random_factor )
{
if ( mmap_is_legacy ( ) ) {
mm - > mmap_base = TASK_UNMAPPED_BASE ;
mm - > get_unmapped_area = radix__arch_get_unmapped_area ;
} else {
mm - > mmap_base = mmap_base ( random_factor ) ;
mm - > get_unmapped_area = radix__arch_get_unmapped_area_topdown ;
}
}
# else
/* dummy */
extern void radix__arch_pick_mmap_layout ( struct mm_struct * mm ,
unsigned long random_factor ) ;
# endif
2005-04-16 15:20:36 -07:00
/*
* This function , called very early during the creation of a new
* process VM image , sets up which VM layout function to use :
*/
void arch_pick_mmap_layout ( struct mm_struct * mm )
{
2015-04-14 15:47:54 -07:00
unsigned long random_factor = 0UL ;
if ( current - > flags & PF_RANDOMIZE )
2015-04-14 15:48:00 -07:00
random_factor = arch_mmap_rnd ( ) ;
2015-04-14 15:47:54 -07:00
2016-04-29 23:26:11 +10:00
if ( radix_enabled ( ) )
return radix__arch_pick_mmap_layout ( mm , random_factor ) ;
2005-04-16 15:20:36 -07:00
/*
* Fall back to the standard layout if the personality
* bit is set , or if the expected stack growth is unlimited :
*/
if ( mmap_is_legacy ( ) ) {
mm - > mmap_base = TASK_UNMAPPED_BASE ;
mm - > get_unmapped_area = arch_get_unmapped_area ;
} else {
2015-04-14 15:47:54 -07:00
mm - > mmap_base = mmap_base ( random_factor ) ;
2005-04-16 15:20:36 -07:00
mm - > get_unmapped_area = arch_get_unmapped_area_topdown ;
}
}