/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>

static DEFINE_SPINLOCK(slice_convert_lock);

/*
 * One bit per slice. We have lower slices which cover 256MB segments
 * up to 4G range. That gets us 16 low slices. For the rest we track slices
 * in 1TB size.
 */
struct slice_mask {
	u64 low_slices;
	DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
};
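
/*
 * For example, with the 256MB/1TB geometry described above, an address
 * at 768MB lands in low slice 3 (bit 3 of low_slices), while an address
 * at 5TB lands in high slice 5 (bit 5 of high_slices).
 */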

#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, struct slice_mask mask)
{
	if (!_slice_debug)
		return;
	pr_devel("%s low_slice: %*pbl\n", label, (int)SLICE_NUM_LOW, &mask.low_slices);
	pr_devel("%s high_slice: %*pbl\n", label, (int)SLICE_NUM_HIGH, mask.high_slices);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)

#else

static void slice_print_mask(const char *label, struct slice_mask mask) {}
#define slice_dbg(fmt...)

#endif

static void slice_range_to_mask(unsigned long start, unsigned long len,
				struct slice_mask *ret)
{
	unsigned long end = start + len - 1;

	ret->low_slices = 0;
	bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	if (start < SLICE_LOW_TOP) {
		unsigned long mend = min(end, (SLICE_LOW_TOP - 1));

		ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(start));
	}

	if ((start + len) > SLICE_LOW_TOP) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;

		bitmap_set(ret->high_slices, start_index, count);
	}
}
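
/* Return nonzero if [addr, addr + len) is within bounds and free of VMAs */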
static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm->task_size - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vma->vm_start);
}

static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

	/* Hack, so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not 0 */
	if (start == 0)
		start = SLICE_LOW_TOP;

	return !slice_area_is_free(mm, start, end - start);
}
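
/* Build the mask of slices that contain no VMAs at all, i.e. are free */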
static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
{
	unsigned long i;

	ret->low_slices = 0;
	bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret->low_slices |= 1u << i;

	if (mm->task_size <= SLICE_LOW_TOP)
		return;

	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++)
		if (!slice_high_has_vma(mm, i))
			__set_bit(i, ret->high_slices);
}
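
/* Build the mask of slices whose page size is currently psize */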
static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret)
{
	unsigned char *hpsizes;
	int index, mask_index;
	unsigned long i;
	u64 lpsizes;

	ret->low_slices = 0;
	bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == psize)
			ret->low_slices |= 1u << i;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
			__set_bit(i, ret->high_slices);
	}
}
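
/* Return nonzero if every slice set in 'mask' is also set in 'available' */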
static int slice_check_fit(struct mm_struct *mm,
			   struct slice_mask mask, struct slice_mask available)
{
	DECLARE_BITMAP(result, SLICE_NUM_HIGH);
	unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.addr_limit);

	bitmap_and(result, mask.high_slices,
		   available.high_slices, slice_count);

	return (mask.low_slices & available.low_slices) == mask.low_slices &&
		bitmap_equal(result, mask.high_slices, slice_count);
}
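
/*
 * IPI callback: on the CPU where 'parm' is the active mm, refresh the
 * paca copy of the context and flush/rebolt the SLB.
 */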
static void slice_flush_segments(void *parm)
{
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	copy_mm_to_paca(current->active_mm);

	local_irq_save(flags);
	slb_flush_and_rebolt();
	local_irq_restore(flags);
}
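
/* Change the page size of every slice set in 'mask' to psize */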
static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
{
	int index, mask_index;
	/* Write the new slice psize bits */
	unsigned char *hpsizes;
	u64 lpsizes;
	unsigned long i, flags;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	/* We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (mask.low_slices & (1u << i))
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	/* Assign the value back */
	mm->context.low_slices_psize = lpsizes;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (test_bit(i, mask.high_slices))
			hpsizes[index] = (hpsizes[index] &
					  ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  (unsigned long)mm->context.low_slices_psize,
		  (unsigned long)mm->context.high_slices_psize);

	spin_unlock_irqrestore(&slice_convert_lock, flags);

	copro_flush_all_slbs(mm);
}

/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on 'end' parameter);
 * return boolean indicating if the slice is marked as available in the
 * 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
				 struct slice_mask available,
				 int end,
				 unsigned long *boundary_addr)
{
	unsigned long slice;
	if (addr < SLICE_LOW_TOP) {
		slice = GET_LOW_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
		return !!(available.low_slices & (1u << slice));
	} else {
		slice = GET_HIGH_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) ?
			((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
		return !!test_bit(slice, available.high_slices);
	}
}

static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long len,
					      struct slice_mask available,
					      int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, next_end;
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = TASK_UNMAPPED_BASE;
	/*
	 * Search up to the allowed max value for this mmap request
	 */
	while (addr < high_limit) {
		info.low_limit = addr;
		if (!slice_scan_available(addr, available, 1, &addr))
			continue;

 next_slice:
		/*
		 * At this point [info.low_limit; addr) covers
		 * available slices only and ends at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the next available slice.
		 */
		if (addr >= high_limit)
			addr = high_limit;
		else if (slice_scan_available(addr, available, 1, &next_end)) {
			addr = next_end;
			goto next_slice;
		}
		info.high_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	return -ENOMEM;
}

static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long len,
					     struct slice_mask available,
					     int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, prev;
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = mm->mmap_base;
	/*
	 * If we are trying to allocate above DEFAULT_MAP_WINDOW,
	 * add the difference to mmap_base. This should only be
	 * applied to requests for which high_limit is above
	 * DEFAULT_MAP_WINDOW.
	 */
	if (high_limit > DEFAULT_MAP_WINDOW)
		addr += mm->context.addr_limit - DEFAULT_MAP_WINDOW;

	while (addr > PAGE_SIZE) {
		info.high_limit = addr;
		if (!slice_scan_available(addr - 1, available, 0, &addr))
			continue;

 prev_slice:
		/*
		 * At this point [addr; info.high_limit) covers
		 * available slices only and starts at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the previous available slice.
		 */
		if (addr < PAGE_SIZE)
			addr = PAGE_SIZE;
		else if (slice_scan_available(addr - 1, available, 0, &prev)) {
			addr = prev;
			goto prev_slice;
		}
		info.low_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return slice_find_area_bottomup(mm, len, available, psize, high_limit);
}

static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     struct slice_mask mask, int psize,
				     int topdown, unsigned long high_limit)
{
	if (topdown)
		return slice_find_area_topdown(mm, len, mask, psize, high_limit);
	else
		return slice_find_area_bottomup(mm, len, mask, psize, high_limit);
}
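
/* Helpers that OR / AND-NOT both the low and high parts of two slice masks */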
static inline void slice_or_mask(struct slice_mask *dst, struct slice_mask *src)
{
	DECLARE_BITMAP(result, SLICE_NUM_HIGH);

	dst->low_slices |= src->low_slices;
	bitmap_or(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
	bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
}

static inline void slice_andnot_mask(struct slice_mask *dst, struct slice_mask *src)
{
	DECLARE_BITMAP(result, SLICE_NUM_HIGH);

	dst->low_slices &= ~src->low_slices;

	bitmap_andnot(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
	bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
}

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif

unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown)
{
	struct slice_mask mask;
	struct slice_mask good_mask;
	struct slice_mask potential_mask;
	struct slice_mask compat_mask;
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;
	unsigned long high_limit;

	/*
	 * Check if we need to expand the slice area.
	 */
	if (unlikely(addr > mm->context.addr_limit &&
		     mm->context.addr_limit != TASK_SIZE)) {
		mm->context.addr_limit = TASK_SIZE;
		on_each_cpu(slice_flush_segments, mm, 1);
	}

	/*
	 * This mmap request can allocate up to 512TB
	 */
	if (addr > DEFAULT_MAP_WINDOW)
		high_limit = mm->context.addr_limit;
	else
		high_limit = DEFAULT_MAP_WINDOW;

	/*
	 * init different masks
	 */
	mask.low_slices = 0;
	bitmap_zero(mask.high_slices, SLICE_NUM_HIGH);

	/* silence stupid warning */;
	potential_mask.low_slices = 0;
	bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH);

	compat_mask.low_slices = 0;
	bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH);

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);
	VM_BUG_ON(radix_enabled());

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
		  addr, len, flags, topdown);

	if (len > mm->task_size)
		return -ENOMEM;
	if (len & ((1ul << pshift) - 1))
		return -EINVAL;
	if (fixed && (addr & ((1ul << pshift) - 1)))
		return -EINVAL;
	if (fixed && addr > (mm->task_size - len))
		return -ENOMEM;

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = _ALIGN_UP(addr, 1ul << pshift);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > mm->task_size - len ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	slice_mask_for_size(mm, psize, &good_mask);
	slice_print_mask(" good_mask", good_mask);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 *	else bad
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * Otherwise:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */

#ifdef CONFIG_PPC_64K_PAGES
	/* If we support combo pages, we can allow 64k pages in 4k slices */
	if (psize == MMU_PAGE_64K) {
		slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
		if (fixed)
			slice_or_mask(&good_mask, &compat_mask);
	}
#endif

	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Build a mask for the requested range */
		slice_range_to_mask(addr, len, &mask);
		slice_print_mask(" mask", mask);

		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_fit(mm, mask, good_mask)) {
			slice_dbg(" fits good !\n");
			return addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, we don't have to setup,
			 * we thus return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			return newaddr;
		}
	}

	/* We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	slice_mask_for_free(mm, &potential_mask);
	slice_or_mask(&potential_mask, &good_mask);
	slice_print_mask(" potential", potential_mask);

	if ((addr != 0 || fixed) && slice_check_fit(mm, mask, potential_mask)) {
		slice_dbg(" fits potential !\n");
		goto convert;
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		addr = slice_find_area(mm, len, good_mask,
				       psize, topdown, high_limit);
		if (addr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", addr);
			return addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	addr = slice_find_area(mm, len, potential_mask,
			       psize, topdown, high_limit);

#ifdef CONFIG_PPC_64K_PAGES
	if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		slice_or_mask(&potential_mask, &compat_mask);
		addr = slice_find_area(mm, len, potential_mask,
				       psize, topdown, high_limit);
	}
#endif

	if (addr == -ENOMEM)
		return -ENOMEM;

	slice_range_to_mask(addr, len, &mask);
	slice_dbg(" found potential area at 0x%lx\n", addr);
	slice_print_mask(" mask", mask);

 convert:
	slice_andnot_mask(&mask, &good_mask);
	slice_andnot_mask(&mask, &compat_mask);
	if (mask.low_slices || !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH)) {
		slice_convert(mm, mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return addr;

}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);
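
/*
 * Standard mmap entry points, both implemented on top of
 * slice_get_unmapped_area() using the mm's user_psize.
 */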
unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       current->mm->context.user_psize, 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	return slice_get_unmapped_area(addr0, len, flags,
				       current->mm->context.user_psize, 1);
}
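
/* Return the MMU page size (psize) of the slice containing addr */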
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	unsigned char *hpsizes;
	int index, mask_index;

	/*
	 * Radix doesn't use slice, but can get enabled along with MMU_SLICE
	 */
	if (radix_enabled()) {
#ifdef CONFIG_PPC_64K_PAGES
		return MMU_PAGE_64K;
#else
		return MMU_PAGE_4K;
#endif
	}
	if (addr < SLICE_LOW_TOP) {
		u64 lpsizes;
		lpsizes = mm->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
		return (lpsizes >> (index * 4)) & 0xf;
	}
	hpsizes = mm->context.high_slices_psize;
	index = GET_HIGH_SLICE_INDEX(addr);
	mask_index = index & 0x1;
	return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

/*
 * This is called by hash_page when it needs to do a lazy conversion of
 * an address space from real 64K pages to combo 4K pages (typically
 * when hitting a non cacheable mapping on a processor or hypervisor
 * that won't allow them for 64K pages).
 *
 * This is also called in init_new_context() to change back the user
 * psize from whatever the parent context had it set to.
 * N.B. This may be called before mm->context.id has been set.
 *
 * This function will only change the content of the {low,high}_slice_psize
 * masks, it will not flush SLBs as this shall be handled lazily by the
 * caller.
 */
void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
{
	int index, mask_index;
	unsigned char *hpsizes;
	unsigned long flags, lpsizes;
	unsigned int old_psize;
	int i;

	slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);

	VM_BUG_ON(radix_enabled());

	spin_lock_irqsave(&slice_convert_lock, flags);

	old_psize = mm->context.user_psize;
	slice_dbg(" old_psize=%d\n", old_psize);
	if (old_psize == psize)
		goto bail;

	mm->context.user_psize = psize;
	wmb();

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));
	/* Assign the value back */
	mm->context.low_slices_psize = lpsizes;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == old_psize)
			hpsizes[index] = (hpsizes[index] &
					  ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  (unsigned long)mm->context.low_slices_psize,
		  (unsigned long)mm->context.high_slices_psize);

 bail:
	spin_unlock_irqrestore(&slice_convert_lock, flags);
}
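
/* Set the page size of all slices covering [start, start + len) to psize */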
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask;

	VM_BUG_ON(radix_enabled());

	slice_range_to_mask(start, len, &mask);
	slice_convert(mm, mask, psize);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * because of that generic code limitation, MAP_FIXED mapping cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is ok
 * for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len)
{
	struct slice_mask mask, available;
	unsigned int psize = mm->context.user_psize;

	if (radix_enabled())
		return 0;

	slice_range_to_mask(addr, len, &mask);
	slice_mask_for_size(mm, psize, &available);
#ifdef CONFIG_PPC_64K_PAGES
	/* We need to account for 4k slices too */
	if (psize == MMU_PAGE_64K) {
		struct slice_mask compat_mask;
		slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
		slice_or_mask(&available, &compat_mask);
	}
#endif

#if 0 /* too verbose */
	slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
		 mm, addr, len);
	slice_print_mask(" mask", mask);
	slice_print_mask(" available", available);
#endif
	return !slice_check_fit(mm, mask, available);
}
#endif