/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>
struct memblock memblock __initdata_memblock;

int memblock_debug __initdata_memblock;
int memblock_can_resize __initdata_memblock;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}
/*
 * Address comparison utilities
 */

static phys_addr_t __init_memblock memblock_align_down(phys_addr_t addr, phys_addr_t size)
{
	return addr & ~(size - 1);
}

static phys_addr_t __init_memblock memblock_align_up(phys_addr_t addr, phys_addr_t size)
{
	return (addr + (size - 1)) & ~(size - 1);
}
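/*
 * Illustrative example (not part of the original source): with a
 * power-of-two size such as 0x1000 (4K),
 *
 *	memblock_align_down(0x12345, 0x1000) == 0x12000
 *	memblock_align_up(0x12345, 0x1000)   == 0x13000
 *
 * The ~(size - 1) mask trick assumes 'size' is a power of two, which is
 * what callers pass for alignments here.
 */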
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
					phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long __init_memblock memblock_addrs_adjacent(phys_addr_t base1, phys_addr_t size1,
					phys_addr_t base2, phys_addr_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}
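/*
 * Worked example (illustrative): for regions [0x1000, 0x2000) and
 * [0x2000, 0x3000), memblock_addrs_adjacent(0x1000, 0x1000, 0x2000, 0x1000)
 * returns 1 (the second region sits immediately above the first); swapping
 * the two regions returns -1; regions that overlap or leave a gap return 0.
 */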
static long __init_memblock memblock_regions_adjacent(struct memblock_type *type,
					unsigned long r1, unsigned long r2)
{
	phys_addr_t base1 = type->regions[r1].base;
	phys_addr_t size1 = type->regions[r1].size;
	phys_addr_t base2 = type->regions[r2].base;
	phys_addr_t size2 = type->regions[r2].size;

	return memblock_addrs_adjacent(base1, size1, base2, size2);
}

long __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}
/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */

static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end,
					phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base, res_base;
	long j;

	/* In case a huge size is requested */
	if (end < size)
		return MEMBLOCK_ERROR;

	base = memblock_align_down((end - size), align);

	/* Prevent allocations returning 0 as it's also used to
	 * indicate an allocation failure
	 */
	if (start == 0)
		start = PAGE_SIZE;

	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0)
			return base;
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = memblock_align_down(res_base - size, align);
	}

	return MEMBLOCK_ERROR;
}
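/*
 * Sketch of the top-down scan above (illustrative): searching [0, 0x10000)
 * for a 0x1000-sized block starts at base = 0x10000 - 0x1000 = 0xf000.
 * If a reserved region [0xe800, 0xf800) overlaps, the candidate drops to
 * memblock_align_down(0xe800 - 0x1000, align) and the loop retries until
 * it finds an unreserved slot or falls below 'start'.
 */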
static phys_addr_t __init_memblock memblock_find_base(phys_addr_t size,
			phys_addr_t align, phys_addr_t start, phys_addr_t end)
{
	long i;

	BUG_ON(0 == size);

	size = memblock_align_up(size, align);

	/* Pump up max_addr */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* We do a top-down search, this tends to limit memory
	 * fragmentation by keeping early boot allocs near the
	 * top of memory
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		phys_addr_t memblockbase = memblock.memory.regions[i].base;
		phys_addr_t memblocksize = memblock.memory.regions[i].size;
		phys_addr_t bottom, top, found;

		if (memblocksize < size)
			continue;
		if ((memblockbase + memblocksize) <= start)
			break;
		bottom = max(memblockbase, start);
		top = min(memblockbase + memblocksize, end);
		if (bottom >= top)
			continue;
		found = memblock_find_region(bottom, top, size, align);
		if (found != MEMBLOCK_ERROR)
			return found;
	}

	return MEMBLOCK_ERROR;
}
/*
 * Find a free area with specified alignment in a specific range.
 */
u64 __init_memblock memblock_find_in_range(u64 start, u64 end, u64 size, u64 align)
{
	return memblock_find_base(size, align, start, end);
}
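/*
 * Example call (illustrative values): find 2MB of free space, 2MB aligned,
 * anywhere in the first 4GB:
 *
 *	u64 addr = memblock_find_in_range(0, 0x100000000ULL,
 *					  0x200000, 0x200000);
 *	if (addr == MEMBLOCK_ERROR)
 *		... no suitable hole ...
 *
 * Note this only *finds* space; the caller must still memblock_reserve() it.
 */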
/*
 * Free memblock.reserved.regions
 */
int __init_memblock memblock_free_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_free(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

/*
 * Reserve memblock.reserved.regions
 */
int __init_memblock memblock_reserve_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_reserve(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}
static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	unsigned long i;

	for (i = r; i < type->cnt - 1; i++) {
		type->regions[i].base = type->regions[i + 1].base;
		type->regions[i].size = type->regions[i + 1].size;
	}
	type->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void __init_memblock memblock_coalesce_regions(struct memblock_type *type,
		unsigned long r1, unsigned long r2)
{
	type->regions[r1].size += type->regions[r2].size;
	memblock_remove_region(type, r2);
}
/* Defined below but needed now */
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);

static int __init_memblock memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to use
	 * when bootmem is currently active (unless bootmem itself is implemented
	 * on top of MEMBLOCK which isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab is
	 * active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
	} else
		addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE);
	if (addr == MEMBLOCK_ERROR) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries!\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
		     memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail! */
	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size) < 0);

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyways
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}
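/*
 * Growth pattern example (illustrative): if type->max is 128 entries,
 * old_size is 128 * sizeof(struct memblock_region) and the replacement
 * array holds 256 entries. Doubling keeps the amortized cost of region
 * insertions low, like a typical dynamic array.
 */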
/* Weak default; architectures may override this to veto merging */
extern int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
					phys_addr_t addr2, phys_addr_t size2)
{
	return 1;
}
static long __init_memblock memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		return 0;
	}

	/* First try and coalesce this MEMBLOCK with another. */
	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize);
		/* Check if arch allows coalescing */
		if (adjacent != 0 && type == &memblock.memory &&
		    !memblock_memory_can_coalesce(base, size, rgnbase, rgnsize))
			break;
		if (adjacent > 0) {
			type->regions[i].base -= size;
			type->regions[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			type->regions[i].size += size;
			coalesced++;
			break;
		}
	}

	/* If we plugged a hole, we may want to also coalesce with the
	 * next region
	 */
	if ((i < type->cnt - 1) && memblock_regions_adjacent(type, i, i + 1) &&
	    ((type != &memblock.memory ||
	      memblock_memory_can_coalesce(type->regions[i].base,
					   type->regions[i].size,
					   type->regions[i + 1].base,
					   type->regions[i + 1].size)))) {
		memblock_coalesce_regions(type, i, i + 1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;

	/* If we are out of space, we fail. It's too late to resize the array
	 * but then this shouldn't have happened in the first place.
	 */
	if (WARN_ON(type->cnt >= type->max))
		return -1;

	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
	for (i = type->cnt - 1; i >= 0; i--) {
		if (base < type->regions[i].base) {
			type->regions[i + 1].base = type->regions[i].base;
			type->regions[i + 1].size = type->regions[i].size;
		} else {
			type->regions[i + 1].base = base;
			type->regions[i + 1].size = size;
			break;
		}
	}

	if (base < type->regions[0].base) {
		type->regions[0].base = base;
		type->regions[0].size = size;
	}
	type->cnt++;

	/* The array is full? Try to resize it. If that fails, we undo
	 * our allocation and return an error
	 */
	if (type->cnt == type->max && memblock_double_array(type)) {
		type->cnt--;
		return -1;
	}

	return 0;
}
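/*
 * Coalescing example (illustrative): with an existing region
 * [0x1000, 0x2000), adding 0x1000 bytes at 0x2000 extends it in place to
 * [0x1000, 0x3000) via the "adjacent < 0" branch rather than consuming a
 * new array slot; adding 0x1000 bytes at 0x0 instead grows it downwards
 * to [0x0, 0x2000) via the "adjacent > 0" branch.
 */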
long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}
static long __init_memblock __memblock_remove(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < type->cnt; i++) {
		rgnbegin = type->regions[i].base;
		rgnend = rgnbegin + type->regions[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == type->cnt)
		return -1;

	/* Check to see if we are removing entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		memblock_remove_region(type, i);
		return 0;
	}

	/* Check to see if region is matching at the front */
	if (rgnbegin == base) {
		type->regions[i].base = end;
		type->regions[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		type->regions[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	type->regions[i].size = base - type->regions[i].base;
	return memblock_add_region(type, end, rgnend - end);
}
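/*
 * Split example (illustrative): removing [0x2000, 0x3000) from a region
 * [0x1000, 0x4000) hits the final case: the entry is trimmed to
 * [0x1000, 0x2000) and [0x3000, 0x4000) is re-added as a new entry.
 */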
long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.reserved, base, size);
}

long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}
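/*
 * Typical early-boot usage (illustrative; symbols and addresses are
 * hypothetical, real callers live in arch setup code):
 *
 *	memblock_add(base, ram_size);			(available RAM)
 *	memblock_reserve(__pa(_text), _end - _text);	(kernel image)
 *
 * Allocations done afterwards will steer clear of reserved ranges.
 */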
phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t found;

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	found = memblock_find_base(size, align, 0, max_addr);
	if (found != MEMBLOCK_ERROR &&
	    memblock_add_region(&memblock.reserved, found, size) >= 0)
		return found;

	return 0;
}
phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}
phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
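/*
 * Usage sketch (illustrative): grab a page-aligned scratch buffer during
 * early boot, before the page allocator is up:
 *
 *	phys_addr_t pa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *	void *va = __va(pa);
 *
 * memblock_alloc() panics on failure (via memblock_alloc_base()), so the
 * result does not need a failure check.
 */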
/*
 * Additional node-local allocators. The search for node memory is bottom-up
 * and walks memblock regions within that node bottom-up as well, but the
 * allocation within a memblock region is top-down. XXX I plan to fix that
 * at some stage
 *
 * WARNING: Only available after early_node_map[] has been populated,
 * on some architectures, that is after all the calls to add_active_range()
 * have been done to populate it.
 */

phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
{
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
	/*
	 * This code originates from sparc which really wants us to walk by
	 * addresses and returns the nid. This is not very convenient for
	 * early_pfn_map[] users as the map isn't sorted yet, and it really
	 * wants to be walked by nid.
	 *
	 * For now, I implement the inefficient method below which walks the
	 * early map multiple times. Eventually we may want to use an ARCH
	 * config option to implement a completely different method for both
	 * cases.
	 */
	unsigned long start_pfn, end_pfn;
	int i;

	for (i = 0; i < MAX_NUMNODES; i++) {
		get_pfn_range_for_nid(i, &start_pfn, &end_pfn);
		if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn))
			continue;
		*nid = i;
		return min(end, PFN_PHYS(end_pfn));
	}
#endif
	*nid = 0;

	return end;
}
static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
						    phys_addr_t size,
						    phys_addr_t align, int nid)
{
	phys_addr_t start, end;

	start = mp->base;
	end = start + mp->size;

	start = memblock_align_up(start, align);
	while (start < end) {
		phys_addr_t this_end;
		int this_nid;

		this_end = memblock_nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			phys_addr_t ret = memblock_find_region(start, this_end, size, align);
			if (ret != MEMBLOCK_ERROR &&
			    memblock_add_region(&memblock.reserved, ret, size) >= 0)
				return ret;
		}
		start = this_end;
	}

	return MEMBLOCK_ERROR;
}
phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	struct memblock_type *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	/* We do a bottom-up search for a region with the right
	 * nid since that's easier considering how memblock_nid_range()
	 * works
	 */
	for (i = 0; i < mem->cnt; i++) {
		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
							    size, align, nid);
		if (ret != MEMBLOCK_ERROR)
			return ret;
	}

	return 0;
}
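/*
 * Illustrative call: allocate 'size' bytes preferably on node 'nid',
 * e.g. for per-node early data:
 *
 *	phys_addr_t pa = memblock_alloc_nid(size, SMP_CACHE_BYTES, nid);
 *
 * This returns 0 when no memory with the right nid is found; callers that
 * can live with any node should use memblock_alloc_try_nid() below.
 */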
phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
}
/*
 * Remaining API functions
 */

/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory_size;
}
phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}
/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
	unsigned long i;
	phys_addr_t limit;
	struct memblock_region *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.regions[i].size) {
			limit -= memblock.memory.regions[i].size;
			continue;
		}

		memblock.memory.regions[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.regions[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}
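/*
 * Worked example (illustrative): with memory regions [0, 2G) and [4G, 6G)
 * and memory_limit = 3G, the first loop keeps region 0 intact (limit drops
 * to 1G) and truncates region 1 to [4G, 5G); reserved regions above the
 * new end of DRAM are then clipped or dropped the same way.
 */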
static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}
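/*
 * This binary search relies on memblock_add_region() keeping the regions
 * array sorted by base and non-overlapping. Example (illustrative): with
 * regions [0x1000, 0x2000) and [0x3000, 0x4000), an addr of 0x3800 lands
 * on index 1, while 0x2800 falls in the hole between them and yields -1.
 */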
int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}
int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= (base + size);
}
int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}
void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}
static void __init_memblock memblock_dump(struct memblock_type *region, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt  = 0x%lx\n", name, region->cnt);

	for (i = 0; i < region->cnt; i++) {
		base = region->regions[i].base;
		size = region->regions[i].size;

		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n",
			name, i, base, base + size - 1, size);
	}
}
void __init_memblock memblock_dump_all(void)
{
	if (!memblock_debug)
		return;

	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}
void __init memblock_analyze(void)
{
	int i;

	/* Check marker in the unused last array entry */
	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);
	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);

	memblock.memory_size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory_size += memblock.memory.regions[i].size;

	/* We allow resizing from here on */
	memblock_can_resize = 1;
}
void __init memblock_init(void)
{
	static int init_done __initdata = 0;

	if (init_done)
		return;
	init_done = 1;

	/* Hookup the initial arrays */
	memblock.memory.regions = memblock_memory_init_regions;
	memblock.memory.max = INIT_MEMBLOCK_REGIONS;
	memblock.reserved.regions = memblock_reserved_init_regions;
	memblock.reserved.max = INIT_MEMBLOCK_REGIONS;

	/* Write a marker in the unused last array entry */
	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;

	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
	 * This simplifies the memblock_add() code below...
	 */
	memblock.memory.regions[0].base = 0;
	memblock.memory.regions[0].size = 0;
	memblock.memory.cnt = 1;

	/* Ditto. */
	memblock.reserved.regions[0].base = 0;
	memblock.reserved.regions[0].size = 0;
	memblock.reserved.cnt = 1;

	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}
static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
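/*
 * Boot-time usage: passing "memblock=debug" on the kernel command line
 * sets memblock_debug, which enables memblock_dbg() output and the dump
 * printed by memblock_dump_all().
 */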
#if defined(CONFIG_DEBUG_FS) && !defined(ARCH_DISCARD_MEMBLOCK)
static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */