2005-04-17 02:20:36 +04:00
/*
* Written by : Patricia Gaughen < gone @ us . ibm . com > , IBM Corporation
* August 2002 : added remote node KVA remap - Martin J . Bligh
*
* Copyright ( C ) 2002 , IBM Corp .
*
* All rights reserved .
*
* This program is free software ; you can redistribute it and / or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation ; either version 2 of the License , or
* ( at your option ) any later version .
*
* This program is distributed in the hope that it will be useful , but
* WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE , GOOD TITLE or
* NON INFRINGEMENT . See the GNU General Public License for more
* details .
*
* You should have received a copy of the GNU General Public License
* along with this program ; if not , write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
# include <linux/config.h>
# include <linux/mm.h>
# include <linux/bootmem.h>
# include <linux/mmzone.h>
# include <linux/highmem.h>
# include <linux/initrd.h>
# include <linux/nodemask.h>
# include <asm/e820.h>
# include <asm/setup.h>
# include <asm/mmzone.h>
# include <bios_ebda.h>
struct pglist_data * node_data [ MAX_NUMNODES ] ;
bootmem_data_t node0_bdata ;
/*
 * numa interface - we expect the numa architecture specific code to have
 * populated the following initialisation.
 *
 * 1) node_online_map  - the map of all nodes configured (online) in the system
 * 2) node_start_pfn   - the starting page frame number for a node
 * 3) node_end_pfn     - the ending page frame number for a node
 */
2005-06-23 11:07:57 +04:00
unsigned long node_start_pfn [ MAX_NUMNODES ] ;
unsigned long node_end_pfn [ MAX_NUMNODES ] ;
2005-04-17 02:20:36 +04:00
2005-06-23 11:07:57 +04:00
# ifdef CONFIG_DISCONTIGMEM
2005-04-17 02:20:36 +04:00
/*
 * 4) physnode_map     - the mapping between a pfn and owning node
 * physnode_map keeps track of the physical memory layout of a generic
 * numa node on a 256Mb break (each element of the array will
 * represent 256Mb of memory and will be marked by the node id.  so,
 * if the first gig is on node 0, and the second gig is on node 1
 * physnode_map will contain:
 *
 *     physnode_map[0-3] = 0;
 *     physnode_map[4-7] = 1;
 *     physnode_map[8- ] = -1;
 */
s8 physnode_map [ MAX_ELEMENTS ] = { [ 0 . . . ( MAX_ELEMENTS - 1 ) ] = - 1 } ;
/*
 * Record node ownership for every pfn in [start, end): each element of
 * physnode_map covering the range is stamped with nid.
 */
void memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long cur;

	printk(KERN_INFO "Node: %d, start_pfn: %ld, end_pfn: %ld\n",
			nid, start, end);
	printk(KERN_DEBUG "  Setting physnode_map array to node %d for pfns:\n", nid);
	printk(KERN_DEBUG "  ");

	/* one physnode_map slot covers PAGES_PER_ELEMENT pfns */
	cur = start;
	while (cur < end) {
		physnode_map[cur / PAGES_PER_ELEMENT] = nid;
		printk("%ld ", cur);
		cur += PAGES_PER_ELEMENT;
	}
	printk("\n");
}
/*
 * Bytes of mem_map needed to cover start_pfn..end_pfn on node nid.
 * Zero pages need zero bytes; otherwise one extra struct page of
 * headroom is included beyond the span itself.
 */
unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
				     unsigned long end_pfn)
{
	unsigned long span = end_pfn - start_pfn;

	if (span == 0)
		return 0;

	return (span + 1) * sizeof(struct page);
}
2005-06-23 11:07:57 +04:00
# endif
2005-04-17 02:20:36 +04:00
extern unsigned long find_max_low_pfn ( void ) ;
extern void find_max_pfn ( void ) ;
extern void one_highpage_init ( struct page * , int , int ) ;
extern struct e820map e820 ;
extern unsigned long init_pg_tables_end ;
extern unsigned long highend_pfn , highstart_pfn ;
extern unsigned long max_low_pfn ;
extern unsigned long totalram_pages ;
extern unsigned long totalhigh_pages ;
# define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
unsigned long node_remap_start_pfn [ MAX_NUMNODES ] ;
unsigned long node_remap_size [ MAX_NUMNODES ] ;
unsigned long node_remap_offset [ MAX_NUMNODES ] ;
void * node_remap_start_vaddr [ MAX_NUMNODES ] ;
void set_pmd_pfn ( unsigned long vaddr , unsigned long pfn , pgprot_t flags ) ;
2005-06-23 11:07:39 +04:00
void * node_remap_end_vaddr [ MAX_NUMNODES ] ;
void * node_remap_alloc_vaddr [ MAX_NUMNODES ] ;
2005-04-17 02:20:36 +04:00
/*
* FLAT - support for basic PC memory model with discontig enabled , essentially
* a single node with all available processors in it with a flat
* memory map .
*/
int __init get_memcfg_numa_flat(void)
{
	printk("NUMA - single node, flat memory mode\n");

	/* Run the memory configuration and find the top of memory. */
	find_max_pfn();
	/* the single node 0 spans all of physical memory */
	node_start_pfn[0] = 0;
	node_end_pfn[0] = max_pfn;
	memory_present(0, 0, max_pfn);

	/* Indicate there is one node available. */
	nodes_clear(node_online_map);
	node_set_online(0);
	return 1;
}
/*
* Find the highest page frame number we have available for the node
*/
/*
 * Clamp node nid's [start, end] pfn range to the global max_pfn,
 * e.g. after the user trimmed memory with mem=XXXX.
 */
static void __init find_max_pfn_node(int nid)
{
	if (node_end_pfn[nid] > max_pfn)
		node_end_pfn[nid] = max_pfn;
	/*
	 * if a user has given mem=XXXX, then we need to make sure
	 * that the node _starts_ before that, too, not just ends
	 */
	if (node_start_pfn[nid] > max_pfn)
		node_start_pfn[nid] = max_pfn;
	/* BUG_ON for consistency with the sanity checks in zone_sizes_init() */
	BUG_ON(node_start_pfn[nid] > node_end_pfn[nid]);
}
[PATCH] sparsemem base: early_pfn_to_nid() (works before sparse is initialized)
The following four patches provide the last needed changes before the
introduction of sparsemem. For a more complete description of what this
will do, please see this patch:
http://www.sr71.net/patches/2.6.11/2.6.11-bk7-mhp1/broken-out/B-sparse-150-sparsemem.patch
or previous posts on the subject:
http://marc.theaimsgroup.com/?t=110868540700001&r=1&w=2
http://marc.theaimsgroup.com/?l=linux-mm&m=109897373315016&w=2
Three of these are i386-only, but one of them reorganizes the macros
used to manage the space in page->flags, and will affect all platforms.
There are analogous patches to the i386 ones for ppc64, ia64, and
x86_64, but those will be submitted by the normal arch maintainers.
The combination of the four patches has been test-booted on a variety of
i386 hardware, and compiled for ppc64, i386, and x86-64 with about 17
different .configs. It's also been runtime-tested on ia64 configs (with
more patches on top).
This patch:
We _know_ which node pages in general belong to, at least at a very gross
level in node_{start,end}_pfn[]. Use those to target the allocations of
pages.
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-23 11:07:38 +04:00
/* Find the owning node for a pfn. */
/*
 * Find the owning node for a pfn; falls back to node 0 when no
 * populated node covers it.
 */
int early_pfn_to_nid(unsigned long pfn)
{
	int nid;

	for_each_node(nid) {
		unsigned long end = node_end_pfn[nid];

		/* an unpopulated entry terminates the table */
		if (end == 0)
			break;
		if (pfn >= node_start_pfn[nid] && pfn <= end)
			return nid;
	}
	return 0;
}
2005-04-17 02:20:36 +04:00
/*
* Allocate memory for the pg_data_t for this node via a crude pre - bootmem
* method . For node zero take this from the bottom of memory , for
* subsequent nodes place them at node_remap_start_vaddr which contains
* node local data in physically node local memory . See setup_memory ( )
* for details .
*/
static void __init allocate_pgdat(int nid)
{
	int use_remap = nid && node_has_online_mem(nid);

	if (!use_remap) {
		/* node 0 (and memoryless nodes): steal from bottom of lowmem */
		NODE_DATA(nid) = (pg_data_t *)(__va(min_low_pfn << PAGE_SHIFT));
		min_low_pfn += PFN_UP(sizeof(pg_data_t));
	} else {
		/* other nodes: pgdat lives at the start of the node's remap area */
		NODE_DATA(nid) = (pg_data_t *)node_remap_start_vaddr[nid];
	}
}
2005-06-23 11:07:39 +04:00
void * alloc_remap ( int nid , unsigned long size )
{
void * allocation = node_remap_alloc_vaddr [ nid ] ;
size = ALIGN ( size , L1_CACHE_BYTES ) ;
if ( ! allocation | | ( allocation + size ) > = node_remap_end_vaddr [ nid ] )
return 0 ;
node_remap_alloc_vaddr [ nid ] + = size ;
memset ( allocation , 0 , size ) ;
return allocation ;
}
2005-04-17 02:20:36 +04:00
/*
 * Install large-page (pmd) kernel mappings for each online node's KVA
 * remap window, pointing it at the node-local pfns reserved earlier.
 */
void __init remap_numa_kva(void)
{
	int nid;

	for_each_online_node(nid) {
		unsigned long offset;

		/* one pmd (PTRS_PER_PTE pages) is installed per iteration */
		for (offset = 0; offset < node_remap_size[nid];
		     offset += PTRS_PER_PTE) {
			void *vaddr = node_remap_start_vaddr[nid] +
					(offset << PAGE_SHIFT);

			set_pmd_pfn((ulong)vaddr,
				    node_remap_start_pfn[nid] + offset,
				    PAGE_KERNEL_LARGE);
		}
	}
}
/*
 * Carve out, at the top of each online node, the pages that will later
 * be remapped into kernel virtual address space (holding the node's
 * lmem_map and pgdat).  Records per-node size/offset/start_pfn and
 * shrinks node_end_pfn accordingly.  Returns the total pages reserved.
 */
static unsigned long calculate_numa_remap_pages(void)
{
	int nid;
	unsigned long size, reserve_pages = 0;
	unsigned long pfn;

	for_each_online_node(nid) {
		/*
		 * The acpi/srat node info can show hot-add memory zones
		 * where memory could be added but not currently present.
		 */
		if (node_start_pfn[nid] > max_pfn)
			continue;
		if (node_end_pfn[nid] > max_pfn)
			node_end_pfn[nid] = max_pfn;

		/* ensure the remap includes space for the pgdat. */
		size = node_remap_size[nid] + sizeof(pg_data_t);

		/* convert size to large (pmd size) pages, rounding up */
		size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES;
		/* now the roundup is correct, convert to PAGE_SIZE pages */
		size = size * PTRS_PER_PTE;

		/*
		 * Validate the region we are allocating only contains valid
		 * pages.
		 */
		for (pfn = node_end_pfn[nid] - size;
		     pfn < node_end_pfn[nid]; pfn++)
			if (!page_is_ram(pfn))
				break;

		/* any hole in the candidate region: reserve nothing here */
		if (pfn != node_end_pfn[nid])
			size = 0;

		printk("Reserving %ld pages of KVA for lmem_map of node %d\n",
				size, nid);
		node_remap_size[nid] = size;
		/* offset of this node's slice within the whole remap area */
		node_remap_offset[nid] = reserve_pages;
		reserve_pages += size;
		printk("Shrinking node %d from %ld pages to %ld pages\n",
			nid, node_end_pfn[nid], node_end_pfn[nid] - size);
		/* the reserved pages come off the top of the node */
		node_end_pfn[nid] -= size;
		node_remap_start_pfn[nid] = node_end_pfn[nid];
	}
	printk("Reserving total of %ld pages for numa KVA remap\n",
			reserve_pages);
	return reserve_pages;
}
extern void setup_bootmem_allocator ( void ) ;
/*
 * Top-level NUMA memory bring-up: discover the memory configuration,
 * reserve the per-node KVA remap areas, size lowmem/highmem, set up each
 * node's remap allocator and pgdat, then start the bootmem allocator.
 * Returns max_low_pfn for the caller.
 */
unsigned long __init setup_memory(void)
{
	int nid;
	unsigned long system_start_pfn, system_max_low_pfn;
	unsigned long reserve_pages;

	/*
	 * When mapping a NUMA machine we allocate the node_mem_map arrays
	 * from node local memory.  They are then mapped directly into KVA
	 * between zone normal and vmalloc space.  Calculate the size of
	 * this space and use it to adjust the boundary between ZONE_NORMAL
	 * and ZONE_HIGHMEM.
	 */
	find_max_pfn();
	get_memcfg_numa();

	/* pages carved off the top of each node for the remap areas */
	reserve_pages = calculate_numa_remap_pages();

	/* partially used pages are not usable - thus round upwards */
	system_start_pfn = min_low_pfn = PFN_UP(init_pg_tables_end);

	/* lowmem shrinks by the pages reserved above */
	system_max_low_pfn = max_low_pfn = find_max_low_pfn() - reserve_pages;
	printk("reserve_pages = %ld find_max_low_pfn() ~ %ld\n",
			reserve_pages, max_low_pfn + reserve_pages);
	printk("max_pfn = %ld\n", max_pfn);
#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > system_max_low_pfn)
		highstart_pfn = system_max_low_pfn;
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
	       pages_to_mb(highend_pfn - highstart_pfn));
#endif
	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
			pages_to_mb(system_max_low_pfn));
	printk("min_low_pfn = %ld, max_low_pfn = %ld, highstart_pfn = %ld\n",
			min_low_pfn, max_low_pfn, highstart_pfn);

	printk("Low memory ends at vaddr %08lx\n",
			(ulong) pfn_to_kaddr(max_low_pfn));
	for_each_online_node(nid) {
		/* each node's remap window starts just above lowmem */
		node_remap_start_vaddr[nid] = pfn_to_kaddr(
				highstart_pfn + node_remap_offset[nid]);
		/* Init the node remap allocator */
		node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] +
			(node_remap_size[nid] * PAGE_SIZE);
		/* first allocation lands past the space kept for the pgdat */
		node_remap_alloc_vaddr[nid] = node_remap_start_vaddr[nid] +
			ALIGN(sizeof(pg_data_t), PAGE_SIZE);

		allocate_pgdat(nid);
		printk("node %d will remap to vaddr %08lx - %08lx\n", nid,
			(ulong) node_remap_start_vaddr[nid],
			(ulong) pfn_to_kaddr(highstart_pfn
			   + node_remap_offset[nid] + node_remap_size[nid]));
	}
	printk("High memory starts at vaddr %08lx\n",
			(ulong) pfn_to_kaddr(highstart_pfn));
	vmalloc_earlyreserve = reserve_pages * PAGE_SIZE;
	/* clamp each node's range now that max_pfn is final */
	for_each_online_node(nid)
		find_max_pfn_node(nid);

	/* node 0's pgdat uses the statically allocated bootmem data */
	memset(NODE_DATA(0), 0, sizeof(struct pglist_data));
	NODE_DATA(0)->bdata = &node0_bdata;
	setup_bootmem_allocator();
	return max_low_pfn;
}
/*
 * Build pgdat_list in node order, then hand each node's zone sizes to
 * free_area_init_node().  DMA/NORMAL/HIGHMEM boundaries are derived
 * from max_low_pfn, MAX_DMA_ADDRESS and the per-node start/end pfns.
 */
void __init zone_sizes_init(void)
{
	int nid;

	/*
	 * Insert nodes into pgdat_list backward so they appear in order.
	 * Clobber node 0's links and NULL out pgdat_list before starting.
	 */
	pgdat_list = NULL;
	for (nid = MAX_NUMNODES - 1; nid >= 0; nid--) {
		if (!node_online(nid))
			continue;
		NODE_DATA(nid)->pgdat_next = pgdat_list;
		pgdat_list = NODE_DATA(nid);
	}

	for_each_online_node(nid) {
		unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
		unsigned long *zholes_size;
		unsigned int max_dma;

		unsigned long low = max_low_pfn;
		unsigned long start = node_start_pfn[nid];
		unsigned long high = node_end_pfn[nid];

		max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;

		if (node_has_online_mem(nid)) {
			if (start > low) {
#ifdef CONFIG_HIGHMEM
				/* node lies entirely above lowmem: all highmem */
				BUG_ON(start > high);
				zones_size[ZONE_HIGHMEM] = high - start;
#endif
			} else {
				if (low < max_dma)
					zones_size[ZONE_DMA] = low;
				else {
					BUG_ON(max_dma > low);
					BUG_ON(low > high);
					zones_size[ZONE_DMA] = max_dma;
					zones_size[ZONE_NORMAL] = low - max_dma;
#ifdef CONFIG_HIGHMEM
					zones_size[ZONE_HIGHMEM] = high - low;
#endif
				}
			}
		}

		zholes_size = get_zholes_size(nid);
		free_area_init_node(nid, NODE_DATA(nid), zones_size, start,
				zholes_size);
	}
}
/*
 * Walk every highmem zone and run one_highpage_init() on each valid
 * page in it, then fold totalhigh_pages into totalram_pages.  Compiles
 * to a no-op without CONFIG_HIGHMEM.
 */
void __init set_highmem_pages_init(int bad_ppro)
{
#ifdef CONFIG_HIGHMEM
	struct zone *zone;
	struct page *page;

	for_each_zone(zone) {
		unsigned long pfn, first_pfn, last_pfn;

		if (!is_highmem(zone))
			continue;

		first_pfn = zone->zone_start_pfn;
		last_pfn = first_pfn + zone->spanned_pages;

		printk("Initializing %s for node %d (%08lx:%08lx)\n",
				zone->name, zone->zone_pgdat->node_id,
				first_pfn, last_pfn);

		for (pfn = first_pfn; pfn < last_pfn; pfn++) {
			/* holes in the zone span have no struct page */
			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			one_highpage_init(page, pfn, bad_ppro);
		}
	}
	totalram_pages += totalhigh_pages;
#endif
}