/* 2009-03-13 07:19:52 +03:00 — stray VCS timestamp from extraction, not source */
/* Common code for 32 and 64-bit NUMA */
# include <linux/topology.h>
# include <linux/module.h>
# include <linux/bootmem.h>
/* 2011-01-19 11:57:21 +03:00 — stray VCS timestamp from extraction, not source */
# include <asm/numa.h>
# include <asm/acpi.h>
/* Set by "numa=off"; consulted by the rest of NUMA init. */
int __initdata numa_off;

/*
 * Parse the "numa=" early boot parameter.
 *
 * Recognized arguments:
 *   off     - disable NUMA handling (sets numa_off)
 *   fake=*  - hand the rest of the string to the NUMA emulation code
 *   noacpi  - ignore the ACPI SRAT table (acpi_numa = -1)
 *
 * Returns 0 on success, -EINVAL when no argument was supplied.
 */
static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;

	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5))
		numa_emu_cmdline(opt + 5);
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
#endif
	return 0;
}
early_param("numa", numa_setup);
/* 2009-03-13 07:19:52 +03:00 — stray VCS timestamp from extraction, not source */
/*
2011-01-23 16:37:39 +03:00
* apicid , cpu , node mappings
2009-03-13 07:19:52 +03:00
*/
2011-01-23 16:37:39 +03:00
s16 __apicid_to_node [ MAX_LOCAL_APIC ] __cpuinitdata = {
[ 0 . . . MAX_LOCAL_APIC - 1 ] = NUMA_NO_NODE
} ;
2009-03-13 07:19:53 +03:00
cpumask_var_t node_to_cpumask_map [ MAX_NUMNODES ] ;
2009-03-13 07:19:52 +03:00
EXPORT_SYMBOL ( node_to_cpumask_map ) ;
2011-01-23 16:37:40 +03:00
/*
 * Map cpu index to node index
 */
#ifdef CONFIG_X86_32
/* 32-bit starts every cpu on node 0; 64-bit leaves it unknown. */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, 0);
#else
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
#endif
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
/*
 * Record that @cpu belongs to @node.
 *
 * Before the per-cpu areas exist the mapping is stored in the early
 * boot-time array; afterwards it goes into the per-cpu variable and,
 * for a real node, into the generic set_cpu_numa_node() bookkeeping.
 */
void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	/* Debug-only sanity check; runs only once per-cpu areas exist. */
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	if (node != NUMA_NO_NODE)
		set_cpu_numa_node(cpu, node);
}
/* Forget the node association for @cpu. */
void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}
2009-03-13 07:19:52 +03:00
/*
* Allocate node_to_cpumask_map based on number of available nodes
* Requires node_possible_map to be valid .
*
* Note : node_to_cpumask ( ) is not valid until after this is done .
* ( Use CONFIG_DEBUG_PER_CPU_MAPS to check this . )
*/
void __init setup_node_to_cpumask_map ( void )
{
unsigned int node , num = 0 ;
/* setup nr_node_ids if not done yet */
if ( nr_node_ids = = MAX_NUMNODES ) {
for_each_node_mask ( node , node_possible_map )
num = node ;
nr_node_ids = num + 1 ;
}
/* allocate the map */
2009-03-13 07:19:53 +03:00
for ( node = 0 ; node < nr_node_ids ; node + + )
alloc_bootmem_cpumask_var ( & node_to_cpumask_map [ node ] ) ;
2009-03-13 07:19:52 +03:00
2009-03-13 07:19:53 +03:00
/* cpumask_of_node() will now work */
pr_debug ( " Node to cpumask map for %d nodes \n " , nr_node_ids ) ;
2009-03-13 07:19:52 +03:00
}
#ifdef CONFIG_DEBUG_PER_CPU_MAPS

/*
 * Debug version of cpu_to_node(): complains (with a backtrace) when it
 * is used before the per-cpu areas are set up, then falls back to the
 * early boot-time map so the caller still gets an answer.
 */
int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);
/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	/* Early map still present: read straight from it, no warning. */
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
2009-03-13 07:19:52 +03:00
/*
* Returns a pointer to the bitmask of CPUs on Node ' node ' .
*/
2009-03-13 07:19:57 +03:00
const struct cpumask * cpumask_of_node ( int node )
2009-03-13 07:19:52 +03:00
{
if ( node > = nr_node_ids ) {
printk ( KERN_WARNING
" cpumask_of_node(%d): node > nr_node_ids(%d) \n " ,
node , nr_node_ids ) ;
dump_stack ( ) ;
return cpu_none_mask ;
}
2009-03-13 07:19:53 +03:00
if ( node_to_cpumask_map [ node ] = = NULL ) {
printk ( KERN_WARNING
" cpumask_of_node(%d): no node_to_cpumask_map! \n " ,
node ) ;
dump_stack ( ) ;
return cpu_online_mask ;
}
2009-03-13 16:12:42 +03:00
return node_to_cpumask_map [ node ] ;
2009-03-13 07:19:52 +03:00
}
EXPORT_SYMBOL ( cpumask_of_node ) ;
2011-01-23 16:37:40 +03:00
# endif /* CONFIG_DEBUG_PER_CPU_MAPS */