#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/topology.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);
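
/* Per-CPU map from logical CPU number to local APIC id */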
DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) = x86_cpu_to_apicid_init[cpu];
		per_cpu(x86_bios_cpu_apicid, cpu) =
						x86_bios_cpu_apicid_init[cpu];
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
						x86_cpu_to_node_map_init[cpu];
#endif
	}

	/* indicate the early static arrays will soon be gone */
	x86_cpu_to_apicid_early_ptr = NULL;
	x86_bios_cpu_apicid_early_ptr = NULL;
#ifdef CONFIG_NUMA
	x86_cpu_to_node_map_early_ptr = NULL;
#endif
}

#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
cpumask_t *cpumask_of_cpu_map __read_mostly;
EXPORT_SYMBOL(cpumask_of_cpu_map);

/* requires nr_cpu_ids to be initialized */
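/* each entry ends up with just its own CPU's bit set (a single-CPU mask) */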
static void __init setup_cpumask_of_cpu(void)
{
	int i;

	/* alloc_bootmem zeroes memory */
	cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids);
	for (i = 0; i < nr_cpu_ids; i++)
		cpu_set(i, cpumask_of_cpu_map[i]);
}
#else
static inline void setup_cpumask_of_cpu(void) { }
#endif

#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
#endif

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack, tss, pgd) as per cpu data.
 *   Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	int i, highest_cpu = 0;
	unsigned long size;
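
	/*
	 * Allow for hotpluggable CPUs: make sure the possible map includes
	 * them before the per-cpu areas are allocated below.
	 */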
#ifdef CONFIG_HOTPLUG_CPU
	prefill_possible_map();
#endif

	/* Copy section for each CPU (we discard the original) */
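	/*
	 * PERCPU_ENOUGH_ROOM is the static per-cpu section size plus some
	 * reserve for modules' per-cpu data.
	 */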
	size = PERCPU_ENOUGH_ROOM;
	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n",
			  size);

	for_each_possible_cpu(i) {
		char *ptr;
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = alloc_bootmem_pages(size);
#else
		int node = early_cpu_to_node(i);
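		/* fall back to any node if this CPU's node is not usable yet */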
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = alloc_bootmem_pages(size);
			printk(KERN_INFO
			       "cpu %d has no node or node-local memory\n", i);
		}
		else
			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
#endif
		if (!ptr)
			panic("Cannot allocate cpu data for CPU %d\n", i);
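		/*
		 * Record the offset of this CPU's copy; per_cpu() adds it to
		 * a per-cpu variable's address in the original section.
		 */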
#ifdef CONFIG_X86_64
		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
#else
		__per_cpu_offset[i] = ptr - __per_cpu_start;
#endif
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

		highest_cpu = i;
	}
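
	/* nr_cpu_ids: highest possible CPU id seen in the loop above, plus one */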
	nr_cpu_ids = highest_cpu + 1;
	printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d\n", NR_CPUS, nr_cpu_ids);

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup cpumask_of_cpu map */
	setup_cpumask_of_cpu();
}

#endif