#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif
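
/*
 * Each CPU's own index, kept in its per-cpu area so it can be read
 * cheaply (this is what raw_smp_processor_id() returns on x86).
 */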
DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);
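
/*
 * Initial per-cpu offset used by the boot CPU before
 * setup_per_cpu_areas() runs: on 64-bit the per-cpu symbols are
 * zero-based, so the boot CPU's offset is the load address of the
 * initial per-cpu section (__per_cpu_load); on 32-bit the static
 * per-cpu section is used in place, so the offset is 0.
 */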
#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);
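
/*
 * On 32-bit, per-cpu accesses go through a segment register whose GDT
 * entry (GDT_ENTRY_PERCPU) has this CPU's per-cpu offset as its base;
 * install that descriptor here.  64-bit keeps the base in the GS base
 * MSR instead, so there is nothing to do.
 */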
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct gdt;

	pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
			0x2 | DESCTYPE_S, 0x8);
	gdt.s = 1;
	write_gdt_entry(get_cpu_gdt_table(cpu),
			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack, tss, pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size;
	char *ptr;
	int cpu;

	/* Copy section for each CPU (we discard the original) */
	size = roundup(PERCPU_ENOUGH_ROOM, PAGE_SIZE);

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
	pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);

	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = alloc_bootmem_pages(size);
#else
		int node = early_cpu_to_node(cpu);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = alloc_bootmem_pages(size);
			pr_info("cpu %d has no node %d or node-local memory\n",
				cpu, node);
			pr_debug("per cpu data for cpu%d at %016lx\n",
				 cpu, __pa(ptr));
		} else {
			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
			pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
				 cpu, node, __pa(ptr));
		}
#endif

		memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);

		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
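		/*
		 * On 64-bit, set this CPU's irq_stack_ptr to 64 bytes
		 * below the top of its per-cpu interrupt stack.
		 */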
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) +
			IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
		/*
		 * Up to this point, the boot CPU has been using .data.init
		 * area.  Reload any changed state for the boot CPU.
		 */
		if (cpu == boot_cpu_id)
			switch_to_new_gdt(cpu);

		DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}