#include <linux/init.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#endif
#include "cpu.h"
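
/*
 * Each GDT entry below is spelled out as its two raw 32-bit
 * descriptor words.  The low word packs limit[15:0] and base[15:0];
 * the high word packs base[23:16], the access byte, limit[19:16],
 * the flags nibble (granularity/size) and base[31:24].  As a worked
 * example, { 0x0000ffff, 0x00cf9a00 } is a base-0 segment with a
 * 4GB limit (limit 0xfffff with G=1) and access byte 0x9a:
 * present, DPL 0, 32-bit execute/read code.
 */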
DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * The code segments and data segments have fixed 64k limits;
	 * the transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },

	[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
	[GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

static int cachesize_override __cpuinitdata = -1;
static int disable_x86_serial_nr __cpuinitdata = 1;

struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid.  It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
}

static struct cpu_dev __cpuinitdata default_cpu = {
	.c_init = default_init,
	.c_vendor = "Unknown",
};
static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);
int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (cpuid_eax(0x80000000) < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while (*p == ' ')
		p++;
	if (p != q) {
		while (*p)
			*q++ = *p++;
		while (q <= &c->x86_model_id[48])
			*q++ = '\0';	/* Zero-pad the rest */
	}

	return 1;
}

void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ecx, edx, l2size;

	n = cpuid_eax(0x80000000);

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	ecx = cpuid_ecx(0x80000006);
	l2size = ecx >> 16;

	/* do processor-specific cache resizing */
	if (this_cpu->c_size_cache)
		l2size = this_cpu->c_size_cache(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
	       l2size, ecx & 0xFF);
}

/* Naming convention should be: <Name> [(<Codename>)] */
/* This table is only used if init_<vendor>() below doesn't set the
   model name; in particular, it isn't used if CPUID levels
   0x80000002..4 are supported. */

/* Look up CPU names by table lookup. */
static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
{
	struct cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->c_models;

	while (info && info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
	return NULL;		/* Not found */
}

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
{
	char *v = c->x86_vendor_id;
	int i;
	static int printed;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (cpu_devs[i]) {
			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
			    (cpu_devs[i]->c_ident[1] &&
			     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
				c->x86_vendor = i;
				if (!early)
					this_cpu = cpu_devs[i];
				return;
			}
		}
	}
	if (!printed) {
		printed++;
		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
		printk(KERN_ERR "CPU: Your system may be unstable.\n");
	}
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

static int __init x86_fxsr_setup(char *s)
{
	/* SSE state is saved/restored via FXSR, so "nofxsr" must
	   clear XMM as well. */
	setup_clear_cpu_cap(X86_FEATURE_FXSR);
	setup_clear_cpu_cap(X86_FEATURE_XMM);
	return 1;
}
__setup("nofxsr", x86_fxsr_setup);

static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);
/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	asm("pushfl\n\t"
	    "pushfl\n\t"
	    "popl %0\n\t"	/* f1 = original EFLAGS */
	    "movl %0,%1\n\t"
	    "xorl %2,%0\n\t"	/* toggle the flag under test */
	    "pushl %0\n\t"
	    "popfl\n\t"		/* try to write it back */
	    "pushfl\n\t"
	    "popl %0\n\t"	/* f1 = EFLAGS after the write */
	    "popfl\n\t"		/* restore the original EFLAGS */
	    : "=&r" (f1), "=&r" (f2)
	    : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
static int __cpuinit have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}
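
/*
 * CPUID leaf 1 returns the processor signature in EAX ("tfms"):
 * stepping in bits 3:0, model in 7:4, family in 11:8, extended model
 * in 19:16 and extended family in 27:20.  As a worked example,
 * eax = 0x000006f6 decodes to family 6, model 0xf, stepping 6; since
 * the family is >= 6, the extended model nibble (0 here) is merged
 * in as the high nibble of the model.
 */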
void __init cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;
		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 15;
		c->x86_model = (tfms >> 4) & 15;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		c->x86_mask = tfms & 15;
		if (cap0 & (1<<19)) {	/* CLFLUSH supported */
			/* EBX bits 15:8 give the CLFLUSH line size
			   in 8-byte units */
			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
		}
	}
}
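
/*
 * x86_capability[] is a fixed map of CPUID feature words: word 0 is
 * CPUID.1 EDX, word 4 is CPUID.1 ECX, word 1 is CPUID.0x80000001 EDX
 * and word 6 is CPUID.0x80000001 ECX; that is why those array
 * indices are used below.
 */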
static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;
	unsigned int ebx;

	memset(&c->x86_capability, 0, sizeof c->x86_capability);
	if (have_cpuid_p()) {
		/* Intel-defined flags: level 0x00000001 */
		if (c->cpuid_level >= 0x00000001) {
			u32 capability, excap;
			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
			c->x86_capability[0] = capability;
			c->x86_capability[4] = excap;
		}

		/* AMD-defined flags: level 0x80000001 */
		xlvl = cpuid_eax(0x80000000);
		if ((xlvl & 0xffff0000) == 0x80000000) {
			if (xlvl >= 0x80000001) {
				c->x86_capability[1] = cpuid_edx(0x80000001);
				c->x86_capability[6] = cpuid_ecx(0x80000001);
			}
		}
	}
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.  The others are not touched to avoid unwanted
 * side effects.
 *
 * WARNING: this function is only called on the BP.  Don't add code
 * here that is supposed to run on all CPUs.
 */
static void __init early_cpu_detect(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	c->x86_cache_alignment = 32;
	c->x86_clflush_size = 32;

	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c, 1);

	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		early_init_amd(c);
		break;
	case X86_VENDOR_INTEL:
		early_init_intel(c);
		break;
	}

	early_get_cap(c);
}

static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;
	unsigned int ebx;

	if (have_cpuid_p()) {
		/* Get vendor name */
		cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
		      (unsigned int *)&c->x86_vendor_id[0],
		      (unsigned int *)&c->x86_vendor_id[8],
		      (unsigned int *)&c->x86_vendor_id[4]);

		get_cpu_vendor(c, 0);
		/* Initialize the standard set of capabilities */
		/* Note that the vendor-specific code below might override */
		/* Intel-defined flags: level 0x00000001 */
		if (c->cpuid_level >= 0x00000001) {
			u32 capability, excap;
			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
			c->x86_capability[0] = capability;
			c->x86_capability[4] = excap;
			c->x86 = (tfms >> 8) & 15;
			c->x86_model = (tfms >> 4) & 15;
			if (c->x86 == 0xf)
				c->x86 += (tfms >> 20) & 0xff;
			if (c->x86 >= 0x6)
				c->x86_model += ((tfms >> 16) & 0xF) << 4;
			c->x86_mask = tfms & 15;
#ifdef CONFIG_X86_HT
			c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
#else
			c->apicid = (ebx >> 24) & 0xFF;
#endif
			if (c->x86_capability[0] & (1<<19))
				c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
		} else {
			/* Have CPUID level 0 only - unheard of */
			c->x86 = 4;
		}

		/* AMD-defined flags: level 0x80000001 */
		xlvl = cpuid_eax(0x80000000);
		if ((xlvl & 0xffff0000) == 0x80000000) {
			if (xlvl >= 0x80000001) {
				c->x86_capability[1] = cpuid_edx(0x80000001);
				c->x86_capability[6] = cpuid_ecx(0x80000001);
			}
			if (xlvl >= 0x80000004)
				get_model_name(c); /* Default name */
		}

		init_scattered_cpuid_features(c);
	}

#ifdef CONFIG_X86_HT
	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}
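
/*
 * Presumably Pentium III-specific: setting bit 21 (0x200000) of
 * MSR_IA32_BBL_CR_CTL turns the processor serial number feature off.
 * Hiding the PSN can also change the reported cpuid level, hence the
 * re-read of CPUID leaf 0 below.
 */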
static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
		/* Disable processor serial number */
		unsigned long lo, hi;
		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		lo |= 0x200000;
		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		printk(KERN_NOTICE "CPU serial number disabled.\n");
		clear_bit(X86_FEATURE_PN, c->x86_capability);

		/* Disabling the serial number may affect the cpuid level */
		c->cpuid_level = cpuid_eax(0);
	}
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->cpuid_level = -1;		/* CPUID not detected */
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0';	/* Unset */
	c->x86_model_id[0] = '\0';	/* Unset */
	c->x86_max_cores = 1;
	c->x86_clflush_size = 32;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	if (!have_cpuid_p()) {
		/* First of all, decide if this is a 486 or higher */
		/* It's a 486 if we can modify the AC flag */
		if (flag_is_changeable_p(X86_EFLAGS_AC))
			c->x86 = 4;
		else
			c->x86 = 3;
	}

	generic_identify(c);

	if (this_cpu->c_identify)
		this_cpu->c_identify(c);

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/*
	 * The vendor-specific functions might have changed features.
	 * Now we do "generic changes."
	 */

	/* If the model name is still unset, do table lookup. */
	if (!c->x86_model_id[0]) {
		char *p;
		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	/* Clear all flags overridden by options */
	for (i = 0; i < NCAPINTS; i++)
		c->x86_capability[i] &= ~cleared_cpu_caps[i];

	/* Init Machine Check Exception if available. */
	mcheck_init(c);

	select_idle_routine(c);
}

void __init identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
	sysenter_setup();
	enable_sep_cpu();
}

void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
	BUG_ON(c == &boot_cpu_data);
	identify_cpu(c);
	enable_sep_cpu();
	mtrr_ap_init();
}

#ifdef CONFIG_X86_HT
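/*
 * detect_ht() extracts the package and core IDs from the initial
 * APIC ID (CPUID.1 EBX bits 31:24).  As a worked example: with 4
 * logical CPUs per package and x86_max_cores == 2, there are 2
 * siblings per core, so index_msb and core_bits both end up as 1:
 * cpu_core_id = (apicid >> 1) & 1, and phys_proc_id = apicid >> 2.
 */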
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
		return;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {
		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of "
			       "siblings %d\n", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);

		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
			((1 << core_bits) - 1);

		if (c->x86_max_cores > 1)
			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
			       c->cpu_core_id);
	}
}
#endif

static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
	return 1;
}
__setup("noclflush", setup_noclflush);

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM)
		vendor = this_cpu->c_vendor;
	else if (c->cpuid_level >= 0)
		vendor = c->x86_vendor_id;

	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
		printk("%s ", vendor);

	if (!c->x86_model_id[0])
		printk("%d86", c->x86);
	else
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}
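
/*
 * "clearcpuid=N" clears feature bit N, where N indexes the
 * x86_capability words as word*32 + bit.  For example, bit 25 is
 * X86_FEATURE_XMM (CPUID.1 EDX bit 25), so booting with
 * "clearcpuid=25" makes the kernel treat SSE as unavailable.
 */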
static __init int setup_disablecpuid(char *arg)
{
	int bit;
	if (get_option(&arg, &bit) && bit < NCAPINTS * 32)
		setup_clear_cpu_cap(bit);
	else
		return 0;
	return 1;
}
__setup("clearcpuid=", setup_disablecpuid);

cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;

/* This is hacky. :)
 * We're emulating future behavior.
 * In the future, the cpu-specific init functions will be called implicitly
 * via the magic of initcalls.
 * They will insert themselves into the cpu_devs structure.
 * Then, when cpu_init() is called, we can just iterate over that array.
 */
void __init early_cpu_init(void)
{
	intel_cpu_init();
	cyrix_init_cpu();
	nsc_init_cpu();
	amd_init_cpu();
	centaur_init_cpu();
	transmeta_init_cpu();
	nexgen_init_cpu();
	umc_init_cpu();
	early_cpu_detect();
}

/* Make sure %fs is initialized properly in idle threads */
struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->fs = __KERNEL_PERCPU;
	return regs;
}

/* Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one. */
void switch_to_new_gdt(void)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
}

/*
 * cpu_init() initializes state that is per-CPU.  Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT.  We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 */
void __cpuinit cpu_init(void)
{
	int cpu = smp_processor_id();
	struct task_struct *curr = current;
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &curr->thread;

	if (cpu_test_and_set(cpu, cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
		for (;;) local_irq_enable();
	}

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	load_idt(&idt_descr);
	switch_to_new_gdt();

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	curr->active_mm = &init_mm;
	if (curr->mm)
		BUG();
	enter_lazy_tlb(&init_mm, curr);

	load_sp0(t, thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	/* Clear %gs. */
	asm volatile ("mov %0, %%gs" : : "r" (0));

	/* Clear all 6 debug registers: */
	set_debugreg(0, 0);
	set_debugreg(0, 1);
	set_debugreg(0, 2);
	set_debugreg(0, 3);
	set_debugreg(0, 6);
	set_debugreg(0, 7);

	/*
	 * Force FPU initialization:
	 */
	current_thread_info()->status = 0;
	clear_used_math();
	mxcsr_feature_mask_init();
}

#ifdef CONFIG_HOTPLUG_CPU
void __cpuinit cpu_uninit(void)
{
	int cpu = raw_smp_processor_id();
	cpu_clear(cpu, cpu_initialized);

	/* lazy TLB state */
	per_cpu(cpu_tlbstate, cpu).state = 0;
	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
}
#endif