#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/module.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/ds.h>

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#endif

#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Alignment at which movsl is preferred for bulk memory copies.
 */
struct movsl_mask movsl_mask __read_mostly;
#endif
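
/*
 * Sketch of how this mask is consumed (modelled on __movsl_is_ok() in the
 * i386 usercopy helpers; details may differ): a bulk copy of n bytes from
 * a1 to a2 avoids movsl when the relative misalignment trips the mask:
 *
 *	if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
 *		return 0;	(movsl not preferred, use byte copy)
 */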

void __cpuinit early_intel_workaround(struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return;

	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
}

/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do cpu ident work
 */
int __cpuinit ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask < 8) {
		printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

/*
 * P4 Xeon errata 037 workaround.
 * Hardware prefetcher may cause stale data to be loaded into the cache.
 */
static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		if ((lo & (1 << 9)) == 0) {
			printk(KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
			printk(KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
			lo |= (1 << 9);	/* Disable hw prefetching */
			wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		}
	}
}

/*
 * find out the number of processor cores on the die
 */
static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	if (c->cpuid_level < 4)
		return 1;

	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	if (eax & 0x1f)
		return ((eax >> 26) + 1);
	else
		return 1;
}
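
/*
 * Worked example (illustrative EAX value, not read from any particular
 * part): if cpuid_count(4, 0, ...) returns eax == 0x04000121, bits 4:0
 * are non-zero (a valid cache descriptor) and bits 31:26 hold 1, so
 * num_cpu_cores() reports 1 + 1 = 2 cores on the die.
 */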

#ifdef CONFIG_X86_F00F_BUG
static void __cpuinit trap_init_f00f_bug(void)
{
	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

	/*
	 * Update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
	load_idt(&idt_descr);
}
#endif

static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	unsigned int l2 = 0;
	char *p = NULL;

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All current models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Note that the workaround should only be initialized once...
	 */
	c->f00f_bug = 0;
	if (!paravirt_enabled() && c->x86 == 5) {
		static int f00f_workaround_enabled = 0;

		c->f00f_bug = 1;
		if (!f00f_workaround_enabled) {
			trap_init_f00f_bug();
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	select_idle_routine(c);
	l2 = init_intel_cacheinfo(c);

	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1))
			set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability);
	}
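
	/*
	 * Worked example (illustrative EAX value): cpuid_eax(10) == 0x07280202
	 * would decode as architectural perfmon version 2 (bits 7:0) with two
	 * general-purpose counters (bits 15:8), so the feature bit is set.
	 */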

	/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
	if ((c->x86 << 8 | c->x86_model << 4 | c->x86_mask) < 0x633)
		clear_bit(X86_FEATURE_SEP, c->x86_capability);
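
	/*
	 * E.g. a Pentium Pro (family 6, model 1, stepping 9) packs to 0x619,
	 * below the 0x633 (model 3, stepping 3) cutoff, so its falsely
	 * advertised SEP bit is cleared here.
	 */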

	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case 5:
			if (c->x86_mask == 0) {
				if (l2 == 0)
					p = "Celeron (Covington)";
				else if (l2 == 256)
					p = "Mobile Pentium II (Dixon)";
			}
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_mask == 0 || c->x86_mask == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}
	}

	if (p)
		strcpy(c->x86_model_id, p);

	c->x86_max_cores = num_cpu_cores(c);

	detect_ht(c);

	/* Work around errata */
	Intel_errata_workarounds(c);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	if (cpu_has_xmm2)
		set_bit(X86_FEATURE_LFENCE_RDTSC, c->x86_capability);

	if (c->x86 == 15)
		set_bit(X86_FEATURE_P4, c->x86_capability);

	if (c->x86 == 6)
		set_bit(X86_FEATURE_P3, c->x86_capability);

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
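
	/*
	 * The cutoffs above correspond to Prescott (family 0xf, model 3) and
	 * to Yonah / Core Duo (family 6, model 0xe): those and later models
	 * have a TSC that ticks at a constant rate.
	 */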

	if (cpu_has_ds) {
		unsigned int l1;

		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1 << 11)))
			set_bit(X86_FEATURE_BTS, c->x86_capability);
		if (!(l1 & (1 << 12)))
			set_bit(X86_FEATURE_PEBS, c->x86_capability);
	}

	if (cpu_has_bts)
		ds_init_intel(c);
}

static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;
	return size;
}

static struct cpu_dev intel_cpu_dev __cpuinitdata = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
	.c_models = {
		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.c_init		= init_intel,
	.c_size_cache	= intel_size_cache,
};

__init int intel_cpu_init(void)
{
	cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev;
	return 0;
}

#ifndef CONFIG_X86_CMPXCHG
unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
{
	u8 prev;
	unsigned long flags;

	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
	local_irq_save(flags);
	prev = *(u8 *)ptr;
	if (prev == old)
		*(u8 *)ptr = new;
	local_irq_restore(flags);
	return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u8);

unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new)
{
	u16 prev;
	unsigned long flags;

	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
	local_irq_save(flags);
	prev = *(u16 *)ptr;
	if (prev == old)
		*(u16 *)ptr = new;
	local_irq_restore(flags);
	return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u16);

unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
{
	u32 prev;
	unsigned long flags;

	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
	local_irq_save(flags);
	prev = *(u32 *)ptr;
	if (prev == old)
		*(u32 *)ptr = new;
	local_irq_restore(flags);
	return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u32);
#endif
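
/*
 * Illustrative sketch (not part of this file): on !CONFIG_X86_CMPXCHG
 * kernels, the cmpxchg() macro in the cmpxchg header is expected to
 * dispatch to the helpers above by operand size, roughly:
 *
 *	switch (sizeof(*ptr)) {
 *	case 1: return cmpxchg_386_u8(ptr, old, new);
 *	case 2: return cmpxchg_386_u16(ptr, old, new);
 *	case 4: return cmpxchg_386_u32(ptr, old, new);
 *	}
 */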

/*
 * x86: fall back on interrupt disable in cmpxchg8b on 80386 and 80486
 *
 * On 386, cmpxchg and cmpxchg_local fall back on cmpxchg_386_u8/16/32:
 * they disable interrupts around a non-atomic update to mimic the cmpxchg
 * behaviour.
 *
 * The "Poor man's cmpxchg for 386. Unsuitable for SMP" comment already
 * present in cmpxchg_386_u32 says much about how this cmpxchg
 * implementation must not be used in an SMP context. However,
 * cmpxchg_local can perfectly well use this fallback, since it only needs
 * to be atomic with respect to the local CPU.
 *
 * This patch adds a cmpxchg_486_u64 and uses it as a fallback for
 * cmpxchg64 and cmpxchg64_local on 80386 and 80486.
 *
 * Q: Why is it called cmpxchg_486 when the other functions are called
 *    cmpxchg_386?
 * A: Because the standard cmpxchg is missing only on 386, but cmpxchg8b
 *    is missing on both 386 and 486.
 *
 *    Citing Intel's instruction set reference:
 *
 *    cmpxchg: This instruction is not supported on Intel processors
 *    earlier than the Intel486 processors.
 *
 *    cmpxchg8b: This instruction encoding is not supported on Intel
 *    processors earlier than the Pentium processors.
 *
 * Q: What is the reason to have cmpxchg64_local on 32-bit architectures?
 *    Without that need, all this would just be a few simple defines.
 * A: cmpxchg64_local on 32-bit architectures takes unsigned long long
 *    parameters, but cmpxchg_local only takes longs. Since we have
 *    cmpxchg8b to execute an 8-byte cmpxchg atomically on Pentium and
 *    later, it makes sense to provide a flavour of cmpxchg and
 *    cmpxchg_local using this instruction.
 *
 *    Also, for 32-bit architectures lacking the 64-bit atomic cmpxchg, it
 *    makes sense _not_ to define cmpxchg64 while cmpxchg could still be
 *    available.
 *
 *    However, cmpxchg64_local will be emulated by disabling interrupts on
 *    all architectures where it is not supported atomically.
 *
 *    Therefore, we *could* turn cmpxchg64_local into a cmpxchg_local, but
 *    it would make the 386/486 fallbacks ugly, make its design different
 *    from cmpxchg/cmpxchg64 (which really depend on atomic operations and
 *    cannot be emulated), and require __cmpxchg_local to be expressed as
 *    a macro rather than an inline function so the parameters would not
 *    be fixed to unsigned long long in every case.
 *
 *    So I think cmpxchg64_local makes sense there, but I am open to
 *    suggestions.
 *
 * Q: Are there any callers?
 * A: I am actually using it in LTTng in my timestamping code. I use it to
 *    work around CPUs with asynchronous TSCs. I need to update 64-bit
 *    values atomically on this 32-bit architecture.
 *
 * Changelog:
 * - Ran through checkpatch.
 *
 * Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 * Cc: Andi Kleen <ak@suse.de>
 * Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 * Signed-off-by: Ingo Molnar <mingo@elte.hu>
 * Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 */
#ifndef CONFIG_X86_CMPXCHG64
unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new)
{
	u64 prev;
	unsigned long flags;

	/* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */
	local_irq_save(flags);
	prev = *(u64 *)ptr;
	if (prev == old)
		*(u64 *)ptr = new;
	local_irq_restore(flags);
	return prev;
}
EXPORT_SYMBOL(cmpxchg_486_u64);
#endif
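
/*
 * Usage sketch (illustrative, not part of this file): a caller such as the
 * LTTng timestamping code mentioned above could publish a 64-bit value on
 * a 32-bit CPU with a cmpxchg64_local() retry loop; last_tsc and
 * compute_new_tsc() are hypothetical names:
 *
 *	u64 old, new;
 *
 *	do {
 *		old = last_tsc;
 *		new = compute_new_tsc(old);
 *	} while (cmpxchg64_local(&last_tsc, old, new) != old);
 *
 * On 386/486 kernels cmpxchg64_local() falls back on cmpxchg_486_u64()
 * above; on Pentium and later it compiles to a cmpxchg8b instruction.
 */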
// arch_initcall(intel_cpu_init);