2008-01-30 15:31:03 +03:00
# ifndef __ASM_X86_PROCESSOR_H
# define __ASM_X86_PROCESSOR_H
2008-01-30 15:31:27 +03:00
# include <asm/processor-flags.h>
2008-01-30 15:31:27 +03:00
/* Forward declaration, a strange C thing */
struct task_struct ;
struct mm_struct ;
2008-01-30 15:31:57 +03:00
# include <asm/vm86.h>
# include <asm/math_emu.h>
# include <asm/segment.h>
# include <asm/types.h>
# include <asm/sigcontext.h>
# include <asm/current.h>
# include <asm/cpufeature.h>
2008-01-30 15:31:27 +03:00
# include <asm/system.h>
2008-01-30 15:31:57 +03:00
# include <asm/page.h>
2008-01-30 15:31:33 +03:00
# include <asm/percpu.h>
2008-01-30 15:31:57 +03:00
# include <asm/msr.h>
# include <asm/desc_defs.h>
2008-01-30 15:32:38 +03:00
# include <asm/nops.h>
2008-02-21 06:24:40 +03:00
2008-01-30 15:31:57 +03:00
# include <linux/personality.h>
2008-01-30 15:31:33 +03:00
# include <linux/cpumask.h>
# include <linux/cache.h>
2008-01-30 15:31:57 +03:00
# include <linux/threads.h>
# include <linux/init.h>
2008-01-30 15:31:27 +03:00
2008-01-30 15:31:27 +03:00
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;

	/* Load the address of the local label "1:", i.e. the next instruction. */
	asm volatile("mov $1f, %0; 1:" : "=r" (pc));

	return pc;
}
2008-01-30 15:31:31 +03:00
# ifdef CONFIG_X86_VSMP
2008-02-21 06:24:40 +03:00
# define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
2008-01-30 15:31:31 +03:00
# else
2008-02-21 06:24:40 +03:00
# define ARCH_MIN_TASKALIGN 16
# define ARCH_MIN_MMSTRUCT_ALIGN 0
2008-01-30 15:31:31 +03:00
# endif
2008-01-30 15:31:33 +03:00
/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */
struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_mask;
#ifdef CONFIG_X86_32
	char			wp_works_ok;	/* It doesn't on 386's */
	/* Problems on some 486Dx4's and old 386's: */
	char			hlt_works_ok;
	char			hard_math;
	char			rfu;
	char			fdiv_bug;
	char			f00f_bug;
	char			coma_bug;
	char			pad0;
#else
	/* Number of 4K pages in DTLB/ITLB combined(in pages): */
	int			x86_tlbsize;
	__u8			x86_virt_bits;	/* virtual address width, in bits */
	__u8			x86_phys_bits;	/* physical address width, in bits */
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
#endif
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS];	/* feature flag words */
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* in KB - valid for CPUS which support this call: */
	int			x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
#ifdef CONFIG_SMP
	/* cpus sharing the last level cache: */
	cpumask_t		llc_shared_map;
#endif
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
#ifdef CONFIG_SMP
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
#endif
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
2008-02-21 06:24:40 +03:00
# define X86_VENDOR_INTEL 0
# define X86_VENDOR_CYRIX 1
# define X86_VENDOR_AMD 2
# define X86_VENDOR_UMC 3
# define X86_VENDOR_CENTAUR 5
# define X86_VENDOR_TRANSMETA 7
# define X86_VENDOR_NSC 8
# define X86_VENDOR_NUM 9
# define X86_VENDOR_UNKNOWN 0xff
2008-01-30 15:31:33 +03:00
2008-01-30 15:31:39 +03:00
/*
* capabilities of CPUs
*/
2008-02-21 06:24:40 +03:00
extern struct cpuinfo_x86 boot_cpu_data ;
extern struct cpuinfo_x86 new_cpu_data ;
extern struct tss_struct doublefault_tss ;
extern __u32 cleared_cpu_caps [ NCAPINTS ] ;
2008-01-30 15:31:33 +03:00
# ifdef CONFIG_SMP
DECLARE_PER_CPU ( struct cpuinfo_x86 , cpu_info ) ;
# define cpu_data(cpu) per_cpu(cpu_info, cpu)
2008-07-19 05:11:31 +04:00
# define current_cpu_data __get_cpu_var(cpu_info)
2008-01-30 15:31:33 +03:00
# else
# define cpu_data(cpu) boot_cpu_data
# define current_cpu_data boot_cpu_data
# endif
2008-03-03 20:12:48 +03:00
static inline int hlt_works ( int cpu )
{
# ifdef CONFIG_X86_32
return cpu_data ( cpu ) . hlt_works_ok ;
# else
return 1 ;
# endif
}
2008-02-21 06:24:40 +03:00
# define cache_line_size() (boot_cpu_data.x86_cache_alignment)
extern void cpu_detect ( struct cpuinfo_x86 * c ) ;
2008-01-30 15:31:39 +03:00
2008-06-21 14:24:19 +04:00
extern void early_cpu_init ( void ) ;
2008-01-30 15:31:39 +03:00
extern void identify_boot_cpu ( void ) ;
extern void identify_secondary_cpu ( struct cpuinfo_x86 * ) ;
2008-01-30 15:31:33 +03:00
extern void print_cpu_info ( struct cpuinfo_x86 * ) ;
extern void init_scattered_cpuid_features ( struct cpuinfo_x86 * c ) ;
extern unsigned int init_intel_cacheinfo ( struct cpuinfo_x86 * c ) ;
extern unsigned short num_cache_leaves ;
2008-01-30 15:31:39 +03:00
# if defined(CONFIG_X86_HT) || defined(CONFIG_X86_64)
extern void detect_ht ( struct cpuinfo_x86 * c ) ;
# else
static inline void detect_ht ( struct cpuinfo_x86 * c ) { }
# endif
2008-01-30 15:31:03 +03:00
/*
 * Raw CPUID: execute the CPUID instruction with *eax (and *ecx) as
 * inputs and store the resulting four registers through the pointers.
 * Callers normally use the cpuid()/cpuid_count() wrappers below, which
 * take care of pre-loading eax/ecx.
 */
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx));
}
2008-01-30 15:31:27 +03:00
/* Load a new page-table root: CR3 takes the *physical* address of the pgd. */
static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}
2008-01-30 15:31:03 +03:00
2008-01-30 15:31:31 +03:00
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;		/* ring-0 stack pointer */
	unsigned short		ss0, __ss0h;	/* ring-0 stack segment */
	unsigned long		sp1;
	/* ss1 caches MSR_IA32_SYSENTER_CS: */
	unsigned short		ss1, __ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;	/* offset of the I/O bitmap */

} __attribute__((packed));
#else
/* The 64-bit hardware TSS: only stack pointers, ISTs and the I/O bitmap. */
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;		/* ring-0 stack pointer */
	u64			sp1;
	u64			sp2;
	u64			reserved2;
	u64			ist[7];		/* Interrupt Stack Table pointers */
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;	/* offset of the I/O bitmap */

} __attribute__((packed)) ____cacheline_aligned;
#endif
/*
2008-02-21 06:24:40 +03:00
* IO - bitmap sizes :
2008-01-30 15:31:31 +03:00
*/
2008-02-21 06:24:40 +03:00
# define IO_BITMAP_BITS 65536
# define IO_BITMAP_BYTES (IO_BITMAP_BITS / 8)
# define IO_BITMAP_LONGS (IO_BITMAP_BYTES / sizeof(long))
# define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap)
# define INVALID_IO_BITMAP_OFFSET 0x8000
# define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
2008-01-30 15:31:31 +03:00
/*
 * The full per-CPU TSS: the hardware-defined part followed by software
 * state.  Field order matters: IO_BITMAP_OFFSET above is computed with
 * offsetof(struct tss_struct, io_bitmap).
 */
struct tss_struct {
	/*
	 * The hardware state:
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];
	/*
	 * Cache the current maximum and the last task that used the bitmap:
	 */
	unsigned long		io_bitmap_max;
	struct thread_struct	*io_bitmap_owner;

	/*
	 * .. and then another 0x100 bytes for the emergency kernel stack:
	 */
	unsigned long		stack[64];

} ____cacheline_aligned;
2008-01-30 15:31:31 +03:00
DECLARE_PER_CPU ( struct tss_struct , init_tss ) ;
2008-02-21 06:24:40 +03:00
/*
* Save the original ist values for checking stack pointers during debugging
*/
2008-01-30 15:31:39 +03:00
/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
	unsigned long		ist[7];	/* one entry per IST slot in the TSS */
};
2008-01-30 15:31:48 +03:00
# define MXCSR_DEFAULT 0x1f80
2008-01-30 15:31:41 +03:00
2008-01-30 15:31:48 +03:00
/* Legacy FSAVE-format FPU state image. */
struct i387_fsave_struct {
	u32			cwd;	/* FPU Control Word		*/
	u32			swd;	/* FPU Status Word		*/
	u32			twd;	/* FPU Tag Word			*/
	u32			fip;	/* FPU IP Offset		*/
	u32			fcs;	/* FPU IP Selector		*/
	u32			foo;	/* FPU Operand Pointer Offset	*/
	u32			fos;	/* FPU Operand Pointer Selector	*/

	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];

	/* Software status information [not touched by FSAVE ]: */
	u32			status;
};
/*
 * FXSAVE-format FPU/SSE state image (512 bytes, 16-byte aligned as
 * required by the FXSAVE/FXRSTOR instructions).  The anonymous union
 * overlays the 64-bit rip/rdp encoding with the 32-bit fip/fcs/foo/fos
 * encoding of the instruction/operand pointers.
 */
struct i387_fxsave_struct {
	u16			cwd; /* Control Word			*/
	u16			swd; /* Status Word			*/
	u16			twd; /* Tag Word			*/
	u16			fop; /* Last Instruction Opcode		*/
	union {
		struct {
			u64	rip; /* Instruction Pointer		*/
			u64	rdp; /* Data Pointer			*/
		};
		struct {
			u32	fip; /* FPU IP Offset			*/
			u32	fcs; /* FPU IP Selector			*/
			u32	foo; /* FPU Operand Offset		*/
			u32	fos; /* FPU Operand Selector		*/
		};
	};
	u32			mxcsr;		/* MXCSR Register State */
	u32			mxcsr_mask;	/* MXCSR Mask		*/

	/* 8*16 bytes for each FP-reg = 128 bytes: */
	u32			st_space[32];

	/* 16*16 bytes for each XMM-reg = 256 bytes: */
	u32			xmm_space[64];

	u32			padding[24];

} __attribute__((aligned(16)));
2008-01-30 15:31:48 +03:00
/* State used by the math-emulation code (software FPU). */
struct i387_soft_struct {
	u32			cwd;
	u32			swd;
	u32			twd;
	u32			fip;
	u32			fcs;
	u32			foo;
	u32			fos;
	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];
	u8			ftop;
	u8			changed;
	u8			lookahead;
	u8			no_update;
	u8			rm;
	u8			alimit;
	struct info		*info;
	u32			entry_eip;
};
2008-03-11 01:28:04 +03:00
/*
 * Per-thread extended-state save area; which member is valid depends on
 * the save mechanism in use (FSAVE, FXSAVE, or software emulation).
 */
union thread_xstate {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
};
2008-03-03 20:12:56 +03:00
# ifdef CONFIG_X86_64
2008-01-30 15:31:57 +03:00
DECLARE_PER_CPU ( struct orig_ist , orig_ist ) ;
2007-10-11 13:20:03 +04:00
# endif
2008-01-30 15:31:03 +03:00
2008-01-30 15:31:27 +03:00
extern void print_cpu_info ( struct cpuinfo_x86 * ) ;
2008-03-11 01:28:04 +03:00
extern unsigned int xstate_size ;
2008-03-11 01:28:05 +03:00
extern void free_thread_xstate ( struct task_struct * ) ;
extern struct kmem_cache * task_xstate_cachep ;
2008-01-30 15:31:27 +03:00
extern void init_scattered_cpuid_features ( struct cpuinfo_x86 * c ) ;
extern unsigned int init_intel_cacheinfo ( struct cpuinfo_x86 * c ) ;
extern unsigned short num_cache_leaves ;
2008-01-30 15:31:31 +03:00
/* Architecture-specific per-task state, embedded in task_struct. */
struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long		sp0;	/* ring-0 stack pointer */
	unsigned long		sp;	/* saved kernel stack pointer */
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned long		usersp;	/* Copy from PDA */
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif
	unsigned long		ip;
	unsigned long		fs;
	unsigned long		gs;
	/* Hardware debugging registers: */
	unsigned long		debugreg0;
	unsigned long		debugreg1;
	unsigned long		debugreg2;
	unsigned long		debugreg3;
	unsigned long		debugreg6;
	unsigned long		debugreg7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_no;
	unsigned long		error_code;
	/* floating point and extended processor state */
	union thread_xstate	*xstate;
#ifdef CONFIG_X86_32
	/* Virtual 86 mode info */
	struct vm86_struct __user *vm86_info;
	unsigned long		screen_bitmap;
	unsigned long		v86flags;
	unsigned long		v86mask;
	unsigned long		saved_sp0;
	unsigned int		saved_fs;
	unsigned int		saved_gs;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;
	/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
	unsigned long	debugctlmsr;
	/* Debug Store - if not 0 points to a DS Save Area configuration;
	 * goes into MSR_IA32_DS_AREA */
	unsigned long	ds_area_msr;
};
2008-01-30 15:31:27 +03:00
/*
 * Read hardware debug register %db<regno>.  Only 0-3, 6 and 7 exist;
 * any other register number is a kernel bug.
 */
static inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0;	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("mov %%db0, %0" : "=r" (val));
		break;
	case 1:
		asm("mov %%db1, %0" : "=r" (val));
		break;
	case 2:
		asm("mov %%db2, %0" : "=r" (val));
		break;
	case 3:
		asm("mov %%db3, %0" : "=r" (val));
		break;
	case 6:
		asm("mov %%db6, %0" : "=r" (val));
		break;
	case 7:
		asm("mov %%db7, %0" : "=r" (val));
		break;
	default:
		BUG();
	}
	return val;
}
/*
 * Write hardware debug register %db<regno>.  Only 0-3, 6 and 7 exist;
 * any other register number is a kernel bug.
 */
static inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("mov %0, %%db0"	: : "r" (value));
		break;
	case 1:
		asm("mov %0, %%db1"	: : "r" (value));
		break;
	case 2:
		asm("mov %0, %%db2"	: : "r" (value));
		break;
	case 3:
		asm("mov %0, %%db3"	: : "r" (value));
		break;
	case 6:
		asm("mov %0, %%db6"	: : "r" (value));
		break;
	case 7:
		asm("mov %0, %%db7"	: : "r" (value));
		break;
	default:
		BUG();
	}
}
2008-01-30 15:31:27 +03:00
/*
* Set IOPL bits in EFLAGS from given mask
*/
static inline void native_set_iopl_mask ( unsigned mask )
{
# ifdef CONFIG_X86_32
unsigned int reg ;
2008-02-21 06:24:40 +03:00
2008-03-23 11:03:15 +03:00
asm volatile ( " pushfl; "
" popl %0; "
" andl %1, %0; "
" orl %2, %0; "
" pushl %0; "
" popfl "
: " =&r " ( reg )
: " i " ( ~ X86_EFLAGS_IOPL ) , " r " ( mask ) ) ;
2008-01-30 15:31:27 +03:00
# endif
}
2008-02-21 06:24:40 +03:00
/*
 * Install the thread's ring-0 stack pointer into the per-CPU TSS, and on
 * 32-bit also resync the SYSENTER CS MSR when it changed (ss1 caches it).
 */
static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}
2008-01-30 15:31:27 +03:00
2008-01-30 15:32:08 +03:00
static inline void native_swapgs ( void )
{
# ifdef CONFIG_X86_64
asm volatile ( " swapgs " : : : " memory " ) ;
# endif
}
2008-01-30 15:31:31 +03:00
# ifdef CONFIG_PARAVIRT
# include <asm/paravirt.h>
# else
2008-02-21 06:24:40 +03:00
# define __cpuid native_cpuid
# define paravirt_enabled() 0
2008-01-30 15:31:27 +03:00
/*
* These special macros can be used to get or set a debugging register
*/
# define get_debugreg(var, register) \
( var ) = native_get_debugreg ( register )
# define set_debugreg(value, register) \
native_set_debugreg ( register , value )
2008-03-23 11:03:15 +03:00
/* Non-paravirt fallback: load_sp0() goes straight to the native version. */
static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}
2008-01-30 15:31:27 +03:00
# define set_iopl_mask native_set_iopl_mask
2008-01-30 15:31:27 +03:00
# endif /* CONFIG_PARAVIRT */
/*
* Save the cr4 feature set we ' re using ( ie
* Pentium 4 MB enable and PPro Global page
* enable ) , so that any CPU ' s that boot up
* after us can get the correct flags .
*/
2008-02-21 06:24:40 +03:00
extern unsigned long mmu_cr4_features ;
2008-01-30 15:31:27 +03:00
static inline void set_in_cr4 ( unsigned long mask )
{
unsigned cr4 ;
2008-02-21 06:24:40 +03:00
2008-01-30 15:31:27 +03:00
mmu_cr4_features | = mask ;
cr4 = read_cr4 ( ) ;
cr4 | = mask ;
write_cr4 ( cr4 ) ;
}
static inline void clear_in_cr4 ( unsigned long mask )
{
unsigned cr4 ;
2008-02-21 06:24:40 +03:00
2008-01-30 15:31:27 +03:00
mmu_cr4_features & = ~ mask ;
cr4 = read_cr4 ( ) ;
cr4 & = ~ mask ;
write_cr4 ( cr4 ) ;
}
2008-01-30 15:31:27 +03:00
/* Header of an Intel microcode update image. */
struct microcode_header {
	unsigned int		hdrver;		/* header format version */
	unsigned int		rev;		/* update revision */
	unsigned int		date;		/* release date */
	unsigned int		sig;		/* processor signature */
	unsigned int		cksum;		/* checksum of the whole update */
	unsigned int		ldrver;		/* loader version required */
	unsigned int		pf;		/* processor flags */
	unsigned int		datasize;	/* size of the data payload */
	unsigned int		totalsize;	/* total size of the update */
	unsigned int		reserved[3];
};
/* A microcode update: header followed by variable-length data. */
struct microcode {
	struct microcode_header	hdr;
	unsigned int		bits[0];	/* zero-length trailing array */
};
2008-02-21 06:24:40 +03:00
typedef struct microcode microcode_t ;
typedef struct microcode_header microcode_header_t ;
2008-01-30 15:31:27 +03:00
/* microcode format is extended from prescott processors */
struct extended_signature {
	unsigned int		sig;	/* processor signature */
	unsigned int		pf;	/* processor flags */
	unsigned int		cksum;	/* checksum for this signature entry */
};
/* Table of extended signatures appended to a microcode update. */
struct extended_sigtable {
	unsigned int		count;		/* number of entries in sigs[] */
	unsigned int		cksum;		/* checksum of the table */
	unsigned int		reserved[3];
	struct extended_signature sigs[0];	/* zero-length trailing array */
};
2008-01-30 15:31:38 +03:00
/* Opaque address-limit cookie used by set_fs()/get_fs()-style code. */
typedef struct {
	unsigned long		seg;
} mm_segment_t;
2008-01-30 15:31:27 +03:00
/*
* create a kernel thread without removing it from tasklists
*/
extern int kernel_thread ( int ( * fn ) ( void * ) , void * arg , unsigned long flags ) ;
/* Free all resources held by a thread. */
extern void release_thread ( struct task_struct * ) ;
2008-02-21 06:24:40 +03:00
/* Prepare to copy thread state - unlazy all lazy state */
2008-01-30 15:31:27 +03:00
extern void prepare_to_copy ( struct task_struct * tsk ) ;
2008-01-30 15:31:27 +03:00
2008-01-30 15:31:27 +03:00
unsigned long get_wchan ( struct task_struct * p ) ;
2008-01-30 15:31:03 +03:00
/*
* Generic CPUID function
* clear % ecx since some cpus ( Cyrix MII ) do not set or clear % ecx
* resulting in stale register contents being returned .
*/
static inline void cpuid ( unsigned int op ,
unsigned int * eax , unsigned int * ebx ,
unsigned int * ecx , unsigned int * edx )
{
* eax = op ;
* ecx = 0 ;
__cpuid ( eax , ebx , ecx , edx ) ;
}
/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count ( unsigned int op , int count ,
unsigned int * eax , unsigned int * ebx ,
unsigned int * ecx , unsigned int * edx )
{
* eax = op ;
* ecx = count ;
__cpuid ( eax , ebx , ecx , edx ) ;
}
/*
* CPUID functions returning a single datum
*/
/* CPUID convenience wrapper: return only the EAX result of leaf 'op'. */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int regs[4];

	cpuid(op, &regs[0], &regs[1], &regs[2], &regs[3]);

	return regs[0];
}
2008-02-21 06:24:40 +03:00
2008-01-30 15:31:03 +03:00
/* CPUID convenience wrapper: return only the EBX result of leaf 'op'. */
static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int regs[4];

	cpuid(op, &regs[0], &regs[1], &regs[2], &regs[3]);

	return regs[1];
}
2008-02-21 06:24:40 +03:00
2008-01-30 15:31:03 +03:00
/* CPUID convenience wrapper: return only the ECX result of leaf 'op'. */
static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int regs[4];

	cpuid(op, &regs[0], &regs[1], &regs[2], &regs[3]);

	return regs[2];
}
2008-02-21 06:24:40 +03:00
2008-01-30 15:31:03 +03:00
/* CPUID convenience wrapper: return only the EDX result of leaf 'op'. */
static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int regs[4];

	cpuid(op, &regs[0], &regs[1], &regs[2], &regs[3]);

	return regs[3];
}
2008-01-30 15:31:27 +03:00
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}
2008-02-21 06:24:40 +03:00
static inline void cpu_relax ( void )
{
rep_nop ( ) ;
}
/* Stop speculative execution: */
static inline void sync_core(void)
{
	int tmp;

	/* CPUID is a serializing instruction; leaf 1 clobbers ebx/ecx/edx. */
	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
		     : "ebx", "ecx", "edx", "memory");
}
2008-03-23 11:03:15 +03:00
/*
 * MONITOR: arm address monitoring on the cacheline of eax; ecx/edx carry
 * extensions/hints.  Encoded as raw bytes for pre-SSE3 assemblers.
 */
static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     :: "a" (eax), "c" (ecx), "d" (edx));
}
/*
 * MWAIT: wait until a write to the monitored cacheline; eax selects the
 * target C-state, ecx carries extensions.  Raw bytes for old assemblers.
 */
static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}
/*
 * STI immediately followed by MWAIT: enable interrupts and enter the
 * wait state atomically (STI takes effect after the next instruction).
 */
static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	trace_hardirqs_on();
	/* "mwait %eax, %ecx;" */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}
extern void mwait_idle_with_hints ( unsigned long eax , unsigned long ecx ) ;
extern void select_idle_routine ( const struct cpuinfo_x86 * c ) ;
2008-02-21 06:24:40 +03:00
extern unsigned long boot_option_idle_override ;
2008-06-24 13:58:53 +04:00
extern unsigned long idle_halt ;
2008-06-24 14:01:09 +04:00
extern unsigned long idle_nomwait ;
2008-01-30 15:31:27 +03:00
2008-01-30 15:31:39 +03:00
extern void enable_sep_cpu ( void ) ;
extern int sysenter_setup ( void ) ;
/* Defined in head.S */
2008-02-21 06:24:40 +03:00
extern struct desc_ptr early_gdt_descr ;
2008-01-30 15:31:39 +03:00
extern void cpu_set_gdt ( int ) ;
extern void switch_to_new_gdt ( void ) ;
extern void cpu_init ( void ) ;
extern void init_gdt ( int cpu ) ;
2008-03-10 16:11:17 +03:00
/*
 * Write MSR_IA32_DEBUGCTLMSR.  When the kernel is not built with
 * guaranteed DEBUGCTLMSR support, skip the write on pre-family-6 CPUs
 * (which lack the MSR).
 */
static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
2008-02-21 06:24:40 +03:00
/*
* from system description table in BIOS . Mostly for MCA use , but
* others may find it useful :
*/
extern unsigned int machine_id ;
extern unsigned int machine_submodel_id ;
extern unsigned int BIOS_revision ;
2008-01-30 15:31:39 +03:00
2008-02-21 06:24:40 +03:00
/* Boot loader type from the setup header: */
extern int bootloader_type ;
2008-01-30 15:31:39 +03:00
2008-02-21 06:24:40 +03:00
extern char ignore_fpu_irq ;
2008-01-30 15:31:27 +03:00
# define HAVE_ARCH_PICK_MMAP_LAYOUT 1
# define ARCH_HAS_PREFETCHW
# define ARCH_HAS_SPINLOCK_PREFETCH
2008-01-30 15:31:40 +03:00
# ifdef CONFIG_X86_32
2008-02-21 06:24:40 +03:00
# define BASE_PREFETCH ASM_NOP4
# define ARCH_HAS_PREFETCH
2008-01-30 15:31:40 +03:00
# else
2008-02-21 06:24:40 +03:00
# define BASE_PREFETCH "prefetcht0 (%1)"
2008-01-30 15:31:40 +03:00
# endif
2008-02-21 06:24:40 +03:00
/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth to care about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	/* Patched to prefetchnta at boot when the CPU has SSE (XMM). */
	alternative_input(BASE_PREFETCH,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}
2008-02-21 06:24:40 +03:00
/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
	/* Patched to prefetchw at boot when the CPU has 3DNow!. */
	alternative_input(BASE_PREFETCH,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}
2008-02-21 06:24:40 +03:00
static inline void spin_lock_prefetch ( const void * x )
{
prefetchw ( x ) ;
}
2008-01-30 15:31:57 +03:00
# ifdef CONFIG_X86_32
/*
* User space process size : 3 GB ( default ) .
*/
2008-02-21 06:24:40 +03:00
# define TASK_SIZE PAGE_OFFSET
# define STACK_TOP TASK_SIZE
# define STACK_TOP_MAX STACK_TOP
# define INIT_THREAD { \
. sp0 = sizeof ( init_stack ) + ( long ) & init_stack , \
. vm86_info = NULL , \
. sysenter_cs = __KERNEL_CS , \
. io_bitmap_ptr = NULL , \
. fs = __KERNEL_PERCPU , \
2008-01-30 15:31:57 +03:00
}
/*
* Note that the . io_bitmap member must be extra - big . This is because
* the CPU will access an additional byte beyond the end of the IO
* permission bitmap . The extra byte must be all 1 bits , and must
* be within the limit .
*/
2008-02-21 06:24:40 +03:00
# define INIT_TSS { \
. x86_tss = { \
2008-01-30 15:31:57 +03:00
. sp0 = sizeof ( init_stack ) + ( long ) & init_stack , \
2008-02-21 06:24:40 +03:00
. ss0 = __KERNEL_DS , \
. ss1 = __KERNEL_CS , \
. io_bitmap_base = INVALID_IO_BITMAP_OFFSET , \
} , \
. io_bitmap = { [ 0 . . . IO_BITMAP_LONGS ] = ~ 0 } , \
2008-01-30 15:31:57 +03:00
}
extern unsigned long thread_saved_pc ( struct task_struct * tsk ) ;
# define THREAD_SIZE_LONGS (THREAD_SIZE / sizeof(unsigned long))
# define KSTK_TOP(info) \
( { \
unsigned long * __ptr = ( unsigned long * ) ( info ) ; \
( unsigned long ) ( & __ptr [ THREAD_SIZE_LONGS ] ) ; \
} )
/*
* The below - 8 is to reserve 8 bytes on top of the ring0 stack .
* This is necessary to guarantee that the entire " struct pt_regs "
* is accessable even if the CPU haven ' t stored the SS / ESP registers
* on the stack ( interrupt gate does not save these registers
* when switching to the same priv ring ) .
* Therefore beware : accessing the ss / esp fields of the
* " struct pt_regs " is possible , but they may contain the
* completely wrong values .
*/
# define task_pt_regs(task) \
( { \
struct pt_regs * __regs__ ; \
__regs__ = ( struct pt_regs * ) ( KSTK_TOP ( task_stack_page ( task ) ) - 8 ) ; \
__regs__ - 1 ; \
} )
2008-02-21 06:24:40 +03:00
# define KSTK_ESP(task) (task_pt_regs(task)->sp)
2008-01-30 15:31:57 +03:00
# else
/*
* User space process size . 47 bits minus one guard page .
*/
2008-03-13 19:44:56 +03:00
# define TASK_SIZE64 ((1UL << 47) - PAGE_SIZE)
2008-01-30 15:31:57 +03:00
/* This decides where the kernel will search for a free chunk of vm
* space during mmap ' s .
*/
2008-02-21 06:24:40 +03:00
# define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
0xc0000000 : 0xFFFFe000 )
2008-01-30 15:31:57 +03:00
2008-02-21 06:24:40 +03:00
# define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
IA32_PAGE_OFFSET : TASK_SIZE64 )
# define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \
IA32_PAGE_OFFSET : TASK_SIZE64 )
2008-01-30 15:31:57 +03:00
2008-02-08 15:19:26 +03:00
# define STACK_TOP TASK_SIZE
# define STACK_TOP_MAX TASK_SIZE64
2008-01-30 15:31:57 +03:00
# define INIT_THREAD { \
. sp0 = ( unsigned long ) & init_stack + sizeof ( init_stack ) \
}
# define INIT_TSS { \
. x86_tss . sp0 = ( unsigned long ) & init_stack + sizeof ( init_stack ) \
}
/*
* Return saved PC of a blocked thread .
* What is this good for ? it will be always the scheduler or ret_from_fork .
*/
2008-02-21 06:24:40 +03:00
# define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8))
2008-01-30 15:31:57 +03:00
2008-02-21 06:24:40 +03:00
# define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
# define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
2008-01-30 15:31:57 +03:00
# endif /* CONFIG_X86_64 */
2008-02-21 07:18:40 +03:00
extern void start_thread ( struct pt_regs * regs , unsigned long new_ip ,
unsigned long new_sp ) ;
2008-02-21 06:24:40 +03:00
/*
* This decides where the kernel will search for a free chunk of vm
2008-01-30 15:31:27 +03:00
* space during mmap ' s .
*/
# define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
2008-02-21 06:24:40 +03:00
# define KSTK_EIP(task) (task_pt_regs(task)->ip)
2008-01-30 15:31:27 +03:00
2008-04-14 02:24:18 +04:00
/* Get/set a process' ability to use the timestamp counter instruction */
# define GET_TSC_CTL(adr) get_tsc_mode((adr))
# define SET_TSC_CTL(val) set_tsc_mode((val))
extern int get_tsc_mode ( unsigned long adr ) ;
extern int set_tsc_mode ( unsigned int val ) ;
2008-01-30 15:31:03 +03:00
# endif