#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>

#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/init.h>
#include <linux/err.h>

#define HBP_NUM 4
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;

	asm volatile("mov $1f, %0; 1:" : "=r" (pc));

	return pc;
}
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		16
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif
/*
 *  CPU type and hardware bug flags. Kept separately for each CPU.
 *  Members of this structure are referenced in head.S, so think twice
 *  before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_mask;
#ifdef CONFIG_X86_32
	char			wp_works_ok;	/* It doesn't on 386's */

	/* Problems on some 486Dx4's and old 386's: */
	char			hlt_works_ok;
	char			hard_math;
	char			rfu;
	char			fdiv_bug;
	char			f00f_bug;
	char			coma_bug;
	char			pad0;
#else
	/* Number of 4K pages in DTLB/ITLB combined (in pages): */
	int			x86_tlbsize;
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS];
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* in KB - valid for CPUs which support this call: */
	int			x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
#ifdef CONFIG_SMP
	/* cpus sharing the last level cache: */
	cpumask_var_t		llc_shared_map;
#endif
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
#ifdef CONFIG_SMP
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	/* Compute unit id */
	u8			compute_unit_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
#endif
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff
/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern struct tss_struct	doublefault_tss;
extern __u32			cpu_caps_cleared[NCAPINTS];
extern __u32			cpu_caps_set[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

static inline int hlt_works(int cpu)
{
#ifdef CONFIG_X86_32
	return cpu_data(cpu).hlt_works_ok;
#else
	return 1;
#endif
}

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

extern struct pt_regs *idle_regs(struct pt_regs *);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx));
}
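
/*
 * Illustrative sketch: CPUID leaf 0 reports the maximum standard leaf in
 * EAX and the 12-byte vendor string in EBX/EDX/ECX (in that order), so a
 * caller could use native_cpuid() roughly like this:
 *
 *	unsigned int eax = 0, ebx, ecx, edx;
 *	char vendor[13];
 *
 *	native_cpuid(&eax, &ebx, &ecx, &edx);
 *	memcpy(vendor + 0, &ebx, 4);
 *	memcpy(vendor + 4, &edx, 4);
 *	memcpy(vendor + 8, &ecx, 4);
 *	vendor[12] = '\0';		// e.g. "GenuineIntel"
 *
 * In practice the cpuid()/cpuid_eax() helpers further down are preferred,
 * since they also zero ECX before executing the instruction.
 */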
static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;
	/* ss1 caches MSR_IA32_SYSENTER_CS: */
	unsigned short		ss1, __ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;
	u64			sp2;
	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed)) ____cacheline_aligned;
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000
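
/*
 * Worked out: with IO_BITMAP_BITS = 65536 (one bit per I/O port),
 * IO_BITMAP_BYTES is 8192 and IO_BITMAP_LONGS is 2048 on 32-bit kernels
 * or 1024 on 64-bit kernels (sizeof(long) being 4 vs. 8).
 */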
struct tss_struct {
	/*
	 * The hardware state:
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];

	/*
	 * .. and then another 0x100 bytes for the emergency kernel stack:
	 */
	unsigned long		stack[64];

} ____cacheline_aligned;

DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
	unsigned long		ist[7];
};
#define	MXCSR_DEFAULT		0x1f80

struct i387_fsave_struct {
	u32			cwd;	/* FPU Control Word		*/
	u32			swd;	/* FPU Status Word		*/
	u32			twd;	/* FPU Tag Word			*/
	u32			fip;	/* FPU IP Offset		*/
	u32			fcs;	/* FPU IP Selector		*/
	u32			foo;	/* FPU Operand Pointer Offset	*/
	u32			fos;	/* FPU Operand Pointer Selector	*/

	/* 8*10 bytes for each FP-reg = 80 bytes:			*/
	u32			st_space[20];

	/* Software status information [not touched by FSAVE]:		*/
	u32			status;
};

struct i387_fxsave_struct {
	u16			cwd; /* Control Word			*/
	u16			swd; /* Status Word			*/
	u16			twd; /* Tag Word			*/
	u16			fop; /* Last Instruction Opcode		*/
	union {
		struct {
			u64	rip; /* Instruction Pointer		*/
			u64	rdp; /* Data Pointer			*/
		};
		struct {
			u32	fip; /* FPU IP Offset			*/
			u32	fcs; /* FPU IP Selector			*/
			u32	foo; /* FPU Operand Offset		*/
			u32	fos; /* FPU Operand Selector		*/
		};
	};
	u32			mxcsr;		/* MXCSR Register State */
	u32			mxcsr_mask;	/* MXCSR Mask		*/

	/* 8*16 bytes for each FP-reg = 128 bytes:			*/
	u32			st_space[32];

	/* 16*16 bytes for each XMM-reg = 256 bytes:			*/
	u32			xmm_space[64];

	u32			padding[12];

	union {
		u32		padding1[12];
		u32		sw_reserved[12];
	};

} __attribute__((aligned(16)));

struct i387_soft_struct {
	u32			cwd;
	u32			swd;
	u32			twd;
	u32			fip;
	u32			fcs;
	u32			foo;
	u32			fos;
	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];
	u8			ftop;
	u8			changed;
	u8			lookahead;
	u8			no_update;
	u8			rm;
	u8			alimit;
	struct math_emu_info	*info;
	u32			entry_eip;
};

struct ymmh_struct {
	/* 16 * 16 bytes for each YMMH-reg = 256 bytes */
	u32 ymmh_space[64];
};

struct xsave_hdr_struct {
	u64 xstate_bv;
	u64 reserved1[2];
	u64 reserved2[5];
} __attribute__((packed));

struct xsave_struct {
	struct i387_fxsave_struct i387;
	struct xsave_hdr_struct xsave_hdr;
	struct ymmh_struct ymmh;
	/* new processor state extensions will go here */
} __attribute__((packed, aligned(64)));

union thread_xstate {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
	struct xsave_struct		xsave;
};

struct fpu {
	union thread_xstate *state;
};
#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/*
	 * GCC hardcodes the stack canary as %gs:40.  Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};
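
/*
 * A minimal compile-time sanity check for that layout could look like the
 * following (illustrative only; the real checks live in the C files that
 * set up the per-cpu area):
 *
 *	BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
 */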
DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern unsigned long kernel_eflags;
extern asmlinkage void ignore_sysret(void);
#else	/* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Make sure stack canary segment base is cached-aligned:
 *   "For Intel Atom processors, avoid non zero segment base address
 *    that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
#endif	/* X86_64 */

extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;

struct perf_event;
struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long		sp0;
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned long		usersp;	/* Copy from PDA */
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif
#ifdef CONFIG_X86_32
	unsigned long		ip;
#endif
#ifdef CONFIG_X86_64
	unsigned long		fs;
#endif
	unsigned long		gs;
	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long		debugreg6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long		ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_no;
	unsigned long		error_code;
	/* floating point and extended processor state */
	struct fpu		fpu;
#ifdef CONFIG_X86_32
	/* Virtual 86 mode info */
	struct vm86_struct __user *vm86_info;
	unsigned long		screen_bitmap;
	unsigned long		v86flags;
	unsigned long		v86mask;
	unsigned long		saved_sp0;
	unsigned int		saved_fs;
	unsigned int		saved_gs;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;
};
static inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0;	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("mov %%db0, %0" : "=r" (val));
		break;
	case 1:
		asm("mov %%db1, %0" : "=r" (val));
		break;
	case 2:
		asm("mov %%db2, %0" : "=r" (val));
		break;
	case 3:
		asm("mov %%db3, %0" : "=r" (val));
		break;
	case 6:
		asm("mov %%db6, %0" : "=r" (val));
		break;
	case 7:
		asm("mov %%db7, %0" : "=r" (val));
		break;
	default:
		BUG();
	}

	return val;
}

static inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("mov %0, %%db0"	:: "r" (value));
		break;
	case 1:
		asm("mov %0, %%db1"	:: "r" (value));
		break;
	case 2:
		asm("mov %0, %%db2"	:: "r" (value));
		break;
	case 3:
		asm("mov %0, %%db3"	:: "r" (value));
		break;
	case 6:
		asm("mov %0, %%db6"	:: "r" (value));
		break;
	case 7:
		asm("mov %0, %%db7"	:: "r" (value));
		break;
	default:
		BUG();
	}
}
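
/*
 * Callers normally go through the get_debugreg()/set_debugreg() macros
 * defined below, so the same code works under CONFIG_PARAVIRT.  A rough
 * sketch of reading the debug status register and then clearing it:
 *
 *	unsigned long dr6;
 *
 *	get_debugreg(dr6, 6);
 *	set_debugreg(0, 6);
 */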
/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid
#define paravirt_enabled()	0

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)				\
	(var) = native_get_debugreg(register)
#define set_debugreg(value, register)				\
	native_set_debugreg(register, value)

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */
/*
 * Save the cr4 feature set we're using (ie
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPU's that boot up
 * after us can get the correct flags.
 */
extern unsigned long		mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
	unsigned long cr4;

	mmu_cr4_features |= mask;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
	unsigned long cr4;

	mmu_cr4_features &= ~mask;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}
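
/*
 * For example, FPU setup code could enable the OSFXSR bit (FXSAVE/FXRSTOR
 * and SSE support) in both %cr4 and the cached mask with a single call:
 *
 *	set_in_cr4(X86_CR4_OSFXSR);
 *
 * (Sketch only; X86_CR4_OSFXSR comes from <asm/processor-flags.h>.)
 */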
typedef struct {
	unsigned long		seg;
} mm_segment_t;


/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy state */
extern void prepare_to_copy(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);
/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}
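
/*
 * Typical uses of the single-datum helpers (illustrative):
 *
 *	unsigned int max_leaf = cpuid_eax(0);
 *	unsigned int sig      = cpuid_eax(1);
 *	unsigned int family   = (sig >> 8) & 0xf;	// base family field
 *	unsigned int model    = (sig >> 4) & 0xf;	// base model field
 *
 * The full family/model decoding (including the extended fields) is done
 * by cpu_detect() in cpu/common.c.
 */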
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
	rep_nop();
}

/* Stop speculative execution and prefetching of modified code. */
static inline void sync_core(void)
{
	int tmp;

#if defined(CONFIG_M386) || defined(CONFIG_M486)
	if (boot_cpu_data.x86 < 5)
		/* There is no speculative execution.
		 * jmp is a barrier to prefetching. */
		asm volatile("jmp 1f\n1:\n" ::: "memory");
	else
#endif
		/* cpuid is a barrier to speculative execution.
		 * Prefetched instructions are automatically
		 * invalidated when modified. */
		asm volatile("cpuid" : "=a" (tmp) : "0" (1)
			     : "ebx", "ecx", "edx", "memory");
}

static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     :: "a" (eax), "c" (ecx), "d" (edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	trace_hardirqs_on();
	/* "mwait %eax, %ecx;" */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}
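
/*
 * MONITOR/MWAIT are meant to be used as a pair: arm the monitor on an
 * address, re-check the wakeup condition, then sleep.  A simplified idle
 * loop along the lines of what mwait_idle_with_hints() does:
 *
 *	while (!need_resched()) {
 *		__monitor(&current_thread_info()->flags, 0, 0);
 *		if (need_resched())
 *			break;
 *		__mwait(eax_hint, ecx_hint);	// hints select the C-state
 *	}
 *
 * (Sketch only; the real implementation also handles CLFLUSH-before-MWAIT
 * errata and the interrupt-break capability bit.)
 */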
extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_c1e_mask(void);

extern unsigned long		boot_option_idle_override;
extern bool			c1e_detected;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL, IDLE_FORCE_MWAIT};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern void early_trap_init(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);
static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
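
/*
 * The usual pattern is read-modify-write, e.g. how single stepping over
 * branches is enabled (sketch, assuming DEBUGCTLMSR_BTF from msr-index.h):
 *
 *	unsigned long debugctl = get_debugctlmsr();
 *
 *	debugctl |= DEBUGCTLMSR_BTF;
 *	update_debugctlmsr(debugctl);
 */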
/*
 * from system description table in BIOS. Mostly for MCA use, but
 * others may find it useful:
 */
extern unsigned int		machine_id;
extern unsigned int		machine_submodel_id;
extern unsigned int		BIOS_revision;

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		ASM_NOP4
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 (%1)"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth to care about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}
#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_MAX		TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  {							  \
	.sp0			= sizeof(init_stack) + (long)&init_stack, \
	.vm86_info		= NULL,					  \
	.sysenter_cs		= __KERNEL_CS,				  \
	.io_bitmap_ptr		= NULL,					  \
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {							  \
	.x86_tss = {							  \
		.sp0		= sizeof(init_stack) + (long)&init_stack, \
		.ss0		= __KERNEL_DS,				  \
		.ss1		= __KERNEL_CS,				  \
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		  \
	 },								  \
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },	  \
}

extern unsigned long thread_saved_pc(struct task_struct *tsk);

#define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)						\
({								\
	unsigned long *__ptr = (unsigned long *)(info);		\
	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]);		\
})

/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task)						\
({									\
	struct pt_regs *__regs__;					\
	__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
	__regs__ - 1;							\
})

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)
#else
/*
 * User space process size. 47 bits minus one guard page.
 */
#define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_IA32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE_MAX

#define INIT_THREAD  { \
	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS  { \
	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

/*
 * Return saved PC of a blocked thread.
 * What is this good for? it will be always the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);
#endif /* CONFIG_X86_64 */
extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

extern int amd_get_nb_id(int cpu);

struct aperfmperf {
	u64 aperf, mperf;
};

static inline void get_aperfmperf(struct aperfmperf *am)
{
	WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_APERFMPERF));

	rdmsrl(MSR_IA32_APERF, am->aperf);
	rdmsrl(MSR_IA32_MPERF, am->mperf);
}

#define APERFMPERF_SHIFT 10

static inline
unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
				    struct aperfmperf *new)
{
	u64 aperf = new->aperf - old->aperf;
	u64 mperf = new->mperf - old->mperf;
	unsigned long ratio = aperf;

	mperf >>= APERFMPERF_SHIFT;
	if (mperf)
		ratio = div64_u64(aperf, mperf);

	return ratio;
}
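
/*
 * Worked example (illustrative): if APERF advanced by 2,000,000 counts and
 * MPERF by 1,000,000 between two samples, then
 *
 *	mperf >>= APERFMPERF_SHIFT;		// 1,000,000 >> 10 = 976
 *	ratio  = div64_u64(2000000, 976);	// ~2049, i.e. ~2.0 << 10
 *
 * so the returned ratio is a fixed-point value scaled by 2^APERFMPERF_SHIFT
 * (here roughly 2.0, meaning the CPU ran at about twice the reference
 * frequency while it was not idle).
 */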
/*
 * AMD errata checking
 */
#ifdef CONFIG_CPU_SUP_AMD
extern const int amd_erratum_383[];
extern const int amd_erratum_400[];
extern bool cpu_has_amd_erratum(const int *);

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)

#else
#define cpu_has_amd_erratum(x)	(false)
#endif /* CONFIG_CPU_SUP_AMD */
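
/*
 * An erratum table is an array of ints: the first entry is an OSVW id (or
 * -1 for the legacy, model-range-only form), followed by packed model
 * ranges and a terminating 0.  A hypothetical erratum affecting family
 * 0x10, models 0x00-0xff, all steppings, could be declared roughly as:
 *
 *	const int amd_erratum_example[] =
 *		AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0x10, 0x0, 0x0, 0xff, 0xf));
 *
 *	if (cpu_has_amd_erratum(amd_erratum_example))
 *		apply_workaround();		// hypothetical helper
 *
 * (Sketch only; the real tables live in arch/x86/kernel/cpu/amd.c.)
 */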
#endif /* _ASM_X86_PROCESSOR_H */