#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

#define __STR(x) #x
#define STR(x) __STR(x)

#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER \
	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
/* Save and restore flags across the switch to handle the leaking NT bit */
#define switch_to(prev,next,last) \
	asm volatile(SAVE_CONTEXT					\
		"movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	\
		"movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
		"call __switch_to\n\t"					\
		".globl thread_return\n"				\
		"thread_return:\n\t"					\
		"movq %%gs:%P[pda_pcurrent],%%rsi\n\t"			\
		"movq %P[thread_info](%%rsi),%%r8\n\t"			\
		LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t"	\
		"movq %%rax,%%rdi\n\t"					\
		"jc ret_from_fork\n\t"					\
		RESTORE_CONTEXT						\
		: "=a" (last)						\
		: [next] "S" (next), [prev] "D" (prev),			\
		  [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
		  [ti_flags] "i" (offsetof(struct thread_info, flags)),	\
		  [tif_fork] "i" (TIF_FORK),				\
		  [thread_info] "i" (offsetof(struct task_struct, stack)), \
		  [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
		: "memory", "cc" __EXTRA_CLOBBER)
extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)	\
	asm volatile("\n"			\
		"1:\t"				\
		"movl %k0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"movl %1,%%" #seg "\n\t"	\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 8\n\t"			\
		".quad 1b,3b\n"			\
		".previous"			\
		: :"r" (value), "r" (0))
/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")

static inline unsigned long read_cr0(void)
{
	unsigned long cr0;
	asm volatile("movq %%cr0,%0" : "=r" (cr0));
	return cr0;
}

static inline void write_cr0(unsigned long val)
{
	asm volatile("movq %0,%%cr0" :: "r" (val));
}

static inline unsigned long read_cr2(void)
{
	unsigned long cr2;
	asm("movq %%cr2,%0" : "=r" (cr2));
	return cr2;
}

static inline void write_cr2(unsigned long val)
{
	asm volatile("movq %0,%%cr2" :: "r" (val));
}

static inline unsigned long read_cr3(void)
{
	unsigned long cr3;
	asm("movq %%cr3,%0" : "=r" (cr3));
	return cr3;
}

static inline void write_cr3(unsigned long val)
{
	asm volatile("movq %0,%%cr3" :: "r" (val) : "memory");
}
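
/*
 * Note (added for clarity): reloading %cr3 flushes all non-global TLB
 * entries, so the "memory" clobber keeps gcc from moving memory accesses
 * across the page-table switch.
 */
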
static inline unsigned long read_cr4(void)
{
	unsigned long cr4;
	asm("movq %%cr4,%0" : "=r" (cr4));
	return cr4;
}

static inline void write_cr4(unsigned long val)
{
asm volatile ( " movq %0,%%cr4 " : : " r " ( val ) : " memory " ) ;
2007-07-22 11:12:29 +02:00
}
static inline unsigned long read_cr8(void)
{
	unsigned long cr8;
	asm("movq %%cr8,%0" : "=r" (cr8));
	return cr8;
}

static inline void write_cr8(unsigned long val)
{
	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}
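
/*
 * Note (added for clarity): on x86-64, %cr8 is an architectural alias
 * for the priority bits of the local APIC task-priority register (TPR).
 */
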
#define stts() write_cr0(8 | read_cr0())
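
/*
 * Note (added for clarity): 8 is the TS bit (bit 3) of %cr0. Setting it
 * makes the next FPU/SSE instruction raise #NM, which the kernel uses to
 * restore FPU state lazily.
 */
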
#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory")

#endif	/* __KERNEL__ */

#define nop() __asm__ __volatile__ ("nop")

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	do {} while(0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do {} while(0)
#endif

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")

#ifdef CONFIG_UNORDERED_IO
#define wmb()	asm volatile("sfence" ::: "memory")
#else
#define wmb()	asm volatile("" ::: "memory")
#endif
#define read_barrier_depends()	do {} while(0)
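
/*
 * Illustrative pairing (a sketch with hypothetical variables): a writer
 * publishes data for another CPU with smp_wmb(), and the reader orders
 * its loads with smp_rmb():
 *
 *	CPU 0				CPU 1
 *	shared_data = compute();	if (shared_ready) {
 *	smp_wmb();				smp_rmb();
 *	shared_ready = 1;			consume(shared_data);
 *					}
 *
 * Ordinary x86-64 stores are not reordered with other stores, which is
 * why wmb() is only a compiler barrier unless CONFIG_UNORDERED_IO is set.
 */
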
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
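
/*
 * Note (added for clarity): set_mb() assigns "value" with full
 * memory-barrier semantics; xchg with a memory operand is implicitly
 * LOCKed on x86, so the store also acts as mb().
 */
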
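/*
 * Note (added for clarity): comparing &(x) with an unsigned long * below
 * makes gcc warn ("comparison of distinct pointer types") whenever x is
 * not an unsigned long; the (void) cast suppresses the unused-value
 * warning.
 */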
#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)

#include <linux/irqflags.h>

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

#endif