#ifndef ASM_X86__MMU_CONTEXT_64_H
#define ASM_X86__MMU_CONTEXT_64_H

#include <asm/pda.h>
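
/*
 * Mark this CPU as being in lazy TLB mode for the mm it currently has
 * loaded: on SMP the per-CPU mmu_state moves from TLBSTATE_OK to
 * TLBSTATE_LAZY, which lets a later flush IPI drop this CPU from the mm
 * (leave_mm) instead of flushing it again and again; on UP it is a no-op.
 */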
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
        if (read_pda(mmu_state) == TLBSTATE_OK)
                write_pda(mmu_state, TLBSTATE_LAZY);
#endif
}
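
/*
 * Switch this CPU from address space prev to next: update the
 * cpu_vm_mask bitmaps, load the new page tables through CR3 and, if it
 * differs, the LDT.  When prev == next on SMP, this CPU may have been in
 * lazy TLB mode and been dropped from cpu_vm_mask by leave_mm, in which
 * case flush IPIs were missed and CR3 must still be reloaded.
 */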
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned cpu = smp_processor_id();
        if (likely(prev != next)) {
                /* stop flush ipis for the previous mm */
                cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
                write_pda(mmu_state, TLBSTATE_OK);
                write_pda(active_mm, next);
#endif
                cpu_set(cpu, next->cpu_vm_mask);
                load_cr3(next->pgd);

                if (unlikely(next->context.ldt != prev->context.ldt))
                        load_LDT_nolock(&next->context);
        }
#ifdef CONFIG_SMP
        else {
                write_pda(mmu_state, TLBSTATE_OK);
                if (read_pda(active_mm) != next)
                        BUG();
                if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
                        /* We were in lazy tlb mode and leave_mm disabled
                         * tlb flush IPI delivery.  We must reload CR3
                         * to make sure to use no freed page tables.
                         */
                        load_cr3(next->pgd);
                        load_LDT_nolock(&next->context);
                }
        }
#endif
}
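
/*
 * deactivate_mm: zero the user %gs selector via load_gs_index(0) and
 * %fs with a direct mov, so stale user segment state is not carried
 * over (e.g. across exec).
 */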
#define deactivate_mm(tsk, mm)                          \
do {                                                    \
        load_gs_index(0);                               \
        asm volatile("movl %0,%%fs" :: "r" (0));        \
} while (0)

#endif /* ASM_X86__MMU_CONTEXT_64_H */