#ifndef __X86_64_MMU_CONTEXT_H
#define __X86_64_MMU_CONTEXT_H

#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/pda.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>

/*
 * possibly do the LDT unload here?
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
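
/*
 * enter_lazy_tlb(): on SMP, mark this CPU's mmu_state as lazy when a
 * kernel thread is scheduled in, so the flush IPI path can drop the
 * old mm (leave_mm) instead of flushing its TLB entries repeatedly.
 */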
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (read_pda(mmu_state) == TLBSTATE_OK)
		write_pda(mmu_state, TLBSTATE_LAZY);
#endif
}
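
/*
 * switch_mm(): stop flush IPIs for the outgoing mm, switch this CPU to
 * the incoming mm's page tables via CR3, and reload the LDT only if it
 * differs between the two contexts.
 */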
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();
	if (likely(prev != next)) {
		/* stop flush ipis for the previous mm */
		cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
		write_pda(mmu_state, TLBSTATE_OK);
		write_pda(active_mm, next);
#endif
		cpu_set(cpu, next->cpu_vm_mask);
		load_cr3(next->pgd);
		if (unlikely(next->context.ldt != prev->context.ldt))
			load_LDT_nolock(&next->context);
	}
#ifdef CONFIG_SMP
	else {
		write_pda(mmu_state, TLBSTATE_OK);
		if (read_pda(active_mm) != next)
			BUG();
		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
			/* We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery.  We must reload CR3
			 * to make sure we don't use freed page tables.
			 */
			load_cr3(next->pgd);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}
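
/*
 * deactivate_mm(): clear the user %fs selector and the %gs index so no
 * stale TLS segment state is carried over once the old mm is dropped
 * (e.g. across exec).
 */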
#define deactivate_mm(tsk, mm)	do {			\
	load_gs_index(0);				\
	asm volatile("movl %0,%%fs" : : "r" (0));	\
} while (0)

#define activate_mm(prev, next) \
	switch_mm((prev), (next), NULL)

#endif