/*
 * Switch an MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 */
#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#include <asm/smtc.h>
#endif /* SMTC */

#include <asm-generic/mm_hooks.h>

/*
 * For the fast tlb miss handlers, we keep a per cpu array of pointers
 * to the current pgd for each processor.  Also, the proc. id is stuffed
 * into the context register.
 */
extern unsigned long pgd_current[];

#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
	pgd_current[smp_processor_id()] = (unsigned long)(pgd)

#ifdef CONFIG_32BIT
#define TLBMISS_HANDLER_SETUP()						\
	write_c0_context((unsigned long) smp_processor_id() << 25);	\
	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
#endif

#ifdef CONFIG_64BIT
#define TLBMISS_HANDLER_SETUP()						\
	write_c0_context((unsigned long) smp_processor_id() << 26);	\
	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
#endif
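
/*
 * Note on the shift counts (an assumption, based on the matching TLB
 * refill handlers): the CPU number is stashed in the software-writable
 * PTEBase area of the Context register, shifted by 25 (32-bit) or 26
 * (64-bit) so that a single "srl k1, 23" in the refill handler yields
 * a ready-made byte offset into pgd_current[]: cpu << 2 for 4-byte
 * pointers, cpu << 3 for 8-byte pointers, with no extra masking.
 */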

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

#define ASID_INC	0x40
#define ASID_MASK	0xfc0

#elif defined(CONFIG_CPU_R8000)

#define ASID_INC	0x10
#define ASID_MASK	0xff0

#elif defined(CONFIG_CPU_RM9000)

#define ASID_INC	0x1
#define ASID_MASK	0xfff

/* SMTC/34K debug hack - but maybe we'll keep it */
#elif defined(CONFIG_MIPS_MT_SMTC)

#define ASID_INC	0x1
extern unsigned long smtc_asid_mask;
#define ASID_MASK	(smtc_asid_mask)
#define HW_ASID_MASK	0xff
/* End SMTC/34K debug hack */

#else /* FIXME: not correct for R6000 */

#define ASID_INC	0x1
#define ASID_MASK	0xff

#endif
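
/*
 * In each case ASID_INC is the lowest set bit of ASID_MASK, i.e. the
 * position of the hardware ASID field within EntryHi: e.g. on
 * R3000-class CPUs the 6-bit ASID occupies EntryHi bits 11:6, so the
 * counter advances in steps of 0x40 under a mask of 0xfc0, while CPUs
 * with an 8-bit ASID in bits 7:0 step by 1 under a mask of 0xff.
 */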

#define cpu_context(cpu, mm)	((mm)->context[cpu])
#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & ASID_MASK)
#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * All upper bits that are unused by the hardware are treated
 * as a software asid extension (the "version").
 */
#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
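
/*
 * Worked example with the classic 8-bit ASID (ASID_MASK == 0xff):
 * ASID_MASK | (ASID_MASK - 1) == 0xff, so ASID_VERSION_MASK is ~0xffUL
 * and ASID_FIRST_VERSION is 0x100.  asid_cache(cpu) then advances
 * 0x101, 0x102, ..., 0x1ff; the step to 0x200 wraps the hardware field
 * to zero, which triggers a full TLB flush and starts the next version.
 */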

#ifndef CONFIG_MIPS_MT_SMTC

/* Normal, classic MIPS get_new_mmu_context */
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	unsigned long asid = asid_cache(cpu);

	if (!((asid += ASID_INC) & ASID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
		local_flush_tlb_all();	/* start new asid cycle */
		if (!asid)		/* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}
	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
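
/*
 * On a rollover every ASID value is about to be reused, so any cache
 * that tags entries with the ASID must be flushed along with the TLB;
 * that is why a virtually tagged icache gets flush_icache_all() here.
 * When the counter itself wraps to zero it is bumped to
 * ASID_FIRST_VERSION, so a version of 0 (the initial cpu_context()
 * value set by init_new_context()) always reads as stale in the
 * version check done by switch_mm() below.
 */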

#else /* CONFIG_MIPS_MT_SMTC */

#define get_new_mmu_context(mm, cpu) smtc_get_new_mmu_context((mm), (cpu))

#endif /* CONFIG_MIPS_MT_SMTC */

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int i;

	for_each_online_cpu(i)
		cpu_context(i, mm) = 0;

	return 0;
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
#ifdef CONFIG_MIPS_MT_SMTC
	unsigned long oldasid;
	unsigned long mtflags;
	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
	local_irq_save(flags);
	mtflags = dvpe();
#else /* Not SMTC */
	local_irq_save(flags);
#endif /* CONFIG_MIPS_MT_SMTC */

	/* Check if our ASID is of an older version and thus invalid */
	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
		get_new_mmu_context(next, cpu);
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * If the EntryHi ASID being replaced happens to be
	 * the value flagged at ASID recycling time as having
	 * an extended life, clear the bit showing it being
	 * in use by this "CPU", and if that's the last bit,
	 * free up the ASID value for use and flush any old
	 * instances of it from the TLB.
	 */
	oldasid = (read_c0_entryhi() & ASID_MASK);
	if (smtc_live_asid[mytlb][oldasid]) {
		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
		if (smtc_live_asid[mytlb][oldasid] == 0)
			smtc_flush_tlb_asid(oldasid);
	}
	/*
	 * Tread softly on EntryHi, and so long as we support
	 * having ASID_MASK smaller than the hardware maximum,
	 * make sure no "soft" bits become "hard"...
	 */
	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
			 | (cpu_context(cpu, next) & ASID_MASK));
	ehb(); /* Make sure it propagates to TCStatus */
	evpe(mtflags);
#else
	write_c0_entryhi(cpu_context(cpu, next));
#endif /* CONFIG_MIPS_MT_SMTC */
	TLBMISS_HANDLER_SETUP_PGD(next->pgd);

	/*
	 * Mark current->active_mm as not "active" anymore.
	 * We don't want to mislead possible IPI tlb flush routines.
	 */
	cpu_clear(cpu, prev->cpu_vm_mask);
	cpu_set(cpu, next->cpu_vm_mask);

	local_irq_restore(flags);
}
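
/*
 * A subtlety worth noting in the non-SMTC path: the raw cpu_context()
 * value, software version bits included, is written to EntryHi above.
 * The version bits land above the hardware ASID field, in bits the TLB
 * only reads back for tlbp/tlbwi and rewrites on every TLB exception,
 * so leaving them set is assumed to be harmless here.
 */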

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
}

#define deactivate_mm(tsk, mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();
#ifdef CONFIG_MIPS_MT_SMTC
	unsigned long oldasid;
	unsigned long mtflags;
	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
#endif /* CONFIG_MIPS_MT_SMTC */

	local_irq_save(flags);

	/* Unconditionally get a new ASID.  */
	get_new_mmu_context(next, cpu);

#ifdef CONFIG_MIPS_MT_SMTC
	/* See comments for similar code above */
	mtflags = dvpe();
	oldasid = read_c0_entryhi() & ASID_MASK;
	if (smtc_live_asid[mytlb][oldasid]) {
		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
		if (smtc_live_asid[mytlb][oldasid] == 0)
			smtc_flush_tlb_asid(oldasid);
	}
	/* See comments for similar code above */
	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
			 (cpu_context(cpu, next) & ASID_MASK));
	ehb(); /* Make sure it propagates to TCStatus */
	evpe(mtflags);
#else
	write_c0_entryhi(cpu_context(cpu, next));
#endif /* CONFIG_MIPS_MT_SMTC */

	TLBMISS_HANDLER_SETUP_PGD(next->pgd);

	/* mark mmu ownership change */
	cpu_clear(cpu, prev->cpu_vm_mask);
	cpu_set(cpu, next->cpu_vm_mask);

	local_irq_restore(flags);
}

/*
 * If mm is currently active_mm, we can't really drop it.  Instead,
 * we will get a new one for it.
 */
static inline void
drop_mmu_context(struct mm_struct *mm, unsigned cpu)
{
	unsigned long flags;
#ifdef CONFIG_MIPS_MT_SMTC
	unsigned long oldasid;
	/* Can't use spinlock because called from TLB flush within DVPE */
	unsigned int prevvpe;
	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
#endif /* CONFIG_MIPS_MT_SMTC */

	local_irq_save(flags);

	if (cpu_isset(cpu, mm->cpu_vm_mask))  {
		get_new_mmu_context(mm, cpu);
#ifdef CONFIG_MIPS_MT_SMTC
		/* See comments for similar code above */
		prevvpe = dvpe();
		oldasid = (read_c0_entryhi() & ASID_MASK);
		if (smtc_live_asid[mytlb][oldasid]) {
			smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
			if (smtc_live_asid[mytlb][oldasid] == 0)
				smtc_flush_tlb_asid(oldasid);
		}
		/* See comments for similar code above */
		write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
				 | cpu_asid(cpu, mm));
		ehb(); /* Make sure it propagates to TCStatus */
		evpe(prevvpe);
#else /* not CONFIG_MIPS_MT_SMTC */
		write_c0_entryhi(cpu_asid(cpu, mm));
#endif /* CONFIG_MIPS_MT_SMTC */
	} else {
		/* will get a new context next time */
#ifndef CONFIG_MIPS_MT_SMTC
		cpu_context(cpu, mm) = 0;
#else /* SMTC */
		int i;

		/* SMTC shares the TLB (and ASIDs) across VPEs */
		for_each_online_cpu(i) {
			if ((smtc_status & SMTC_TLB_SHARED) ||
			    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
				cpu_context(i, mm) = 0;
		}
#endif /* CONFIG_MIPS_MT_SMTC */
	}
	local_irq_restore(flags);
}

#endif /* _ASM_MMU_CONTEXT_H */