# ifndef _ASM_IA64_MMU_CONTEXT_H
# define _ASM_IA64_MMU_CONTEXT_H

/*
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * Routines to manage the allocation of task context numbers.  Task context
 * numbers are used to reduce or eliminate the need to perform TLB flushes
 * due to context switches.  Context numbers are implemented using ia-64
 * region ids.  Since the IA-64 TLB does not consider the region number when
 * performing a TLB lookup, we need to assign a unique region id to each
 * region in a process.  We use the least significant three bits in a
 * region id for this purpose.
 */

# define IA64_REGION_ID_KERNEL	0	/* the kernel's region id (tlb.c depends on this being 0) */

# define ia64_rid(ctx,addr)	(((ctx) << 3) | (addr >> 61))
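/*
 * Worked example (illustration only): a task holding context number 0x17
 * that touches an address in region 5 (address bits 63:61 == 101) gets
 * ia64_rid(0x17, addr) == (0x17 << 3) | 5 == 0xbd for that region.
 */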

# include <asm/page.h>

# ifndef __ASSEMBLY__

# include <linux/compiler.h>
# include <linux/percpu.h>
# include <linux/sched.h>
# include <linux/spinlock.h>

# include <asm/processor.h>
# include <asm-generic/mm_hooks.h>
struct ia64_ctx {
	spinlock_t lock;
	unsigned int next;	/* next context number to use */
	unsigned int limit;	/* available free range */
	unsigned int max_ctx;	/* max. context value supported by all CPUs */
				/* call wrap_mmu_context when next >= max */
	unsigned long *bitmap;	/* bitmap size is max_ctx+1 */
	unsigned long *flushmap;/* pending rid to be flushed */
};

extern struct ia64_ctx ia64_ctx;
DECLARE_PER_CPU(u8, ia64_need_tlb_flush);

extern void mmu_context_init (void);
extern void wrap_mmu_context (struct mm_struct *mm);
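
/* Entering lazy TLB mode requires no extra work on ia64. */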
static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * When the context counter wraps around, all TLBs need to be flushed
 * because an old context number might have been reused.  This is
 * signalled by the ia64_need_tlb_flush per-CPU variable, which is
 * checked in the routine below.  Called by activate_mm().
 * <efocht@ess.nec.de>
 */
static inline void
delayed_tlb_flush (void)
{
	extern void local_flush_tlb_all (void);
	unsigned long flags;

	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
		spin_lock_irqsave(&ia64_ctx.lock, flags);
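		/* re-check now that we hold ia64_ctx.lock */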
		if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
			local_flush_tlb_all();
			__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
		}
		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
	}
}

static inline nv_mm_context_t
get_mmu_context (struct mm_struct *mm)
{
	unsigned long flags;
	nv_mm_context_t context = mm->context;

	if (likely(context))
		goto out;

	spin_lock_irqsave(&ia64_ctx.lock, flags);
	/* re-check, now that we've got the lock: */
	context = mm->context;
	if (context == 0) {
		cpumask_clear(mm_cpumask(mm));
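		/*
		 * The free run [next, limit) is used up: find the next free
		 * context number and the end of the free run it begins; if
		 * nothing is left below max_ctx, wrap around (flushing TLBs).
		 */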
		if (ia64_ctx.next >= ia64_ctx.limit) {
			ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			if (ia64_ctx.next >= ia64_ctx.max_ctx)
				wrap_mmu_context(mm);
		}
		mm->context = context = ia64_ctx.next++;
		__set_bit(context, ia64_ctx.bitmap);
	}
	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
out:
	/*
	 * Ensure we're not starting to use "context" before any old
	 * uses of it are gone from our TLB.
	 */
	delayed_tlb_flush();

	return context;
}

/*
 * Initialize context number to some sane value.  MM is guaranteed to be a
 * brand-new address-space, so no TLB flushing is needed, ever.
 */
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
	mm->context = 0;
	return 0;
}

static inline void
destroy_context (struct mm_struct *mm)
{
	/* Nothing to do. */
}
static inline void
reload_context (nv_mm_context_t context)
{
	unsigned long rid;
	unsigned long rid_incr = 0;
	unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;

	old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
	rid = context << 3;	/* make space for encoding the region number */
	rid_incr = 1 << 8;

	/* encode the region id, preferred page size, and VHPT enable bit: */
	rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
	rr1 = rr0 + 1*rid_incr;
	rr2 = rr0 + 2*rid_incr;
	rr3 = rr0 + 3*rid_incr;
	rr4 = rr0 + 4*rid_incr;
# ifdef CONFIG_HUGETLB_PAGE
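	/* keep the preferred page-size bits currently set for the huge-page region */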
	rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);

# if RGN_HPAGE != 4
# error "reload_context assumes RGN_HPAGE is 4"
# endif
# endif

	ia64_set_rr0_to_rr4(rr0, rr1, rr2, rr3, rr4);
	ia64_srlz_i();	/* srlz.i implies srlz.d */
}

/*
 * Must be called with preemption off
 */
static inline void
activate_context (struct mm_struct *mm)
{
	nv_mm_context_t context;

	do {
		context = get_mmu_context(mm);
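		/* note this CPU as a current user of the mm */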
		if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
			cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		reload_context(context);
		/*
		 * in the unlikely event of a TLB-flush by another thread,
		 * redo the load.
		 */
	} while (unlikely(context != mm->context));
}
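
/* Deactivating an address space requires no work on ia64. */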
# define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * Switch from address space PREV to address space NEXT.
 */
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * We may get interrupts here, but that's OK because interrupt
	 * handlers cannot touch user-space.
	 */
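	/* load the physical address of the new pgd into the page-table-base kernel register */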
	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
	activate_context(next);
}

# define switch_mm(prev_mm,next_mm,next_task)	activate_mm(prev_mm, next_mm)

# endif /* ! __ASSEMBLY__ */
# endif /* _ASM_IA64_MMU_CONTEXT_H */