# ifndef __ASM_POWERPC_MMU_CONTEXT_H
# define __ASM_POWERPC_MMU_CONTEXT_H
# ifdef __KERNEL__
# include <linux/kernel.h>
# include <linux/mm.h>
# include <linux/sched.h>
# include <linux/spinlock.h>
# include <asm/mmu.h>
# include <asm/cputable.h>
# include <asm-generic/mm_hooks.h>
# include <asm/cputhreads.h>
/*
 * Most of the context management is out of line
 */
extern int init_new_context ( struct task_struct * tsk , struct mm_struct * mm ) ;
extern void destroy_context ( struct mm_struct * mm ) ;
extern void switch_mmu_context ( struct mm_struct * prev , struct mm_struct * next ) ;
extern void switch_stab ( struct task_struct * tsk , struct mm_struct * mm ) ;
extern void switch_slb ( struct task_struct * tsk , struct mm_struct * mm ) ;
extern void set_context ( unsigned long id , pgd_t * pgd ) ;
# ifdef CONFIG_PPC_BOOK3S_64
2009-11-02 15:02:30 +03:00
extern int __init_new_context ( void ) ;
extern void __destroy_context ( int context_id ) ;
2009-07-24 03:15:26 +04:00
static inline void mmu_context_init ( void ) { }
# else
extern void mmu_context_init ( void ) ;
# endif
/*
 * switch_mm is the entry point called from the architecture independent
 * code in kernel/sched.c
 *
 * Installs @next as the current address space on this CPU, for task
 * @tsk. Interrupts are expected to be off (the generic scheduler calls
 * this with the runqueue lock held).
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	/* Mark this context has been used on the new CPU */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));

	/* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32
	tsk->thread.pgdir = next->pgd;
#endif /* CONFIG_PPC32 */

	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = next->pgd;
#endif

	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

	/* We must stop all altivec streams before changing the HW
	 * context
	 */
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */

	/* The actual HW switching method differs between the various
	 * sub architectures: server 64-bit hash MMUs flip the SLB/STAB,
	 * everything else goes through the out-of-line helper.
	 */
#ifdef CONFIG_PPC_STD_MMU_64
	if (cpu_has_feature(CPU_FTR_SLB))
		switch_slb(tsk, next);
	else
		switch_stab(tsk, next);
#else
	/* Out of line for now */
	switch_mmu_context(prev, next);
#endif
}
/* powerpc needs no per-mm teardown work on deactivation */
#define deactivate_mm(tsk, mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 *
 * Simply switch_mm() with interrupts masked, since unlike the
 * scheduler path the caller does not arrive with them disabled.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long irq_flags;

	local_irq_save(irq_flags);
	switch_mm(prev, next, current);
	local_irq_restore(irq_flags);
}
/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}
# endif /* __KERNEL__ */
# endif /* __ASM_POWERPC_MMU_CONTEXT_H */