#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H

#ifndef CONFIG_PPC64
#include <asm-ppc/mmu_context.h>
#else
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/mmu.h>
#include <asm/cputable.h>

/*
 * Copyright (C) 2001 PPC 64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
/*
 * When entering a kernel thread there is no valid user segment, so mark
 * paca->pgdir NULL so that an SLB miss on user addresses will fault.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
#ifdef CONFIG_PPC_64K_PAGES
	get_paca()->pgdir = NULL;
#endif /* CONFIG_PPC_64K_PAGES */
}

/* Context IDs: 0 (NO_CONTEXT) means none allocated; valid IDs run from 1 to 2^20 - 1 */
#define NO_CONTEXT	0
#define MAX_CONTEXT	(0x100000-1)

extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);

extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
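
/*
 * Illustrative sketch, not part of this header: in this era the generic mm
 * code is expected to allocate and free the context roughly like this (the
 * exact call sites are an assumption, not defined here):
 *
 *	// on fork/exec, once a new mm_struct exists:
 *	if (init_new_context(tsk, mm))
 *		goto fail_nocontext;
 *
 *	// when the last reference to the mm is dropped (__mmdrop()):
 *	destroy_context(mm);
 */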

/*
 * switch_mm is the entry point called from the architecture-independent
 * code in kernel/sched.c
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	if (!cpu_isset(smp_processor_id(), next->cpu_vm_mask))
		cpu_set(smp_processor_id(), next->cpu_vm_mask);

	/* No need to flush userspace segments if the mm doesn't change */
#ifdef CONFIG_PPC_64K_PAGES
	if (prev == next && get_paca()->pgdir == next->pgd)
		return;
#else
	if (prev == next)
		return;
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */

	if (cpu_has_feature(CPU_FTR_SLB))
		switch_slb(tsk, next);
	else
		switch_stab(tsk, next);
}
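
/*
 * Illustrative sketch, not part of this header: the architecture-independent
 * caller referred to above is context_switch() in kernel/sched.c, which in
 * this era does roughly the following (details are an assumption):
 *
 *	struct mm_struct *mm = next->mm;
 *	struct mm_struct *oldmm = prev->active_mm;
 *
 *	if (!mm) {			// kernel thread: borrow oldmm
 *		next->active_mm = oldmm;
 *		atomic_inc(&oldmm->mm_count);
 *		enter_lazy_tlb(oldmm, next);
 *	} else
 *		switch_mm(oldmm, mm, next);
 */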

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm(prev, next, current);
	local_irq_restore(flags);
}
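
/*
 * Illustrative sketch, not part of this header: activate_mm() is called by
 * the generic exec path (exec_mmap() in fs/exec.c in this era) once the new
 * mm has been installed, roughly (details are an assumption):
 *
 *	task_lock(tsk);
 *	active_mm = tsk->active_mm;
 *	tsk->mm = mm;
 *	tsk->active_mm = mm;
 *	activate_mm(active_mm, mm);
 *	task_unlock(tsk);
 */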

#endif /* CONFIG_PPC64 */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */