/*
 * Switch an MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2013 Tensilica Inc.
 */

#ifndef _XTENSA_MMU_CONTEXT_H
#define _XTENSA_MMU_CONTEXT_H

#ifndef CONFIG_MMU
#include <asm/nommu_context.h>
#else

#include <linux/stringify.h>
#include <linux/sched.h>

#include <asm/vectors.h>

#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>
#include <asm-generic/percpu.h>

#if (XCHAL_HAVE_TLBS != 1)
# error "Linux must have an MMU!"
#endif

DECLARE_PER_CPU(unsigned long, asid_cache);
#define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)

/*
 * NO_CONTEXT is the invalid ASID value that we don't ever assign to
 * any user or kernel context.  We use the reserved values in the
 * ASID_INSERT macro below:
 *
 *   0        invalid
 *   1        kernel
 *   2        reserved
 *   3        reserved
 *   4...255  available
 */

#define NO_CONTEXT	0
#define ASID_USER_FIRST	4
#define ASID_MASK	((1 << XCHAL_MMU_ASID_BITS) - 1)
#define ASID_INSERT(x)	(0x03020001 | (((x) & ASID_MASK) << 8))
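
/*
 * Worked example (added commentary, not from the original source;
 * assumes the common XCHAL_MMU_ASID_BITS == 8 configuration):
 *
 *   ASID_INSERT(0x6a) == 0x03020001 | (0x6a << 8) == 0x03026a01
 *
 * Read as four 8-bit RASID fields, one per protection ring:
 * ring 0 -> 0x01 (kernel), ring 1 -> 0x6a (the user ASID),
 * rings 2/3 -> 0x02/0x03 (reserved, never assigned to a context).
 */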

void init_mmu(void);

/*
 * Write the RASID special register; the isync ensures the new ASID
 * mapping takes effect before any subsequent memory access.
 */
static inline void set_rasid_register(unsigned long val)
{
	__asm__ __volatile__ (" wsr %0, rasid\n\t"
			      " isync\n" : : "a" (val));
}

/* Read the current value of the RASID special register. */
static inline unsigned long get_rasid_register(void)
{
	unsigned long tmp;
	__asm__ __volatile__ (" rsr %0, rasid\n\t" : "=a" (tmp));
	return tmp;
}

static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long asid = cpu_asid_cache(cpu);

	if ((++asid & ASID_MASK) == 0) {
		/*
		 * Start new asid cycle; continue counting with next
		 * incarnation bits; skipping over 0, 1, 2, 3.
		 */
		local_flush_tlb_all();
		asid += ASID_USER_FIRST;
	}
	cpu_asid_cache(cpu) = asid;
	mm->context.asid[cpu] = asid;
	mm->context.cpu = cpu;
}
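
/*
 * Illustrative trace (added commentary, assumes 8 ASID bits): if
 * cpu_asid_cache(cpu) is 0x1ff, ++asid gives 0x200 and the low byte
 * wraps to 0, so the TLB is flushed and asid += 4 yields 0x204:
 * generation 2, first user ASID 4.  The bits above ASID_MASK thus act
 * as a generation counter, letting stale ASIDs be detected without a
 * TLB flush on every context switch.
 */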

static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
	/*
	 * Check if our ASID is of an older version and thus invalid.
	 */

	if (mm) {
		unsigned long asid = mm->context.asid[cpu];

		if (asid == NO_CONTEXT ||
				((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK))
			get_new_mmu_context(mm, cpu);
	}
}
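
/*
 * Illustrative only (added commentary): with mm->context.asid[cpu] ==
 * 0x104 and cpu_asid_cache(cpu) == 0x21b, the check computes
 * (0x104 ^ 0x21b) & ~0xff == 0x31f & ~0xff == 0x300, which is nonzero:
 * the ASID belongs to an older generation, so a fresh one is allocated.
 */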

static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
{
	get_mmu_context(mm, cpu);
	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
	invalidate_page_directory();
}

/*
 * Initialize the context related info for a new mm_struct
 * instance.  Valid cpu values are 0..(NR_CPUS-1), so initializing
 * to -1 says the process has never run on any core.
 */
static inline int init_new_context(struct task_struct *tsk,
		struct mm_struct *mm)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		mm->context.asid[cpu] = NO_CONTEXT;
	}
	mm->context.cpu = -1;
	return 0;
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();
	int migrated = next->context.cpu != cpu;

	/* Flush the icache if we migrated to a new core. */
	if (migrated) {
		__invalidate_icache_all();
		next->context.cpu = cpu;
	}
	if (migrated || prev != next)
		activate_context(next, cpu);
}
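
/*
 * Added commentary (an assumption, not from the original source): the
 * icache flush on migration is presumably needed because the Xtensa
 * instruction cache is not snooped, so instructions written while the
 * task last ran on another core may not yet be visible to this core's
 * icache.
 */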

#define activate_mm(prev, next)	switch_mm((prev), (next), NULL)
#define deactivate_mm(tsk, mm)	do { } while (0)

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	invalidate_page_directory();
}

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Nothing to do. */
}

#endif /* CONFIG_MMU */
#endif /* _XTENSA_MMU_CONTEXT_H */