/*
 *  arch/arm/include/asm/mmu_context.h
 *
 *  Copyright (C) 1996 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Changelog:
 *   27-06-1996	RMK	Created
 */
#ifndef __ASM_ARM_MMU_CONTEXT_H
#define __ASM_ARM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm-generic/mm_hooks.h>
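
/*
 * __check_vmalloc_seq() brings this mm's kernel (vmalloc/ioremap) page
 * table entries back in sync with init_mm; it is only called once the
 * vmalloc_seq counters are seen to differ (see check_and_switch_context()
 * below for the !CONFIG_CPU_HAS_ASID case).
 */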
void __check_vmalloc_seq(struct mm_struct *mm);

#ifdef CONFIG_CPU_HAS_ASID

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
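
/*
 * With CONFIG_CPU_HAS_ASID a new mm starts out with context.id set to 0,
 * i.e. no ASID assigned yet; check_and_switch_context() allocates one the
 * first time the mm is actually switched in. The statement expression
 * evaluates to 0, so init_new_context() always reports success.
 */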
#define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })

#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask);
#else	/* !CONFIG_ARM_ERRATA_798181 */
static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
					   cpumask_t *mask)
{
}
#endif	/* CONFIG_ARM_ERRATA_798181 */

#else	/* !CONFIG_CPU_HAS_ASID */

#ifdef CONFIG_MMU

static inline void check_and_switch_context(struct mm_struct *mm,
					    struct task_struct *tsk)
{
	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	if (irqs_disabled())
		/*
		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
		 * high interrupt latencies, defer the call and continue
		 * running with the old mm. Since we only support UP systems
		 * on non-ASID CPUs, the old mm will remain valid until the
		 * finish_arch_post_lock_switch() call.
		 */
		mm->context.switch_pending = 1;
	else
		cpu_switch_mm(mm->pgd, mm);
}
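
/*
 * Defining finish_arch_post_lock_switch as a macro of the same name lets
 * the core scheduler see that this architecture provides the hook; it is
 * called after the context switch, once interrupts are enabled again, and
 * performs the cpu_switch_mm() deferred above.
 */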
#define finish_arch_post_lock_switch \
	finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	struct mm_struct *mm = current->mm;

	if (mm && mm->context.switch_pending) {
		/*
		 * Preemption must be disabled during cpu_switch_mm() as we
		 * have some stateful cache flush implementations. Check
		 * switch_pending again in case we were preempted and the
		 * switch to this mm was already done.
		 */
		preempt_disable();
		if (mm->context.switch_pending) {
			mm->context.switch_pending = 0;
			cpu_switch_mm(mm->pgd, mm);
		}
		preempt_enable_no_resched();
	}
}

#endif	/* CONFIG_MMU */

#define init_new_context(tsk,mm)	0

#endif	/* CONFIG_CPU_HAS_ASID */

#define destroy_context(mm)		do { } while(0)
#define activate_mm(prev,next)		switch_mm(prev, next, NULL)

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
#ifdef CONFIG_MMU
	unsigned int cpu = smp_processor_id();

	/*
	 * __sync_icache_dcache doesn't broadcast the I-cache invalidation,
	 * so check for possible thread migration and invalidate the I-cache
	 * if we're new to this CPU.
	 */
	if (cache_ops_need_broadcast() &&
	    !cpumask_empty(mm_cpumask(next)) &&
	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
		__flush_icache_all();

	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
		check_and_switch_context(next, tsk);
		if (cache_is_vivt())
			cpumask_clear_cpu(cpu, mm_cpumask(prev));
	}
#endif
}

#define deactivate_mm(tsk,mm)	do { } while (0)

#endif