/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/mmu_context.h
 *
 *  Copyright (C) 1996 Russell King.
 *
 *  Changelog:
 *   27-06-1996	RMK	Created
 */
#ifndef __ASM_ARM_MMU_CONTEXT_H
#define __ASM_ARM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/preempt.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm-generic/mm_hooks.h>

void __check_vmalloc_seq(struct mm_struct *mm);

#ifdef CONFIG_MMU
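/*
 * vmalloc_seq is bumped whenever the kernel's vmalloc/ioremap mappings
 * change at the top (pgd) level. If this mm's counter is behind init_mm's,
 * its copy of those kernel entries is stale and __check_vmalloc_seq() must
 * resynchronise it before the mm is used.
 */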
static inline void check_vmalloc_seq(struct mm_struct *mm)
{
	if (!IS_ENABLED(CONFIG_ARM_LPAE) &&
	    unlikely(atomic_read(&mm->context.vmalloc_seq) !=
		     atomic_read(&init_mm.context.vmalloc_seq)))
		__check_vmalloc_seq(mm);
}
#endif

#ifdef CONFIG_CPU_HAS_ASID

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);

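/*
 * A fresh mm starts with context.id 0, meaning "no ASID allocated yet";
 * check_and_switch_context() hands out a real ASID and generation on the
 * first switch to this mm.
 */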
#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	atomic64_set(&mm->context.id, 0);
	return 0;
}

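/*
 * Cortex-A15 erratum 798181: broadcast TLB maintenance may not be observed
 * by other cores. The workaround needs to know which CPUs might still hold
 * stale TLB entries for this mm; this helper computes that set.
 */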
#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask);
#else  /* !CONFIG_ARM_ERRATA_798181 */
static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
					   cpumask_t *mask)
{
}
#endif /* CONFIG_ARM_ERRATA_798181 */

#else	/* !CONFIG_CPU_HAS_ASID */

#ifdef CONFIG_MMU

static inline void check_and_switch_context(struct mm_struct *mm,
					    struct task_struct *tsk)
{
	check_vmalloc_seq(mm);

	if (irqs_disabled())
		/*
		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
		 * high interrupt latencies, defer the call and continue
		 * running with the old mm. Since we only support UP systems
		 * on non-ASID CPUs, the old mm will remain valid until the
		 * finish_arch_post_lock_switch() call.
		 */
		mm->context.switch_pending = 1;
	else
		cpu_switch_mm(mm->pgd, mm);
}

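/*
 * preempt_enable_no_resched() is only available to built-in code, not to
 * loadable modules, and this header can reach modules via
 * linux/mmu_context.h (e.g. vhost), so hide the helper from MODULE builds.
 */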
#ifndef MODULE
#define finish_arch_post_lock_switch \
	finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	struct mm_struct *mm = current->mm;

	if (mm && mm->context.switch_pending) {
		/*
		 * Preemption must be disabled during cpu_switch_mm() as we
		 * have some stateful cache flush implementations. Check
		 * switch_pending again in case we were preempted and the
		 * switch to this mm was already done.
		 */
		preempt_disable();
		if (mm->context.switch_pending) {
			mm->context.switch_pending = 0;
			cpu_switch_mm(mm->pgd, mm);
		}
		preempt_enable_no_resched();
	}
}
#endif	/* !MODULE */

#endif	/* CONFIG_MMU */

#endif	/* CONFIG_CPU_HAS_ASID */

#define activate_mm(prev,next)	switch_mm(prev, next, NULL)

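/*
 * switch_mm() is called from the scheduler's context-switch path (and via
 * activate_mm() above for exec). On SMP, mm_cpumask(next) records which
 * CPUs have run this mm; that bookkeeping drives the I-cache invalidation
 * and VIVT cache handling below.
 */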
/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
#ifdef CONFIG_MMU
	unsigned int cpu = smp_processor_id();

	/*
	 * __sync_icache_dcache doesn't broadcast the I-cache invalidation,
	 * so check for possible thread migration and invalidate the I-cache
	 * if we're new to this CPU.
	 */
	if (cache_ops_need_broadcast() &&
	    !cpumask_empty(mm_cpumask(next)) &&
	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
		__flush_icache_all();

	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
		check_and_switch_context(next, tsk);
		if (cache_is_vivt())
			cpumask_clear_cpu(cpu, mm_cpumask(prev));
	}
#endif
}

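/*
 * With CONFIG_VMAP_STACK, task stacks live in the vmalloc area, so even a
 * CPU that merely enters lazy-TLB mode on this mm must resynchronise its
 * vmalloc mappings first: a stale mapping could fault on a stack access,
 * which cannot be tolerated on this path.
 */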
#ifdef CONFIG_VMAP_STACK
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	if (mm != &init_mm)
		check_vmalloc_seq(mm);
}
#define enter_lazy_tlb enter_lazy_tlb
#endif

#include <asm-generic/mmu_context.h>

#endif	/* __ASM_ARM_MMU_CONTEXT_H */