// SPDX-License-Identifier: GPL-2.0-only
/*
 * Context tracking: Probe on high level context boundaries such as kernel
 * and userspace. This includes syscalls and exceptions entry/exit.
 *
 * This is used by RCU to remove its dependency on the timer tick while a CPU
 * runs in userspace.
 *
 * Started by Frederic Weisbecker:
 *
 * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
 *
 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 *
 */
2012-11-27 22:33:25 +04:00
# include <linux/context_tracking.h>
# include <linux/rcupdate.h>
# include <linux/sched.h>
# include <linux/hardirq.h>
2012-12-16 23:00:34 +04:00
# include <linux/export.h>
2014-06-14 10:47:12 +04:00
# include <linux/kprobes.h>
2012-11-27 22:33:25 +04:00
2022-06-08 17:40:24 +03:00
# ifdef CONFIG_CONTEXT_TRACKING_USER
2013-07-11 22:27:43 +04:00
# define CREATE_TRACE_POINTS
# include <trace/events/context_tracking.h>
2019-10-16 05:56:51 +03:00
DEFINE_STATIC_KEY_FALSE ( context_tracking_key ) ;
EXPORT_SYMBOL_GPL ( context_tracking_key ) ;
2013-07-11 21:12:32 +04:00
DEFINE_PER_CPU ( struct context_tracking , context_tracking ) ;
2013-07-10 04:44:35 +04:00
EXPORT_SYMBOL_GPL ( context_tracking ) ;
2012-11-27 22:33:25 +04:00
2020-03-04 13:05:22 +03:00
/*
 * Take the per-CPU recursion guard. Returns true only for the outermost
 * call on this CPU; nested (or corrupted-count) entries back out their
 * increment and report false so the caller skips its work.
 */
static noinstr bool context_tracking_recursion_enter(void)
{
	int nesting = __this_cpu_inc_return(context_tracking.recursion);

	if (nesting != 1) {
		WARN_ONCE((nesting < 1), "Invalid context tracking recursion value %d\n", nesting);
		__this_cpu_dec(context_tracking.recursion);
		return false;
	}

	return true;
}
2020-03-04 13:05:22 +03:00
/* Undo context_tracking_recursion_enter(): drop the per-CPU recursion count */
static __always_inline void context_tracking_recursion_exit(void)
{
	__this_cpu_dec(context_tracking.recursion);
}
2013-01-16 20:16:37 +04:00
/**
 * __ct_user_enter - Inform the context tracking that the CPU is going
 *		     to enter user or guest space mode.
 * @state: the context being entered: CONTEXT_USER or CONTEXT_GUEST
 *
 * This function must be called right before we switch from the kernel
 * to user or guest space, when it's guaranteed the remaining kernel
 * instructions to execute won't use any RCU read side critical section
 * because this function sets RCU in extended quiescent state.
 */
void noinstr __ct_user_enter(enum ctx_state state)
{
	/* Kernel threads aren't supposed to go to userspace */
	WARN_ON_ONCE(!current->mm);

	/* Guard against nested calls on this CPU: only the outermost one proceeds */
	if (!context_tracking_recursion_enter())
		return;

	if (__this_cpu_read(context_tracking.state) != state) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * At this stage, only low level arch entry code remains and
			 * then we'll run in userspace. We can assume there won't be
			 * any RCU read-side critical section until the next call to
			 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
			 * on the tick.
			 */
			if (state == CONTEXT_USER) {
				/*
				 * Tracing and vtime accounting are instrumentable,
				 * so they must run inside an explicit
				 * instrumentation_begin()/end() section of this
				 * noinstr function.
				 */
				instrumentation_begin();
				trace_user_enter(0);
				vtime_user_enter(current);
				instrumentation_end();
			}
			rcu_user_enter();
		}
		/*
		 * Even if context tracking is disabled on this CPU, because it's outside
		 * the full dynticks mask for example, we still have to keep track of the
		 * context transitions and states to prevent inconsistency on those of
		 * other CPUs.
		 * If a task triggers an exception in userspace, sleep on the exception
		 * handler and then migrate to another CPU, that new CPU must know where
		 * the exception returns by the time we call exception_exit().
		 * This information can only be provided by the previous CPU when it called
		 * exception_enter().
		 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
		 * is false because we know that CPU is not tickless.
		 */
		__this_cpu_write(context_tracking.state, state);
	}
	context_tracking_recursion_exit();
}
EXPORT_SYMBOL_GPL(__ct_user_enter);
2015-10-28 04:39:56 +03:00
2022-06-08 17:40:19 +03:00
/*
 * OBSOLETE:
 * This function should be noinstr but the below local_irq_restore() is
 * unsafe because it involves illegal RCU uses through tracing and lockdep.
 * This is unlikely to be fixed as this function is obsolete. The preferred
 * way is to call __ct_user_enter() through user_enter_irqoff()
 * or context_tracking_guest_enter(). It should be the arch entry code
 * responsibility to call into context tracking with IRQs disabled.
 */
void ct_user_enter(enum ctx_state state)
{
	unsigned long flags;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to that nesting:
	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	local_irq_save(flags);
	__ct_user_enter(state);
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(ct_user_enter);
EXPORT_SYMBOL_GPL(ct_user_enter);
2015-02-10 23:27:50 +03:00
2022-06-08 17:40:21 +03:00
/**
 * user_enter_callable() - Unfortunate ASM callable version of user_enter() for
 *			   archs that didn't manage to check the context tracking
 *			   static key from low level code.
 *
 * This OBSOLETE function should be noinstr but it unsafely calls
 * local_irq_restore(), involving illegal RCU uses through tracing and lockdep.
 * This is unlikely to be fixed as this function is obsolete. The preferred
 * way is to call user_enter_irqoff(). It should be the arch entry code
 * responsibility to call into context tracking with IRQs disabled.
 */
void user_enter_callable(void)
{
	user_enter();
}
NOKPROBE_SYMBOL(user_enter_callable);
2012-11-27 22:33:25 +04:00
2013-01-16 20:16:37 +04:00
/**
 * __ct_user_exit - Inform the context tracking that the CPU is
 *		    exiting user or guest mode and entering the kernel.
 * @state: the context being exited: CONTEXT_USER or CONTEXT_GUEST
 *
 * This function must be called after we entered the kernel from user or
 * guest space before any use of RCU read side critical section. This
 * potentially include any high level kernel code like syscalls, exceptions,
 * signal handling, etc...
 *
 * This call supports re-entrancy. This way it can be called from any exception
 * handler without needing to know if we came from userspace or not.
 */
void noinstr __ct_user_exit(enum ctx_state state)
{
	/* Re-entrancy support: nested calls on this CPU are no-ops */
	if (!context_tracking_recursion_enter())
		return;

	if (__this_cpu_read(context_tracking.state) == state) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * We are going to run code that may use RCU. Inform
			 * RCU core about that (ie: we may need the tick again).
			 */
			rcu_user_exit();
			if (state == CONTEXT_USER) {
				/*
				 * vtime accounting and tracing are instrumentable
				 * and must therefore run between
				 * instrumentation_begin() and instrumentation_end()
				 * inside this noinstr function.
				 */
				instrumentation_begin();
				vtime_user_exit(current);
				trace_user_exit(0);
				instrumentation_end();
			}
		}
		/*
		 * Record the kernel context even when tracking is inactive on
		 * this CPU, to keep the state consistent for tasks migrating
		 * mid-exception (see the comment in __ct_user_enter()).
		 */
		__this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
	}
	context_tracking_recursion_exit();
}
EXPORT_SYMBOL_GPL(__ct_user_exit);
2015-10-28 04:39:56 +03:00
2022-06-08 17:40:19 +03:00
/*
 * OBSOLETE:
 * This function should be noinstr but the below local_irq_save() is
 * unsafe because it involves illegal RCU uses through tracing and lockdep.
 * This is unlikely to be fixed as this function is obsolete. The preferred
 * way is to call __ct_user_exit() through user_exit_irqoff()
 * or context_tracking_guest_exit(). It should be the arch entry code
 * responsibility to call into context tracking with IRQs disabled.
 */
void ct_user_exit(enum ctx_state state)
{
	unsigned long flags;

	/*
	 * Nested IRQ/exception contexts are already protected by the
	 * rcu_irq_*() helpers; bail out here for the same reason as
	 * documented in ct_user_enter().
	 */
	if (in_interrupt())
		return;

	local_irq_save(flags);
	__ct_user_exit(state);
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(ct_user_exit);
EXPORT_SYMBOL_GPL(ct_user_exit);
2015-02-10 23:27:50 +03:00
2022-06-08 17:40:21 +03:00
/**
 * user_exit_callable() - Unfortunate ASM callable version of user_exit() for
 *			   archs that didn't manage to check the context tracking
 *			   static key from low level code.
 *
 * This OBSOLETE function should be noinstr but it unsafely calls local_irq_save(),
 * involving illegal RCU uses through tracing and lockdep. This is unlikely
 * to be fixed as this function is obsolete. The preferred way is to call
 * user_exit_irqoff(). It should be the arch entry code responsibility to
 * call into context tracking with IRQs disabled.
 */
void user_exit_callable(void)
{
	user_exit();
}
NOKPROBE_SYMBOL(user_exit_callable);
2012-11-27 22:33:25 +04:00
2022-06-08 17:40:23 +03:00
/**
 * ct_cpu_track_user - Enable user context tracking for a CPU
 * @cpu: the CPU whose user/kernel transitions must be tracked
 *
 * Marks @cpu as actively tracked and bumps the context tracking static key
 * the first time any CPU is enabled. One-time global boot setup runs on the
 * first call only.
 */
void __init ct_cpu_track_user(int cpu)
{
	/* Guards the one-time boot setup below; __initdata as it is boot-only */
	static __initdata bool initialized = false;

	if (!per_cpu(context_tracking.active, cpu)) {
		per_cpu(context_tracking.active, cpu) = true;
		/* One increment per newly-activated CPU keeps the key enabled */
		static_branch_inc(&context_tracking_key);
	}

	if (initialized)
		return;

#ifdef CONFIG_HAVE_TIF_NOHZ
	/*
	 * Set TIF_NOHZ to init/0 and let it propagate to all tasks through fork
	 * This assumes that init is the only task at this early boot stage.
	 */
	set_tsk_thread_flag(&init_task, TIF_NOHZ);
#endif

	/* Sanity check: this one-time setup must run before any other task exists */
	WARN_ON_ONCE(!tasklist_empty());

	initialized = true;
}
2013-07-11 21:12:32 +04:00
2022-06-08 17:40:24 +03:00
# ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE
2013-07-11 21:12:32 +04:00
void __init context_tracking_init ( void )
{
int cpu ;
for_each_possible_cpu ( cpu )
2022-06-08 17:40:23 +03:00
ct_cpu_track_user ( cpu ) ;
2013-07-11 21:12:32 +04:00
}
# endif
2022-06-08 17:40:24 +03:00
# endif /* #ifdef CONFIG_CONTEXT_TRACKING_USER */