// SPDX-License-Identifier: GPL-2.0-only
/*
 * Context tracking: Probe on high level context boundaries such as kernel,
 * userspace, guest or idle.
 *
 * This is used by RCU to remove its dependency on the timer tick while a CPU
 * runs in idle, userspace or guest mode.
 *
 * User/guest tracking started by Frederic Weisbecker:
 *
 * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker
 *
 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 *
 * RCU extended quiescent state bits imported from kernel/rcu/tree.c
 * where the relevant authorship may be found.
 */

#include <linux/context_tracking.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/kprobes.h>
#include <trace/events/rcu.h>

DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
	.dynticks_nesting = 1,
	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
#endif
	.state = ATOMIC_INIT(RCU_DYNTICKS_IDX),
};
EXPORT_SYMBOL_GPL(context_tracking);
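
/*
 * Illustrative note (not from the original source): as can be seen from the
 * offsets used below (e.g. RCU_DYNTICKS_IDX + CONTEXT_IDLE), ct->state packs
 * two pieces of information into one atomic:
 *
 *   - the low bits (below RCU_DYNTICKS_IDX) hold the current context
 *     (CONTEXT_KERNEL, CONTEXT_IDLE, CONTEXT_USER or CONTEXT_GUEST);
 *   - the remaining bits, counted in units of RCU_DYNTICKS_IDX, form the
 *     RCU dynticks sequence counter, whose RCU_DYNTICKS_IDX bit is set
 *     while RCU is watching and clear in an extended quiescent state.
 *
 * Assuming the CONTEXT_* values from context_tracking_state.h (with
 * CONTEXT_KERNEL == 0), a hypothetical transition to idle looks roughly like:
 *
 *	// state == RCU_DYNTICKS_IDX			(watching, kernel)
 *	ct_state_inc(RCU_DYNTICKS_IDX + CONTEXT_IDLE);
 *	// state == (2 * RCU_DYNTICKS_IDX) | CONTEXT_IDLE	(EQS, idle)
 *
 * which is what the WARN_ON_ONCE() checks in ct_kernel_exit_state() and
 * ct_kernel_enter_state() rely on.
 */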

#ifdef CONFIG_CONTEXT_TRACKING_IDLE
#define TPS(x)	tracepoint_string(x)

/* Record the current task on dyntick-idle entry. */
static __always_inline void rcu_dynticks_task_enter(void)
{
#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
	WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
}

/* Record no current task on dyntick-idle exit. */
static __always_inline void rcu_dynticks_task_exit(void)
{
#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
	WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
}

/* Turn on heavyweight RCU tasks trace readers on idle/user entry. */
static __always_inline void rcu_dynticks_task_trace_enter(void)
{
#ifdef CONFIG_TASKS_TRACE_RCU
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
		current->trc_reader_special.b.need_mb = true;
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}

/* Turn off heavyweight RCU tasks trace readers on idle/user exit. */
static __always_inline void rcu_dynticks_task_trace_exit(void)
{
#ifdef CONFIG_TASKS_TRACE_RCU
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
		current->trc_reader_special.b.need_mb = false;
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}

/*
 * Record entry into an extended quiescent state.  This is only to be
 * called when not already in an extended quiescent state, that is,
 * RCU is watching prior to the call to this function and is no longer
 * watching upon return.
 */
static noinstr void ct_kernel_exit_state(int offset)
{
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior RCU read-side
	 * critical sections, and we also must force ordering with the
	 * next idle sojourn.
	 */
	rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
	seq = ct_state_inc(offset);
	// RCU is no longer watching.  Better be in extended quiescent state!
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & RCU_DYNTICKS_IDX));
}

/*
 * Record exit from an extended quiescent state.  This is only to be
 * called from an extended quiescent state, that is, RCU is not watching
 * prior to the call to this function and is watching upon return.
 */
static noinstr void ct_kernel_enter_state(int offset)
{
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior idle sojourns,
	 * and we also must force ordering with the next RCU read-side
	 * critical section.
	 */
	seq = ct_state_inc(offset);
	// RCU is now watching.  Better not be in an extended quiescent state!
	rcu_dynticks_task_trace_exit();  // After ->dynticks update!
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & RCU_DYNTICKS_IDX));
}

/*
 * Enter an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
 * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
 */
static void noinstr ct_kernel_exit(bool user, int offset)
{
	struct context_tracking *ct = this_cpu_ptr(&context_tracking);

	WARN_ON_ONCE(ct_dynticks_nmi_nesting() != DYNTICK_IRQ_NONIDLE);
	WRITE_ONCE(ct->dynticks_nmi_nesting, 0);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     ct_dynticks_nesting() == 0);
	if (ct_dynticks_nesting() != 1) {
		// RCU will still be watching, so just do accounting and leave.
		ct->dynticks_nesting--;
		return;
	}

	instrumentation_begin();
	lockdep_assert_irqs_disabled();
	trace_rcu_dyntick(TPS("Start"), ct_dynticks_nesting(), 0, ct_dynticks());
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	rcu_preempt_deferred_qs(current);

	// instrumentation for the noinstr ct_kernel_exit_state()
	instrument_atomic_write(&ct->state, sizeof(ct->state));

	instrumentation_end();
	WRITE_ONCE(ct->dynticks_nesting, 0); /* Avoid irq-access tearing. */
	// RCU is watching here ...
	ct_kernel_exit_state(offset);
	// ... but is no longer watching here.
	rcu_dynticks_task_enter();
}

/*
 * Exit an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
 * allow for the possibility of usermode upcalls messing up our count of
 * interrupt nesting level during the busy period that is just now starting.
 */
static void noinstr ct_kernel_enter(bool user, int offset)
{
	struct context_tracking *ct = this_cpu_ptr(&context_tracking);
	long oldval;

	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !raw_irqs_disabled());
	oldval = ct_dynticks_nesting();
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
	if (oldval) {
		// RCU was already watching, so just do accounting and leave.
		ct->dynticks_nesting++;
		return;
	}
	rcu_dynticks_task_exit();
	// RCU is not watching here ...
	ct_kernel_enter_state(offset);
	// ... but is watching here.
	instrumentation_begin();

	// instrumentation for the noinstr ct_kernel_enter_state()
	instrument_atomic_write(&ct->state, sizeof(ct->state));

	trace_rcu_dyntick(TPS("End"), ct_dynticks_nesting(), 1, ct_dynticks());
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	WRITE_ONCE(ct->dynticks_nesting, 1);
	WARN_ON_ONCE(ct_dynticks_nmi_nesting());
	WRITE_ONCE(ct->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
	instrumentation_end();
}

/**
 * ct_nmi_exit - inform RCU of exit from NMI context
 *
 * If we are returning from the outermost NMI handler that interrupted an
 * RCU-idle period, update ct->state and ct->dynticks_nmi_nesting
 * to let the RCU grace-period handling know that the CPU is back to
 * being RCU-idle.
 *
 * If you add or remove a call to ct_nmi_exit(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr ct_nmi_exit(void)
{
	struct context_tracking *ct = this_cpu_ptr(&context_tracking);

	instrumentation_begin();
	/*
	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
	 * (We are exiting an NMI handler, so RCU better be paying attention
	 * to us!)
	 */
	WARN_ON_ONCE(ct_dynticks_nmi_nesting() <= 0);
	WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());

	/*
	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
	 * leave it in non-RCU-idle state.
	 */
	if (ct_dynticks_nmi_nesting() != 1) {
		trace_rcu_dyntick(TPS("--="), ct_dynticks_nmi_nesting(), ct_dynticks_nmi_nesting() - 2,
				  ct_dynticks());
		WRITE_ONCE(ct->dynticks_nmi_nesting, /* No store tearing. */
			   ct_dynticks_nmi_nesting() - 2);
		instrumentation_end();
		return;
	}

	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
	trace_rcu_dyntick(TPS("Startirq"), ct_dynticks_nmi_nesting(), 0, ct_dynticks());
	WRITE_ONCE(ct->dynticks_nmi_nesting, 0); /* Avoid store tearing. */

	// instrumentation for the noinstr ct_kernel_exit_state()
	instrument_atomic_write(&ct->state, sizeof(ct->state));

	instrumentation_end();

	// RCU is watching here ...
	ct_kernel_exit_state(RCU_DYNTICKS_IDX);
	// ... but is no longer watching here.

	if (!in_nmi())
		rcu_dynticks_task_enter();
}

/**
 * ct_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle from RCU's viewpoint, update ct->state and
 * ct->dynticks_nmi_nesting to let the RCU grace-period handling know
 * that the CPU is active.  This implementation permits nested NMIs, as
 * long as the nesting level does not overflow an int.  (You will probably
 * run out of stack space first.)
 *
 * If you add or remove a call to ct_nmi_enter(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr ct_nmi_enter(void)
{
	long incby = 2;
	struct context_tracking *ct = this_cpu_ptr(&context_tracking);

	/* Complain about underflow. */
	WARN_ON_ONCE(ct_dynticks_nmi_nesting() < 0);

	/*
	 * If idle from RCU viewpoint, atomically increment ->dynticks
	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
	 * to be in the outermost NMI handler that interrupted an RCU-idle
	 * period (observation due to Andy Lutomirski).
	 */
	if (rcu_dynticks_curr_cpu_in_eqs()) {

		if (!in_nmi())
			rcu_dynticks_task_exit();

		// RCU is not watching here ...
		ct_kernel_enter_state(RCU_DYNTICKS_IDX);
		// ... but is watching here.

		instrumentation_begin();
		// instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs()
		instrument_atomic_read(&ct->state, sizeof(ct->state));
		// instrumentation for the noinstr ct_kernel_enter_state()
		instrument_atomic_write(&ct->state, sizeof(ct->state));

		incby = 1;
	} else if (!in_nmi()) {
		instrumentation_begin();
		rcu_irq_enter_check_tick();
	} else {
		instrumentation_begin();
	}

	trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
			  ct_dynticks_nmi_nesting(),
			  ct_dynticks_nmi_nesting() + incby, ct_dynticks());
	instrumentation_end();
	WRITE_ONCE(ct->dynticks_nmi_nesting, /* Prevent store tearing. */
		   ct_dynticks_nmi_nesting() + incby);
	barrier();
}
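
/*
 * Illustrative note (not from the original source): the "increment by 1 or 2"
 * scheme above makes ->dynticks_nmi_nesting odd exactly once per RCU-idle
 * interruption.  A hypothetical sequence on an idle CPU:
 *
 *	ct_nmi_enter();		// from idle: EQS exited, nesting 0 -> 1
 *	  ct_nmi_enter();	// nested NMI: already watching, 1 -> 3
 *	  ct_nmi_exit();	// nesting != 1, just 3 -> 1
 *	ct_nmi_exit();		// nesting == 1: outermost, re-enter EQS, -> 0
 *
 * so seeing nesting == 1 on exit is sufficient proof that this is the
 * outermost handler that interrupted an RCU-idle period.
 */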

/**
 * ct_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 *
 * If you add or remove a call to ct_idle_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr ct_idle_enter(void)
{
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !raw_irqs_disabled());
	ct_kernel_exit(false, RCU_DYNTICKS_IDX + CONTEXT_IDLE);
}
EXPORT_SYMBOL_GPL(ct_idle_enter);

/**
 * ct_idle_exit - inform RCU that current CPU is leaving idle
 *
 * Exit idle mode, in other words, -enter- the mode in which RCU
 * read-side critical sections can occur.
 *
 * If you add or remove a call to ct_idle_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr ct_idle_exit(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	ct_kernel_enter(false, RCU_DYNTICKS_IDX - CONTEXT_IDLE);
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ct_idle_exit);
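
/*
 * Illustrative sketch (not from the original source): an idle loop is
 * expected to bracket the actual low-power wait with these two calls, with
 * interrupts disabled.  The helper name below is hypothetical:
 *
 *	static void example_idle_wait(void)
 *	{
 *		raw_local_irq_disable();
 *		ct_idle_enter();	// RCU stops watching this CPU
 *		// architecture-specific low-power wait (hlt/wfi/...)
 *		ct_idle_exit();		// RCU is watching again
 *		raw_local_irq_enable();
 *	}
 *
 * Any interrupt or NMI taken while idle is covered by ct_irq_enter()/
 * ct_nmi_enter() below, not by these calls.
 */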

/**
 * ct_irq_enter - inform RCU that current CPU is entering irq away from idle
 *
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * Note that the Linux kernel is fully capable of entering an interrupt
 * handler that it never exits, for example when doing upcalls to user mode!
 * This code assumes that the idle loop never does upcalls to user mode.
 * If your architecture's idle loop does do upcalls to user mode (or does
 * anything else that results in unbalanced calls to the irq_enter() and
 * irq_exit() functions), RCU will give you what you deserve, good and hard.
 * But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to ct_irq_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void ct_irq_enter(void)
{
	lockdep_assert_irqs_disabled();
	ct_nmi_enter();
}

/**
 * ct_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 * architecture's idle loop violates this assumption, RCU will give you what
 * you deserve, good and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to ct_irq_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void ct_irq_exit(void)
{
	lockdep_assert_irqs_disabled();
	ct_nmi_exit();
}
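
/*
 * Illustrative sketch (not from the original source): on an architecture that
 * does not use the generic entry code, the low-level irq path is expected to
 * pair these calls around the handler, with interrupts disabled.  The function
 * below is hypothetical:
 *
 *	void example_arch_handle_irq(struct pt_regs *regs)
 *	{
 *		ct_irq_enter();		// exits RCU-idle if we interrupted idle
 *		// ... dispatch the interrupt handler ...
 *		ct_irq_exit();		// re-enters RCU-idle if appropriate
 *	}
 *
 * As the comments above stress, the two calls must stay balanced.
 */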

/*
 * Wrapper for ct_irq_enter() where interrupts are enabled.
 *
 * If you add or remove a call to ct_irq_enter_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void ct_irq_enter_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	ct_irq_enter();
	local_irq_restore(flags);
}

/*
 * Wrapper for ct_irq_exit() where interrupts are enabled.
 *
 * If you add or remove a call to ct_irq_exit_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void ct_irq_exit_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	ct_irq_exit();
	local_irq_restore(flags);
}
#else
static __always_inline void ct_kernel_exit(bool user, int offset) { }
static __always_inline void ct_kernel_enter(bool user, int offset) { }
#endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */

#ifdef CONFIG_CONTEXT_TRACKING_USER

#define CREATE_TRACE_POINTS
#include <trace/events/context_tracking.h>

DEFINE_STATIC_KEY_FALSE(context_tracking_key);
EXPORT_SYMBOL_GPL(context_tracking_key);

static noinstr bool context_tracking_recursion_enter(void)
{
	int recursion;

	recursion = __this_cpu_inc_return(context_tracking.recursion);
	if (recursion == 1)
		return true;

	WARN_ONCE((recursion < 1), "Invalid context tracking recursion value %d\n", recursion);
	__this_cpu_dec(context_tracking.recursion);

	return false;
}

static __always_inline void context_tracking_recursion_exit(void)
{
	__this_cpu_dec(context_tracking.recursion);
}

/**
 * __ct_user_enter - Inform the context tracking that the CPU is going
 *		     to enter user or guest space mode.
 *
 * This function must be called right before we switch from the kernel
 * to user or guest space, when it's guaranteed the remaining kernel
 * instructions to execute won't use any RCU read side critical section
 * because this function sets RCU in extended quiescent state.
 */
void noinstr __ct_user_enter(enum ctx_state state)
{
	struct context_tracking *ct = this_cpu_ptr(&context_tracking);
	lockdep_assert_irqs_disabled();

	/* Kernel threads aren't supposed to go to userspace */
	WARN_ON_ONCE(!current->mm);

	if (!context_tracking_recursion_enter())
		return;

	if (__ct_state() != state) {
		if (ct->active) {
			/*
			 * At this stage, only low level arch entry code remains and
			 * then we'll run in userspace. We can assume there won't be
			 * any RCU read-side critical section until the next call to
			 * user_exit() or ct_irq_enter(). Let's remove RCU's dependency
			 * on the tick.
			 */
			if (state == CONTEXT_USER) {
				instrumentation_begin();
				trace_user_enter(0);
				vtime_user_enter(current);
				instrumentation_end();
			}

			/*
			 * Other than generic entry implementation, we may be past the last
			 * rescheduling opportunity in the entry code. Trigger a self IPI
			 * that will fire and reschedule once we resume in user/guest mode.
			 */
			rcu_irq_work_resched();

			/*
			 * Enter RCU idle mode right before resuming userspace.  No use of RCU
			 * is permitted between this call and rcu_eqs_exit(). This way the
			 * CPU doesn't need to maintain the tick for RCU maintenance purposes
			 * when the CPU runs in userspace.
			 */
			ct_kernel_exit(true, RCU_DYNTICKS_IDX + state);

			/*
			 * Special case if we only track user <-> kernel transitions for tickless
			 * cputime accounting but we don't support RCU extended quiescent state.
			 * In this case we don't care about any concurrency/ordering.
			 */
			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE))
				atomic_set(&ct->state, state);
		} else {
			/*
			 * Even if context tracking is disabled on this CPU, because it's outside
			 * the full dynticks mask for example, we still have to keep track of the
			 * context transitions and states to prevent inconsistency on those of
			 * other CPUs.
			 * If a task triggers an exception in userspace, sleep on the exception
			 * handler and then migrate to another CPU, that new CPU must know where
			 * the exception returns by the time we call exception_exit().
			 * This information can only be provided by the previous CPU when it called
			 * exception_enter().
			 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
			 * is false because we know that CPU is not tickless.
			 */
			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) {
				/* Tracking for vtime only, no concurrent RCU EQS accounting */
				atomic_set(&ct->state, state);
			} else {
				/*
				 * Tracking for vtime and RCU EQS. Make sure we don't race
				 * with NMIs. OTOH we don't care about ordering here since
				 * RCU only requires RCU_DYNTICKS_IDX increments to be fully
				 * ordered.
				 */
				atomic_add(state, &ct->state);
			}
		}
	}
	context_tracking_recursion_exit();
}
EXPORT_SYMBOL_GPL(__ct_user_enter);
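
/*
 * Illustrative sketch (not from the original source): this is normally reached
 * through user_enter_irqoff() on the final path back to userspace, after the
 * last reschedule check.  The function below is hypothetical:
 *
 *	static void example_exit_to_user_mode(void)
 *	{
 *		lockdep_assert_irqs_disabled();
 *		// ... arch work: signals, notify-resume, etc. ...
 *		user_enter_irqoff();	// when enabled, calls __ct_user_enter(CONTEXT_USER)
 *		// only the low level register restore remains from here on
 *	}
 *
 * Guest transitions use the same primitive with CONTEXT_GUEST, via
 * context_tracking_guest_enter().
 */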

/*
 * OBSOLETE:
 * This function should be noinstr but the below local_irq_restore() is
 * unsafe because it involves illegal RCU uses through tracing and lockdep.
 * This is unlikely to be fixed as this function is obsolete. The preferred
 * way is to call __context_tracking_enter() through user_enter_irqoff()
 * or context_tracking_guest_enter(). It should be the arch entry code
 * responsibility to call into context tracking with IRQs disabled.
 */
void ct_user_enter(enum ctx_state state)
{
	unsigned long flags;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to that nesting:
	 * ct_irq_enter() rcu_eqs_exit(true) rcu_eqs_enter(true) ct_irq_exit()
	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	local_irq_save(flags);
	__ct_user_enter(state);
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(ct_user_enter);
EXPORT_SYMBOL_GPL(ct_user_enter);

/**
 * user_enter_callable() - Unfortunate ASM callable version of user_enter() for
 *			   archs that didn't manage to check the context tracking
 *			   static key from low level code.
 *
 * This OBSOLETE function should be noinstr but it unsafely calls
 * local_irq_restore(), involving illegal RCU uses through tracing and lockdep.
 * This is unlikely to be fixed as this function is obsolete. The preferred
 * way is to call user_enter_irqoff(). It should be the arch entry code
 * responsibility to call into context tracking with IRQs disabled.
 */
void user_enter_callable(void)
{
	user_enter();
}
NOKPROBE_SYMBOL(user_enter_callable);

/**
 * __ct_user_exit - Inform the context tracking that the CPU is
 *		    exiting user or guest mode and entering the kernel.
 *
 * This function must be called after we entered the kernel from user or
 * guest space before any use of RCU read side critical section. This
 * potentially includes any high level kernel code like syscalls, exceptions,
 * signal handling, etc...
 *
 * This call supports re-entrancy. This way it can be called from any exception
 * handler without needing to know if we came from userspace or not.
 */
void noinstr __ct_user_exit(enum ctx_state state)
{
	struct context_tracking *ct = this_cpu_ptr(&context_tracking);

	if (!context_tracking_recursion_enter())
		return;

	if (__ct_state() == state) {
		if (ct->active) {
			/*
			 * Exit RCU idle mode while entering the kernel because it can
			 * run a RCU read side critical section anytime.
			 */
			ct_kernel_enter(true, RCU_DYNTICKS_IDX - state);

			if (state == CONTEXT_USER) {
				instrumentation_begin();
				vtime_user_exit(current);
				trace_user_exit(0);
				instrumentation_end();
			}

			/*
			 * Special case if we only track user <-> kernel transitions for tickless
			 * cputime accounting but we don't support RCU extended quiescent state.
			 * In this case we don't care about any concurrency/ordering.
			 */
			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE))
				atomic_set(&ct->state, CONTEXT_KERNEL);
		} else {
			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) {
				/* Tracking for vtime only, no concurrent RCU EQS accounting */
				atomic_set(&ct->state, CONTEXT_KERNEL);
			} else {
				/*
				 * Tracking for vtime and RCU EQS. Make sure we don't race
				 * with NMIs. OTOH we don't care about ordering here since
				 * RCU only requires RCU_DYNTICKS_IDX increments to be fully
				 * ordered.
				 */
				atomic_sub(state, &ct->state);
			}
		}
	}
	context_tracking_recursion_exit();
}
EXPORT_SYMBOL_GPL(__ct_user_exit);
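
/*
 * Illustrative sketch (not from the original source): the mirror image of the
 * user-enter path, typically reached through user_exit_irqoff() at the very
 * beginning of a syscall or exception entered from userspace.  The function
 * below is hypothetical:
 *
 *	static void example_enter_from_user_mode(void)
 *	{
 *		lockdep_assert_irqs_disabled();
 *		user_exit_irqoff();	// when enabled, calls __ct_user_exit(CONTEXT_USER)
 *		// RCU is watching again, vtime now accounts kernel time
 *	}
 *
 * Guests are handled symmetrically through context_tracking_guest_exit(),
 * which passes CONTEXT_GUEST instead.
 */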

/*
 * OBSOLETE:
 * This function should be noinstr but the below local_irq_save() is
 * unsafe because it involves illegal RCU uses through tracing and lockdep.
 * This is unlikely to be fixed as this function is obsolete. The preferred
 * way is to call __context_tracking_exit() through user_exit_irqoff()
 * or context_tracking_guest_exit(). It should be the arch entry code
 * responsibility to call into context tracking with IRQs disabled.
 */
void ct_user_exit(enum ctx_state state)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	__ct_user_exit(state);
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(ct_user_exit);
EXPORT_SYMBOL_GPL(ct_user_exit);

/**
 * user_exit_callable() - Unfortunate ASM callable version of user_exit() for
 *			  archs that didn't manage to check the context tracking
 *			  static key from low level code.
 *
 * This OBSOLETE function should be noinstr but it unsafely calls local_irq_save(),
 * involving illegal RCU uses through tracing and lockdep. This is unlikely
 * to be fixed as this function is obsolete. The preferred way is to call
 * user_exit_irqoff(). It should be the arch entry code responsibility to
 * call into context tracking with IRQs disabled.
 */
void user_exit_callable(void)
{
	user_exit();
}
NOKPROBE_SYMBOL(user_exit_callable);

void __init ct_cpu_track_user(int cpu)
{
	static __initdata bool initialized = false;

	if (!per_cpu(context_tracking.active, cpu)) {
		per_cpu(context_tracking.active, cpu) = true;
		static_branch_inc(&context_tracking_key);
	}

	if (initialized)
		return;

#ifdef CONFIG_HAVE_TIF_NOHZ
	/*
	 * Set TIF_NOHZ to init/0 and let it propagate to all tasks through fork
	 * This assumes that init is the only task at this early boot stage.
	 */
	set_tsk_thread_flag(&init_task, TIF_NOHZ);
#endif
	WARN_ON_ONCE(!tasklist_empty());

	initialized = true;
}
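
/*
 * Illustrative sketch (not from the original source): besides the forced
 * boot-time path below, a caller that wants user tracking only on a subset
 * of CPUs (for instance the nohz_full boot code) would do something like
 * the following; the cpumask name is illustrative:
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, example_nohz_full_mask)
 *		ct_cpu_track_user(cpu);
 *
 * which flips context_tracking.active and the static key only where needed.
 */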

#ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE
void __init context_tracking_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		ct_cpu_track_user(cpu);
}
#endif

#endif /* #ifdef CONFIG_CONTEXT_TRACKING_USER */