rcu/context-tracking: Remove unused and/or unnecessary middle functions

Some eqs functions are now only used internally by context tracking, so
their public declarations can be removed.

Also middle functions such as rcu_user_*() and rcu_idle_*()
which now directly call to rcu_eqs_enter() and rcu_eqs_exit() can be
wiped out as well.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Neeraj Upadhyay <quic_neeraju@quicinc.com>
Cc: Uladzislau Rezki <uladzislau.rezki@sony.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Nicolas Saenz Julienne <nsaenz@kernel.org>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Cc: Yu Liao <liaoyu15@huawei.com>
Cc: Phil Auld <pauld@redhat.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Alex Belits <abelits@marvell.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
Tested-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
This commit is contained in:
Frederic Weisbecker 2022-06-08 16:40:34 +02:00 committed by Paul E. McKenney
parent 1721145527
commit c33ef43a35
6 changed files with 28 additions and 92 deletions

View File

@ -97,7 +97,7 @@ warnings:
which will include additional debugging information. which will include additional debugging information.
- A low-level kernel issue that either fails to invoke one of the - A low-level kernel issue that either fails to invoke one of the
variants of rcu_user_enter(), rcu_user_exit(), ct_idle_enter(), variants of rcu_eqs_enter(true), rcu_eqs_exit(true), ct_idle_enter(),
ct_idle_exit(), ct_irq_enter(), or ct_irq_exit() on the one ct_idle_exit(), ct_irq_enter(), or ct_irq_exit() on the one
hand, or that invokes one of them too many times on the other. hand, or that invokes one of them too many times on the other.
Historically, the most frequent issue has been an omission Historically, the most frequent issue has been an omission

View File

@ -92,14 +92,6 @@ void irq_exit_rcu(void);
#define arch_nmi_exit() do { } while (0) #define arch_nmi_exit() do { } while (0)
#endif #endif
#ifdef CONFIG_TINY_RCU
static inline void rcu_nmi_enter(void) { }
static inline void rcu_nmi_exit(void) { }
#else
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif
/* /*
* NMI vs Tracing * NMI vs Tracing
* -------------- * --------------

View File

@ -104,14 +104,6 @@ static inline void rcu_sysrq_start(void) { }
static inline void rcu_sysrq_end(void) { } static inline void rcu_sysrq_end(void) { }
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */ #endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
#ifdef CONFIG_NO_HZ_FULL
void rcu_user_enter(void);
void rcu_user_exit(void);
#else
static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
#endif /* CONFIG_NO_HZ_FULL */
#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
void rcu_irq_work_resched(void); void rcu_irq_work_resched(void);
#else #else

View File

@ -76,8 +76,6 @@ static inline int rcu_needs_cpu(void)
static inline void rcu_virt_note_context_switch(int cpu) { } static inline void rcu_virt_note_context_switch(int cpu) { }
static inline void rcu_cpu_stall_reset(void) { } static inline void rcu_cpu_stall_reset(void) { }
static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; } static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
static inline void rcu_idle_enter(void) { }
static inline void rcu_idle_exit(void) { }
static inline void rcu_irq_exit_check_preempt(void) { } static inline void rcu_irq_exit_check_preempt(void) { }
#define rcu_is_idle_cpu(cpu) \ #define rcu_is_idle_cpu(cpu) \
(is_idle_task(current) && !in_nmi() && !in_hardirq() && !in_serving_softirq()) (is_idle_task(current) && !in_nmi() && !in_hardirq() && !in_serving_softirq())

View File

@ -45,8 +45,6 @@ unsigned long start_poll_synchronize_rcu(void);
bool poll_state_synchronize_rcu(unsigned long oldstate); bool poll_state_synchronize_rcu(unsigned long oldstate);
void cond_synchronize_rcu(unsigned long oldstate); void cond_synchronize_rcu(unsigned long oldstate);
void rcu_idle_enter(void);
void rcu_idle_exit(void);
bool rcu_is_idle_cpu(int cpu); bool rcu_is_idle_cpu(int cpu);
#ifdef CONFIG_PROVE_RCU #ifdef CONFIG_PROVE_RCU

View File

@ -189,17 +189,17 @@ static void noinstr rcu_eqs_exit(bool user)
} }
/** /**
* rcu_nmi_exit - inform RCU of exit from NMI context * ct_nmi_exit - inform RCU of exit from NMI context
* *
* If we are returning from the outermost NMI handler that interrupted an * If we are returning from the outermost NMI handler that interrupted an
* RCU-idle period, update ct->dynticks and ct->dynticks_nmi_nesting * RCU-idle period, update ct->dynticks and ct->dynticks_nmi_nesting
* to let the RCU grace-period handling know that the CPU is back to * to let the RCU grace-period handling know that the CPU is back to
* being RCU-idle. * being RCU-idle.
* *
* If you add or remove a call to rcu_nmi_exit(), be sure to test * If you add or remove a call to ct_nmi_exit(), be sure to test
* with CONFIG_RCU_EQS_DEBUG=y. * with CONFIG_RCU_EQS_DEBUG=y.
*/ */
void noinstr rcu_nmi_exit(void) void noinstr ct_nmi_exit(void)
{ {
struct context_tracking *ct = this_cpu_ptr(&context_tracking); struct context_tracking *ct = this_cpu_ptr(&context_tracking);
@ -242,7 +242,7 @@ void noinstr rcu_nmi_exit(void)
} }
/** /**
* rcu_nmi_enter - inform RCU of entry to NMI context * ct_nmi_enter - inform RCU of entry to NMI context
* *
* If the CPU was idle from RCU's viewpoint, update ct->dynticks and * If the CPU was idle from RCU's viewpoint, update ct->dynticks and
* ct->dynticks_nmi_nesting to let the RCU grace-period handling know * ct->dynticks_nmi_nesting to let the RCU grace-period handling know
@ -250,10 +250,10 @@ void noinstr rcu_nmi_exit(void)
* long as the nesting level does not overflow an int. (You will probably * long as the nesting level does not overflow an int. (You will probably
* run out of stack space first.) * run out of stack space first.)
* *
* If you add or remove a call to rcu_nmi_enter(), be sure to test * If you add or remove a call to ct_nmi_enter(), be sure to test
* with CONFIG_RCU_EQS_DEBUG=y. * with CONFIG_RCU_EQS_DEBUG=y.
*/ */
void noinstr rcu_nmi_enter(void) void noinstr ct_nmi_enter(void)
{ {
long incby = 2; long incby = 2;
struct context_tracking *ct = this_cpu_ptr(&context_tracking); struct context_tracking *ct = this_cpu_ptr(&context_tracking);
@ -302,32 +302,33 @@ void noinstr rcu_nmi_enter(void)
} }
/** /**
* rcu_idle_enter - inform RCU that current CPU is entering idle * ct_idle_enter - inform RCU that current CPU is entering idle
* *
* Enter idle mode, in other words, -leave- the mode in which RCU * Enter idle mode, in other words, -leave- the mode in which RCU
* read-side critical sections can occur. (Though RCU read-side * read-side critical sections can occur. (Though RCU read-side
* critical sections can occur in irq handlers in idle, a possibility * critical sections can occur in irq handlers in idle, a possibility
* handled by irq_enter() and irq_exit().) * handled by irq_enter() and irq_exit().)
* *
* If you add or remove a call to rcu_idle_enter(), be sure to test with * If you add or remove a call to ct_idle_enter(), be sure to test with
* CONFIG_RCU_EQS_DEBUG=y. * CONFIG_RCU_EQS_DEBUG=y.
*/ */
void noinstr rcu_idle_enter(void) void noinstr ct_idle_enter(void)
{ {
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !raw_irqs_disabled()); WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !raw_irqs_disabled());
rcu_eqs_enter(false); rcu_eqs_enter(false);
} }
EXPORT_SYMBOL_GPL(ct_idle_enter);
/** /**
* rcu_idle_exit - inform RCU that current CPU is leaving idle * ct_idle_exit - inform RCU that current CPU is leaving idle
* *
* Exit idle mode, in other words, -enter- the mode in which RCU * Exit idle mode, in other words, -enter- the mode in which RCU
* read-side critical sections can occur. * read-side critical sections can occur.
* *
* If you add or remove a call to rcu_idle_exit(), be sure to test with * If you add or remove a call to ct_idle_exit(), be sure to test with
* CONFIG_RCU_EQS_DEBUG=y. * CONFIG_RCU_EQS_DEBUG=y.
*/ */
void noinstr rcu_idle_exit(void) void noinstr ct_idle_exit(void)
{ {
unsigned long flags; unsigned long flags;
@ -335,18 +336,6 @@ void noinstr rcu_idle_exit(void)
rcu_eqs_exit(false); rcu_eqs_exit(false);
raw_local_irq_restore(flags); raw_local_irq_restore(flags);
} }
EXPORT_SYMBOL_GPL(rcu_idle_exit);
noinstr void ct_idle_enter(void)
{
rcu_idle_enter();
}
EXPORT_SYMBOL_GPL(ct_idle_enter);
void ct_idle_exit(void)
{
rcu_idle_exit();
}
EXPORT_SYMBOL_GPL(ct_idle_exit); EXPORT_SYMBOL_GPL(ct_idle_exit);
/** /**
@ -431,50 +420,11 @@ void ct_irq_exit_irqson(void)
ct_irq_exit(); ct_irq_exit();
local_irq_restore(flags); local_irq_restore(flags);
} }
#else
noinstr void ct_nmi_enter(void) static __always_inline void rcu_eqs_enter(bool user) { }
{ static __always_inline void rcu_eqs_exit(bool user) { }
rcu_nmi_enter();
}
noinstr void ct_nmi_exit(void)
{
rcu_nmi_exit();
}
#endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */ #endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */
#ifdef CONFIG_NO_HZ_FULL
/**
* rcu_user_enter - inform RCU that we are resuming userspace.
*
* Enter RCU idle mode right before resuming userspace. No use of RCU
* is permitted between this call and rcu_user_exit(). This way the
* CPU doesn't need to maintain the tick for RCU maintenance purposes
* when the CPU runs in userspace.
*
* If you add or remove a call to rcu_user_enter(), be sure to test with
* CONFIG_RCU_EQS_DEBUG=y.
*/
noinstr void rcu_user_enter(void)
{
rcu_eqs_enter(true);
}
/**
* rcu_user_exit - inform RCU that we are exiting userspace.
*
* Exit RCU idle mode while entering the kernel because it can
* run a RCU read side critical section anytime.
*
* If you add or remove a call to rcu_user_exit(), be sure to test with
* CONFIG_RCU_EQS_DEBUG=y.
*/
void noinstr rcu_user_exit(void)
{
rcu_eqs_exit(true);
}
#endif /* #ifdef CONFIG_NO_HZ_FULL */
#ifdef CONFIG_CONTEXT_TRACKING_USER #ifdef CONFIG_CONTEXT_TRACKING_USER
#define CREATE_TRACE_POINTS #define CREATE_TRACE_POINTS
@ -542,7 +492,13 @@ void noinstr __ct_user_enter(enum ctx_state state)
* that will fire and reschedule once we resume in user/guest mode. * that will fire and reschedule once we resume in user/guest mode.
*/ */
rcu_irq_work_resched(); rcu_irq_work_resched();
rcu_user_enter(); /*
* Enter RCU idle mode right before resuming userspace. No use of RCU
* is permitted between this call and rcu_eqs_exit(). This way the
* CPU doesn't need to maintain the tick for RCU maintenance purposes
* when the CPU runs in userspace.
*/
rcu_eqs_enter(true);
} }
/* /*
* Even if context tracking is disabled on this CPU, because it's outside * Even if context tracking is disabled on this CPU, because it's outside
@ -579,7 +535,7 @@ void ct_user_enter(enum ctx_state state)
/* /*
* Some contexts may involve an exception occuring in an irq, * Some contexts may involve an exception occuring in an irq,
* leading to that nesting: * leading to that nesting:
* ct_irq_enter() rcu_user_exit() rcu_user_exit() ct_irq_exit() * ct_irq_enter() rcu_eqs_exit(true) rcu_eqs_enter(true) ct_irq_exit()
* This would mess up the dyntick_nesting count though. And rcu_irq_*() * This would mess up the dyntick_nesting count though. And rcu_irq_*()
* helpers are enough to protect RCU uses inside the exception. So * helpers are enough to protect RCU uses inside the exception. So
* just return immediately if we detect we are in an IRQ. * just return immediately if we detect we are in an IRQ.
@ -631,10 +587,10 @@ void noinstr __ct_user_exit(enum ctx_state state)
if (__this_cpu_read(context_tracking.state) == state) { if (__this_cpu_read(context_tracking.state) == state) {
if (__this_cpu_read(context_tracking.active)) { if (__this_cpu_read(context_tracking.active)) {
/* /*
* We are going to run code that may use RCU. Inform * Exit RCU idle mode while entering the kernel because it can
* RCU core about that (ie: we may need the tick again). * run a RCU read side critical section anytime.
*/ */
rcu_user_exit(); rcu_eqs_exit(true);
if (state == CONTEXT_USER) { if (state == CONTEXT_USER) {
instrumentation_begin(); instrumentation_begin();
vtime_user_exit(current); vtime_user_exit(current);