cpuidle: Ensure ct_cpuidle_enter() is always called from noinstr/__cpuidle

Tracing (kprobes included) and other compiler instrumentation rely on
a normal kernel runtime. Therefore, all functions that disable RCU
should be noinstr, as should all functions that are called while RCU
is disabled.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20230112195542.212914195@infradead.org
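
For readers unfamiliar with the noinstr convention, below is a small,
self-contained sketch of the bracketing pattern this patch applies to
cpuidle_enter_state() and enter_s2idle_proper(). It is an editor's
illustration, not kernel code: noinstr, instrumentation_begin()/
instrumentation_end() and ct_cpuidle_enter()/ct_cpuidle_exit() are
replaced by userspace stubs so the example compiles on its own, and the
demo_state, demo_enter() and demo_enter_state() names are invented for
the example.

/*
 * Stand-alone sketch of the noinstr bracketing pattern (illustration only).
 * The kernel facilities are replaced by stubs so this compiles in userspace.
 */
#include <stdio.h>

#define noinstr __attribute__((noinline))	/* stub for the kernel's noinstr */

static void instrumentation_begin(void) { }	/* stub: start of traceable region */
static void instrumentation_end(void)   { }	/* stub: end of traceable region */
static void ct_cpuidle_enter(void) { printf("context tracking: idle enter\n"); }
static void ct_cpuidle_exit(void)  { printf("context tracking: idle exit\n"); }

#define CPUIDLE_FLAG_RCU_IDLE	0x1		/* driver handles RCU itself */

struct demo_state {
	unsigned int flags;
	int (*enter)(int index);		/* driver callback; may be instrumented */
};

static int demo_enter(int index)
{
	printf("driver enter callback, index=%d\n", index);
	return index;
}

/* Mirrors the shape of cpuidle_enter_state() after this patch. */
static noinstr int demo_enter_state(struct demo_state *s, int index)
{
	int entered;

	instrumentation_begin();		/* stats/tracing prologue is traceable */

	if (!(s->flags & CPUIDLE_FLAG_RCU_IDLE)) {
		ct_cpuidle_enter();		/* in the kernel: RCU stops watching here */
		/* annotate the indirect call below as an allowed region */
		instrumentation_begin();
	}

	entered = s->enter(index);

	if (!(s->flags & CPUIDLE_FLAG_RCU_IDLE)) {
		instrumentation_end();
		ct_cpuidle_exit();		/* in the kernel: RCU watches again */
	}

	instrumentation_end();			/* end of traceable epilogue */
	return entered;
}

int main(void)
{
	struct demo_state s = { .flags = 0, .enter = demo_enter };

	return demo_enter_state(&s, 2) == 2 ? 0 : 1;
}

The point of the structure is that the outer function stays outside the
instrumentable text, while the regions that are safe to trace are marked
explicitly, including the indirect driver callback that runs after RCU
has been switched off.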

--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c

@@ -137,11 +137,13 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
 }
 
 #ifdef CONFIG_SUSPEND
-static void enter_s2idle_proper(struct cpuidle_driver *drv,
+static noinstr void enter_s2idle_proper(struct cpuidle_driver *drv,
 				struct cpuidle_device *dev, int index)
 {
-	ktime_t time_start, time_end;
 	struct cpuidle_state *target_state = &drv->states[index];
+	ktime_t time_start, time_end;
+
+	instrumentation_begin();
 
 	time_start = ns_to_ktime(local_clock());
@@ -152,13 +154,18 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
 	 * suspended is generally unsafe.
 	 */
 	stop_critical_timings();
-	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
+	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) {
 		ct_cpuidle_enter();
+		/* Annotate away the indirect call */
+		instrumentation_begin();
+	}
 	target_state->enter_s2idle(dev, drv, index);
 	if (WARN_ON_ONCE(!irqs_disabled()))
 		raw_local_irq_disable();
-	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
+	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) {
+		instrumentation_end();
 		ct_cpuidle_exit();
+	}
 	tick_unfreeze();
 	start_critical_timings();
@@ -166,6 +173,7 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
 	dev->states_usage[index].s2idle_time += ktime_us_delta(time_end, time_start);
 	dev->states_usage[index].s2idle_usage++;
+	instrumentation_end();
 }
 
 /**
@@ -200,7 +208,8 @@ int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
  * @drv: cpuidle driver for this cpu
  * @index: index into the states table in @drv of the state to enter
  */
-int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
+noinstr int cpuidle_enter_state(struct cpuidle_device *dev,
+				struct cpuidle_driver *drv,
 				int index)
 {
 	int entered_state;
@@ -209,6 +218,8 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 	bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
 	ktime_t time_start, time_end;
 
+	instrumentation_begin();
+
 	/*
 	 * Tell the time framework to switch to a broadcast timer because our
 	 * local timer will be shut down. If a local timer is used from another
@@ -235,15 +246,21 @@
 	time_start = ns_to_ktime(local_clock());
 
 	stop_critical_timings();
-	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
+	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) {
 		ct_cpuidle_enter();
+		/* Annotate away the indirect call */
+		instrumentation_begin();
+	}
 	entered_state = target_state->enter(dev, drv, index);
 	if (WARN_ONCE(!irqs_disabled(), "%ps leaked IRQ state", target_state->enter))
 		raw_local_irq_disable();
-	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
+	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) {
+		instrumentation_end();
 		ct_cpuidle_exit();
+	}
 	start_critical_timings();
 
 	sched_clock_idle_wakeup_event();
@@ -306,6 +323,8 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 		dev->states_usage[index].rejected++;
 	}
 
+	instrumentation_end();
+
 	return entered_state;
 }