sched/idle: Eliminate the "reflect" check from cpuidle_idle_call()
Since cpuidle_reflect() should only be called if the idle state to enter was selected by cpuidle_select(), there is the "reflect" variable in cpuidle_idle_call() whose value is used to determine whether or not that is the case. However, if the entire code run between the conditional setting "reflect" and the call to cpuidle_reflect() is moved to a separate function, it will be possible to call that new function in both branches of the conditional, in which case cpuidle_reflect() will only need to be called from one of them too and the "reflect" variable won't be necessary any more. This eliminates one check made by cpuidle_idle_call() on the majority of its invocations, so change the code as described. Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com> Reviewed-by: Daniel Lezcano <daniel.lezcano@linaro.org> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
This commit is contained in:
parent
a802ea9645
commit
bcf6ad8a4a
@ -79,6 +79,46 @@ static void default_idle_call(void)
|
|||||||
arch_cpu_idle();
|
arch_cpu_idle();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
|
||||||
|
int next_state)
|
||||||
|
{
|
||||||
|
int entered_state;
|
||||||
|
|
||||||
|
/* Fall back to the default arch idle method on errors. */
|
||||||
|
if (next_state < 0) {
|
||||||
|
default_idle_call();
|
||||||
|
return next_state;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The idle task must be scheduled, it is pointless to go to idle, just
|
||||||
|
* update no idle residency and return.
|
||||||
|
*/
|
||||||
|
if (current_clr_polling_and_test()) {
|
||||||
|
dev->last_residency = 0;
|
||||||
|
local_irq_enable();
|
||||||
|
return -EBUSY;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Take note of the planned idle state. */
|
||||||
|
idle_set_state(this_rq(), &drv->states[next_state]);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Enter the idle state previously returned by the governor decision.
|
||||||
|
* This function will block until an interrupt occurs and will take
|
||||||
|
* care of re-enabling the local interrupts
|
||||||
|
*/
|
||||||
|
entered_state = cpuidle_enter(drv, dev, next_state);
|
||||||
|
|
||||||
|
/* The cpu is no longer idle or about to enter idle. */
|
||||||
|
idle_set_state(this_rq(), NULL);
|
||||||
|
|
||||||
|
if (entered_state == -EBUSY)
|
||||||
|
default_idle_call();
|
||||||
|
|
||||||
|
return entered_state;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* cpuidle_idle_call - the main idle function
|
* cpuidle_idle_call - the main idle function
|
||||||
*
|
*
|
||||||
@ -93,7 +133,6 @@ static void cpuidle_idle_call(void)
|
|||||||
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
|
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
|
||||||
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
|
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
|
||||||
int next_state, entered_state;
|
int next_state, entered_state;
|
||||||
bool reflect;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Check if the idle task must be rescheduled. If it is the
|
* Check if the idle task must be rescheduled. If it is the
|
||||||
@ -138,56 +177,19 @@ static void cpuidle_idle_call(void)
|
|||||||
goto exit_idle;
|
goto exit_idle;
|
||||||
}
|
}
|
||||||
|
|
||||||
reflect = false;
|
|
||||||
next_state = cpuidle_find_deepest_state(drv, dev);
|
next_state = cpuidle_find_deepest_state(drv, dev);
|
||||||
|
call_cpuidle(drv, dev, next_state);
|
||||||
} else {
|
} else {
|
||||||
reflect = true;
|
|
||||||
/*
|
/*
|
||||||
* Ask the cpuidle framework to choose a convenient idle state.
|
* Ask the cpuidle framework to choose a convenient idle state.
|
||||||
*/
|
*/
|
||||||
next_state = cpuidle_select(drv, dev);
|
next_state = cpuidle_select(drv, dev);
|
||||||
}
|
entered_state = call_cpuidle(drv, dev, next_state);
|
||||||
/* Fall back to the default arch idle method on errors. */
|
/*
|
||||||
if (next_state < 0) {
|
* Give the governor an opportunity to reflect on the outcome
|
||||||
default_idle_call();
|
*/
|
||||||
goto exit_idle;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The idle task must be scheduled, it is pointless to
|
|
||||||
* go to idle, just update no idle residency and get
|
|
||||||
* out of this function
|
|
||||||
*/
|
|
||||||
if (current_clr_polling_and_test()) {
|
|
||||||
dev->last_residency = 0;
|
|
||||||
entered_state = next_state;
|
|
||||||
local_irq_enable();
|
|
||||||
goto exit_idle;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Take note of the planned idle state. */
|
|
||||||
idle_set_state(this_rq(), &drv->states[next_state]);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Enter the idle state previously returned by the governor decision.
|
|
||||||
* This function will block until an interrupt occurs and will take
|
|
||||||
* care of re-enabling the local interrupts
|
|
||||||
*/
|
|
||||||
entered_state = cpuidle_enter(drv, dev, next_state);
|
|
||||||
|
|
||||||
/* The cpu is no longer idle or about to enter idle. */
|
|
||||||
idle_set_state(this_rq(), NULL);
|
|
||||||
|
|
||||||
if (entered_state == -EBUSY) {
|
|
||||||
default_idle_call();
|
|
||||||
goto exit_idle;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Give the governor an opportunity to reflect on the outcome
|
|
||||||
*/
|
|
||||||
if (reflect)
|
|
||||||
cpuidle_reflect(dev, entered_state);
|
cpuidle_reflect(dev, entered_state);
|
||||||
|
}
|
||||||
|
|
||||||
exit_idle:
|
exit_idle:
|
||||||
__current_set_polling();
|
__current_set_polling();
|
||||||
|
Loading…
Reference in New Issue
Block a user