cpuidle: fix HP nx6125 regression
Fix for http://bugzilla.kernel.org/show_bug.cgi?id=9355

cpuidle has always fallen back to C2 if there is bus-master (bm) activity
while entering C3. But the presence of C2 is not always guaranteed. Change
the cpuidle algorithm to detect a safe_state to fall back on in case of
bm_activity, and use that state instead of C2.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
parent 5062911830
commit ddc081a195
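In outline: the patch records the deepest non-C3 C-state as dev->safe_state while the cpuidle states are set up, and acpi_idle_enter_bm() now diverts into that state (or plain acpi_safe_halt() if none exists) when bus-master activity makes C3 unsafe. The following user-space mock-up condenses that decision path; the struct layouts, the enter_shallow()/enter_bm() helpers, and the printouts are illustrative stand-ins, not the kernel definitions; the real code is in the diff below.

/*
 * Condensed, standalone sketch of the fallback decision this patch
 * introduces (user-space mock-up, not the kernel code itself).
 */
#include <stdio.h>

struct cpuidle_device;

struct cpuidle_state {
	const char *name;
	int (*enter)(struct cpuidle_device *dev, struct cpuidle_state *st);
};

struct cpuidle_device {
	/* Deepest state that is safe during bus-master activity
	 * (C2 if present, otherwise C1); set up once at init time. */
	struct cpuidle_state *safe_state;
};

static int enter_shallow(struct cpuidle_device *dev, struct cpuidle_state *st)
{
	printf("entering %s instead of C3\n", st->name);
	return 0;
}

/* Mirrors the new early exit in acpi_idle_enter_bm(): on BM activity,
 * divert to dev->safe_state; if none was registered, just halt (the
 * acpi_safe_halt() path in the patch). */
static int enter_bm(struct cpuidle_device *dev, int bm_activity)
{
	if (bm_activity) {
		if (dev->safe_state)
			return dev->safe_state->enter(dev, dev->safe_state);
		printf("no safe state registered: plain halt\n");
		return 0;
	}
	printf("no BM activity: entering C3\n");
	return 0;
}

int main(void)
{
	struct cpuidle_state c1 = { "C1", enter_shallow };
	struct cpuidle_device dev = { .safe_state = &c1 };

	enter_bm(&dev, 1);	/* BM activity: falls back to C1 */
	dev.safe_state = 0;
	enter_bm(&dev, 1);	/* no safe state: halt */
	enter_bm(&dev, 0);	/* quiet bus: C3 proceeds */
	return 0;
}

Because acpi_processor_setup_cpuidle() assigns dev->safe_state in both the C1 and C2 cases while walking the states in order, the pointer lands on the deepest shallow state available, which is what lets the patch delete the old pr->power.bm_state scan.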
drivers/acpi/processor_idle.c
@@ -197,6 +197,19 @@ static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
 	return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
 }
 
+static void acpi_safe_halt(void)
+{
+	current_thread_info()->status &= ~TS_POLLING;
+	/*
+	 * TS_POLLING-cleared state must be visible before we
+	 * test NEED_RESCHED:
+	 */
+	smp_mb();
+	if (!need_resched())
+		safe_halt();
+	current_thread_info()->status |= TS_POLLING;
+}
+
 #ifndef CONFIG_CPU_IDLE
 
 static void
@@ -239,19 +252,6 @@ acpi_processor_power_activate(struct acpi_processor *pr,
 	return;
 }
 
-static void acpi_safe_halt(void)
-{
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we
-	 * test NEED_RESCHED:
-	 */
-	smp_mb();
-	if (!need_resched())
-		safe_halt();
-	current_thread_info()->status |= TS_POLLING;
-}
-
 static atomic_t c3_cpu_count;
 
 /* Common C-state entry for C2, C3, .. */
@@ -1385,15 +1385,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 	if (pr->flags.bm_check)
 		acpi_idle_update_bm_rld(pr, cx);
 
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we test
-	 * NEED_RESCHED:
-	 */
-	smp_mb();
-	if (!need_resched())
-		safe_halt();
-	current_thread_info()->status |= TS_POLLING;
+	acpi_safe_halt();
 
 	cx->usage++;
 
@@ -1493,6 +1485,15 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	if (acpi_idle_suspend)
 		return(acpi_idle_enter_c1(dev, state));
 
+	if (acpi_idle_bm_check()) {
+		if (dev->safe_state) {
+			return dev->safe_state->enter(dev, dev->safe_state);
+		} else {
+			acpi_safe_halt();
+			return 0;
+		}
+	}
+
 	local_irq_disable();
 	current_thread_info()->status &= ~TS_POLLING;
 	/*
@@ -1515,49 +1516,39 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	 */
 	acpi_state_timer_broadcast(pr, cx, 1);
 
-	if (acpi_idle_bm_check()) {
-		cx = pr->power.bm_state;
-
-		acpi_idle_update_bm_rld(pr, cx);
-
-		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-		acpi_idle_do_entry(cx);
-		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-	} else {
-		acpi_idle_update_bm_rld(pr, cx);
-
-		/*
-		 * disable bus master
-		 * bm_check implies we need ARB_DIS
-		 * !bm_check implies we need cache flush
-		 * bm_control implies whether we can do ARB_DIS
-		 *
-		 * That leaves a case where bm_check is set and bm_control is
-		 * not set. In that case we cannot do much, we enter C3
-		 * without doing anything.
-		 */
-		if (pr->flags.bm_check && pr->flags.bm_control) {
-			spin_lock(&c3_lock);
-			c3_cpu_count++;
-			/* Disable bus master arbitration when all CPUs are in C3 */
-			if (c3_cpu_count == num_online_cpus())
-				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
-			spin_unlock(&c3_lock);
-		} else if (!pr->flags.bm_check) {
-			ACPI_FLUSH_CPU_CACHE();
-		}
-
-		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-		acpi_idle_do_entry(cx);
-		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-
-		/* Re-enable bus master arbitration */
-		if (pr->flags.bm_check && pr->flags.bm_control) {
-			spin_lock(&c3_lock);
-			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
-			c3_cpu_count--;
-			spin_unlock(&c3_lock);
-		}
-	}
+	acpi_idle_update_bm_rld(pr, cx);
+
+	/*
+	 * disable bus master
+	 * bm_check implies we need ARB_DIS
+	 * !bm_check implies we need cache flush
+	 * bm_control implies whether we can do ARB_DIS
+	 *
+	 * That leaves a case where bm_check is set and bm_control is
+	 * not set. In that case we cannot do much, we enter C3
+	 * without doing anything.
+	 */
+	if (pr->flags.bm_check && pr->flags.bm_control) {
+		spin_lock(&c3_lock);
+		c3_cpu_count++;
+		/* Disable bus master arbitration when all CPUs are in C3 */
+		if (c3_cpu_count == num_online_cpus())
+			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
+		spin_unlock(&c3_lock);
+	} else if (!pr->flags.bm_check) {
+		ACPI_FLUSH_CPU_CACHE();
+	}
+
+	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	acpi_idle_do_entry(cx);
+	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+
+	/* Re-enable bus master arbitration */
+	if (pr->flags.bm_check && pr->flags.bm_control) {
+		spin_lock(&c3_lock);
+		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
+		c3_cpu_count--;
+		spin_unlock(&c3_lock);
+	}
 
 #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
@@ -1626,12 +1617,14 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 		case ACPI_STATE_C1:
 			state->flags |= CPUIDLE_FLAG_SHALLOW;
 			state->enter = acpi_idle_enter_c1;
+			dev->safe_state = state;
 			break;
 
 		case ACPI_STATE_C2:
 			state->flags |= CPUIDLE_FLAG_BALANCED;
 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
 			state->enter = acpi_idle_enter_simple;
+			dev->safe_state = state;
 			break;
 
 		case ACPI_STATE_C3:
@@ -1652,14 +1645,6 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 	if (!count)
 		return -EINVAL;
 
-	/* find the deepest state that can handle active BM */
-	if (pr->flags.bm_check) {
-		for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++)
-			if (pr->power.states[i].type == ACPI_STATE_C3)
-				break;
-		pr->power.bm_state = &pr->power.states[i-1];
-	}
-
 	return 0;
 }
 
include/acpi/processor.h
@@ -78,7 +78,6 @@ struct acpi_processor_cx {
 struct acpi_processor_power {
 	struct cpuidle_device dev;
 	struct acpi_processor_cx *state;
-	struct acpi_processor_cx *bm_state;
 	unsigned long bm_check_timestamp;
 	u32 default_state;
 	u32 bm_activity;
include/linux/cpuidle.h
@@ -92,6 +92,7 @@ struct cpuidle_device {
 	struct kobject		kobj;
 	struct completion	kobj_unregister;
 	void			*governor_data;
+	struct cpuidle_state	*safe_state;
 };
 
 DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);