2010-09-17 15:36:40 -07:00
# ifndef _ASM_X86_MWAIT_H
# define _ASM_X86_MWAIT_H
2013-12-12 15:08:36 +01:00
# include <linux/sched.h>
2010-09-17 15:36:40 -07:00
/*
 * MWAIT hint layout: bits [3:0] select the sub-state, bits [7:4] select
 * the C-state.  Both fields are 4 bits wide, hence the two 0xf masks.
 */
#define MWAIT_SUBSTATE_MASK		0xf
#define MWAIT_CSTATE_MASK		0xf
#define MWAIT_SUBSTATE_SIZE		4

/* Extract the C-state / sub-state field from an MWAIT hint value. */
#define MWAIT_HINT2CSTATE(hint)		(((hint) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK)
/*
 * Use the substate mask here: numerically identical to MWAIT_CSTATE_MASK,
 * but it names the field actually being extracted.
 */
#define MWAIT_HINT2SUBSTATE(hint)	((hint) & MWAIT_SUBSTATE_MASK)

/* CPUID leaf 5 enumerates MONITOR/MWAIT capabilities. */
#define CPUID_MWAIT_LEAF		5
#define CPUID5_ECX_EXTENSIONS_SUPPORTED	0x1	/* ECX bit 0: enumeration extensions */
#define CPUID5_ECX_INTERRUPT_BREAK	0x2	/* ECX bit 1: interrupt-break supported */

/* ECX bit 0 for the MWAIT instruction itself: interrupts break MWAIT
 * even when masked. */
#define MWAIT_ECX_INTERRUPT_BREAK	0x1
2013-12-12 15:08:36 +01:00
static inline void __monitor ( const void * eax , unsigned long ecx ,
unsigned long edx )
{
/* "monitor %eax, %ecx, %edx;" */
asm volatile ( " .byte 0x0f, 0x01, 0xc8; "
: : " a " ( eax ) , " c " ( ecx ) , " d " ( edx ) ) ;
}
/*
 * Enter the MWAIT wait state; @eax/@ecx carry the hint and extension
 * operands.  Wakes when the monitored line (set up by a preceding
 * __monitor()) is written.  Emitted as raw opcode bytes.
 */
static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}
sched/idle/x86: Restore mwait_idle() to fix boot hangs, to improve power savings and to improve performance
In Linux-3.9 we removed the mwait_idle() loop:
69fb3676df33 ("x86 idle: remove mwait_idle() and "idle=mwait" cmdline param")
The reasoning was that modern machines should be sufficiently
happy during the boot process using the default_idle() HALT
loop, until cpuidle loads and either acpi_idle or intel_idle
invoke the newer MWAIT-with-hints idle loop.
But two machines reported problems:
1. Certain Core2-era machines support MWAIT-C1 and HALT only.
MWAIT-C1 is preferred for optimal power and performance.
But if they support just C1, cpuidle never loads and
so they use the boot-time default idle loop forever.
2. Some laptops will boot-hang if HALT is used,
but will boot successfully if MWAIT is used.
This appears to be a hidden assumption in BIOS SMI,
that is presumably valid on the proprietary OS
where the BIOS was validated.
https://bugzilla.kernel.org/show_bug.cgi?id=60770
So here we effectively revert the patch above, restoring
the mwait_idle() loop. However, we don't bother restoring
the idle=mwait cmdline parameter, since it appears to add
no value.
Maintainer notes:
For 3.9, simply revert 69fb3676df
For 3.10, the patch applies with -F3; fuzz is needed due to __cpuinit use in
the context. For 3.11, 3.12 and 3.13, this patch applies cleanly.
Tested-by: Mike Galbraith <bitbucket@online.de>
Signed-off-by: Len Brown <len.brown@intel.com>
Acked-by: Mike Galbraith <bitbucket@online.de>
Cc: <stable@vger.kernel.org> # 3.9+
Cc: Borislav Petkov <bp@alien8.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Ian Malone <ibmalone@gmail.com>
Cc: Josh Boyer <jwboyer@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/345254a551eb5a6a866e048d7ab570fd2193aca4.1389763084.git.len.brown@intel.com
[ Ported to recent kernels. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2014-01-15 00:37:34 -05:00
static inline void __sti_mwait ( unsigned long eax , unsigned long ecx )
{
trace_hardirqs_on ( ) ;
/* "mwait %eax, %ecx;" */
asm volatile ( " sti; .byte 0x0f, 0x01, 0xc9; "
: : " a " ( eax ) , " c " ( ecx ) ) ;
}
2013-12-12 15:08:36 +01:00
/*
* This uses new MONITOR / MWAIT instructions on P4 processors with PNI ,
* which can obviate IPI to trigger checking of need_resched .
* We execute MONITOR against need_resched and enter optimized wait state
* through MWAIT . Whenever someone changes need_resched , we would be woken
* up from MWAIT ( without an IPI ) .
*
* New with Core Duo processors , MWAIT can take some hints based on CPU
* capability .
*/
static inline void mwait_idle_with_hints ( unsigned long eax , unsigned long ecx )
{
if ( ! current_set_polling_and_test ( ) ) {
2014-06-18 00:06:23 +02:00
if ( static_cpu_has_bug ( X86_BUG_CLFLUSH_MONITOR ) ) {
2013-12-19 11:58:16 -08:00
mb ( ) ;
2013-12-12 15:08:36 +01:00
clflush ( ( void * ) & current_thread_info ( ) - > flags ) ;
2013-12-19 11:58:16 -08:00
mb ( ) ;
}
2013-12-12 15:08:36 +01:00
__monitor ( ( void * ) & current_thread_info ( ) - > flags , 0 , 0 ) ;
if ( ! need_resched ( ) )
__mwait ( eax , ecx ) ;
}
2013-11-20 12:22:37 +01:00
current_clr_polling ( ) ;
2013-12-12 15:08:36 +01:00
}
2010-09-17 15:36:40 -07:00
# endif /* _ASM_X86_MWAIT_H */