/*
 * Copyright (C) 2005 Intel Corporation
 *	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *	- Added _PDC for SMP C-states on Intel CPUs
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include <acpi/processor.h>
#include <asm/acpi.h>
#include <asm/mwait.h>
#include <asm/special_insns.h>

/*
 * Initialize bm_flags based on the CPU cache properties.
 * On SMP it depends on the cache configuration:
 * - When the cache is not shared among all CPUs, we flush the cache
 *   before entering C3.
 * - When the cache is shared among all CPUs, we use the bm_check
 *   mechanism as in the UP case.
 *
 * This routine is called only after all the CPUs are online.
 */
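/*
 * bm_check means the OS polls BM_STS for bus-master activity before
 * entering C3; bm_control means the OS may use ARB_DIS to disable
 * bus-master arbitration while in C3.
 */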
void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
					unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	flags->bm_check = 0;
	if (num_online_cpus() == 1)
		flags->bm_check = 1;
	else if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * Today all MP CPUs that support C3 share cache.
		 * And caches should not be flushed by software while
		 * entering C3 type state.
		 */
		flags->bm_check = 1;
	}

	/*
	 * On all recent Intel platforms, ARB_DISABLE is a nop.
	 * So, set bm_control to zero to indicate that ARB_DISABLE
	 * is not required while entering C3 type state on
	 * P4, Core and beyond CPUs.
	 */
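	/* Model 0x0f in family 6 is the first Core 2 (Merom). */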
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f)))
		flags->bm_control = 0;
}
EXPORT_SYMBOL(acpi_processor_power_init_bm_check);

/* The code below handles C-state entry with a monitor-mwait pair on Intel */
struct cstate_entry {
	struct {
		unsigned int eax;
		unsigned int ecx;
	} states[ACPI_PROCESSOR_MAX_POWER];
};
static struct cstate_entry __percpu *cpu_cstate_entry;	/* per CPU ptr */

static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];

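/*
 * Per the "Intel Processor Vendor-Specific ACPI Interface Specification",
 * a _CST FFH entry whose GAS bit_offset is 2 denotes a native MWAIT
 * C-state beyond HALT; other encodings are not handled here.
 */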
#define NATIVE_CSTATE_BEYOND_HALT	(2)

static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
{
	struct acpi_processor_cx *cx = _cx;
	long retval;
	unsigned int eax, ebx, ecx, edx;
	unsigned int edx_part;
	unsigned int cstate_type; /* C-state type and not ACPI C-state type */
	unsigned int num_cstate_subtype;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

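	/*
	 * The FFH address is the raw MWAIT hint: bits [7:4] select the
	 * C-state (offset by one, hence the +1 below) and bits [3:0] the
	 * sub-state. EDX of CPUID leaf 5 packs a 4-bit count of supported
	 * sub-states for each C-state.
	 */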
	/* Check whether this particular cx_type (in CST) is supported or not */
	cstate_type = ((cx->address >> MWAIT_SUBSTATE_SIZE) &
			MWAIT_CSTATE_MASK) + 1;
	edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
	num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;

	retval = 0;
	if (num_cstate_subtype < (cx->address & MWAIT_SUBSTATE_MASK)) {
		retval = -1;
		goto out;
	}

	/* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */
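	/*
	 * Interrupt-break lets an interrupt wake MWAIT even while EFLAGS.IF
	 * is clear, which the idle entry path depends on.
	 */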
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) {
		retval = -1;
		goto out;
	}

	if (!mwait_supported[cstate_type]) {
		mwait_supported[cstate_type] = 1;
		printk(KERN_DEBUG
			"Monitor-Mwait will be used to enter C-%d "
			"state\n", cx->type);
	}
	snprintf(cx->desc,
			ACPI_CX_DESC_LEN, "ACPI FFH INTEL MWAIT 0x%x",
			cx->address);
out:
	return retval;
}

int acpi_processor_ffh_cstate_probe(unsigned int cpu,
		struct acpi_processor_cx *cx, struct acpi_power_register *reg)
{
	struct cstate_entry *percpu_entry;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	long retval;

	if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF)
		return -1;

	if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT)
		return -1;

	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
	percpu_entry->states[cx->index].eax = 0;
	percpu_entry->states[cx->index].ecx = 0;

	/* Make sure we are running on the right CPU */
	retval = work_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx);
	if (retval == 0) {
		/* Use the hint in CST */
		percpu_entry->states[cx->index].eax = cx->address;
		percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
	}

	/*
	 * For _CST FFH on Intel, if GAS.access_size bit 1 is cleared,
	 * then we should skip checking BM_STS for this C-state.
	 * ref: "Intel Processor Vendor-Specific ACPI Interface Specification"
	 */
	if ((c->x86_vendor == X86_VENDOR_INTEL) && !(reg->access_size & 0x2))
		cx->bm_sts_skip = 1;

	return retval;
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);

/*
 * This uses the new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate the IPI otherwise needed to trigger a check of
 * need_resched. We execute MONITOR against need_resched and enter an
 * optimized wait state through MWAIT. Whenever someone changes
 * need_resched, we would be woken up from MWAIT (without an IPI).
 *
 * Starting with Core Duo processors, MWAIT can take some hints based on
 * CPU capability.
 */
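/*
 * ax carries the MWAIT hint (C-state encoding as above); cx carries the
 * MWAIT extensions, typically MWAIT_ECX_INTERRUPT_BREAK for idle entry.
 */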
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
	if (!need_resched()) {
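		/*
		 * On CPUs flagged with X86_FEATURE_CLFLUSH_MONITOR (e.g.
		 * Xeon 7400, erratum AAI65), the monitored line must be
		 * flushed before MONITOR or the wakeup may be missed.
		 */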
		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
			clflush((void *)&current_thread_info()->flags);

		__monitor((void *)&current_thread_info()->flags, 0, 0);
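		/*
		 * Re-check need_resched after arming the monitor: a wakeup
		 * that set TIF_NEED_RESCHED before MONITOR would otherwise
		 * be lost, while one after it will break the MWAIT.
		 */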
		smp_mb();
		if (!need_resched())
			__mwait(ax, cx);
	}
}

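/*
 * Idle entry for FFH-style C-states: used in place of the legacy P_LVLx
 * I/O port read, with the per-CPU MWAIT hint cached at probe time.
 */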
void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
{
	unsigned int cpu = smp_processor_id();
	struct cstate_entry *percpu_entry;

	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
	mwait_idle_with_hints(percpu_entry->states[cx->index].eax,
			      percpu_entry->states[cx->index].ecx);
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_enter);

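/*
 * Allocated at arch_initcall time so the per-CPU buffer exists before the
 * ACPI processor driver, which initializes later in boot, probes C-states.
 */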
static int __init ffh_cstate_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_INTEL)
		return -1;

	cpu_cstate_entry = alloc_percpu(struct cstate_entry);
	return 0;
}

static void __exit ffh_cstate_exit(void)
{
	free_percpu(cpu_cstate_entry);
	cpu_cstate_entry = NULL;
}

arch_initcall(ffh_cstate_init);
__exitcall(ffh_cstate_exit);