/**
 * @file op_model_amd.c
 * athlon / K7 / K8 / Family 10h model-specific MSR operations
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon
 * @author Philippe Elie
 * @author Graydon Hoare
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */
2005-04-17 02:20:36 +04:00
# include <linux/oprofile.h>
2008-07-22 23:08:55 +04:00
# include <linux/device.h>
# include <linux/pci.h>
2009-07-08 15:49:38 +04:00
# include <linux/percpu.h>
2008-07-22 23:08:55 +04:00
2005-04-17 02:20:36 +04:00
# include <asm/ptrace.h>
# include <asm/msr.h>
2006-06-26 15:57:01 +04:00
# include <asm/nmi.h>
2010-01-28 20:05:26 +03:00
# include <asm/apic.h>
2010-02-04 12:57:23 +03:00
# include <asm/processor.h>
# include <asm/cpufeature.h>
2008-02-20 01:51:27 +03:00
2005-04-17 02:20:36 +04:00
# include "op_x86_model.h"
# include "op_counter.h"
2010-09-24 17:54:43 +04:00
# define NUM_COUNTERS 4
# define NUM_COUNTERS_F15H 6
2009-07-08 15:49:38 +04:00
# ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
2010-09-24 17:54:43 +04:00
# define NUM_VIRT_COUNTERS 32
2009-07-08 15:49:38 +04:00
# else
2010-09-24 17:54:43 +04:00
# define NUM_VIRT_COUNTERS 0
2009-07-08 15:49:38 +04:00
# endif
2009-05-25 17:10:32 +04:00
# define OP_EVENT_MASK 0x0FFF
2009-05-25 19:59:06 +04:00
# define OP_CTR_OVERFLOW (1ULL<<31)
2009-05-25 17:10:32 +04:00
# define MSR_AMD_EVENTSEL_RESERVED ((0xFFFFFCF0ULL<<32)|(1ULL<<21))
2005-04-17 02:20:36 +04:00
2010-09-24 17:54:43 +04:00
/* number of hardware counters actually used; set in op_amd_init() */
static int num_counters;
/* per-virtual-counter reload values; 0 means the counter is disabled */
static unsigned long reset_value[OP_MAX_COUNTER];
2008-07-22 23:09:06 +04:00
2009-06-03 22:10:39 +04:00
# define IBS_FETCH_SIZE 6
# define IBS_OP_SIZE 12
2008-07-22 23:08:55 +04:00
2010-02-04 12:57:23 +03:00
static u32 ibs_caps ;
2008-07-22 23:08:55 +04:00
2010-09-21 19:58:15 +04:00
/* user-visible IBS configuration, written through oprofilefs files */
struct ibs_config {
	unsigned long op_enabled;	/* IBS execution (op) sampling enabled */
	unsigned long fetch_enabled;	/* IBS fetch sampling enabled */
	unsigned long max_cnt_fetch;	/* fetch sampling period */
	unsigned long max_cnt_op;	/* op sampling period */
	unsigned long rand_en;		/* randomize fetch sampling periods */
	unsigned long dispatched_ops;	/* count dispatched ops instead of cycles */
	unsigned long branch_target;	/* log IbsBrTarget with op samples */
};
2010-09-21 19:58:15 +04:00
/* runtime IBS state, derived from ibs_config in op_amd_start_ibs() */
struct ibs_state {
	u64		ibs_op_ctl;	/* template value for MSR_AMD64_IBSOPCTL */
	int		branch_target;	/* branch target logging is active */
	unsigned long	sample_size;	/* data words per op sample record */
};

static struct ibs_config ibs_config;
static struct ibs_state ibs_state;
2008-02-20 01:51:27 +03:00
2010-02-04 12:57:23 +03:00
/*
* IBS cpuid feature detection
*/
2010-10-06 14:27:54 +04:00
# define IBS_CPUID_FEATURES 0x8000001b
2010-02-04 12:57:23 +03:00
/*
* Same bit mask as for IBS cpuid feature flags ( Fn8000_001B_EAX ) , but
* bit 0 is used to indicate the existence of IBS .
*/
2010-10-06 14:27:54 +04:00
# define IBS_CAPS_AVAIL (1U<<0)
2010-09-21 17:58:32 +04:00
# define IBS_CAPS_FETCHSAM (1U<<1)
# define IBS_CAPS_OPSAM (1U<<2)
2010-10-06 14:27:54 +04:00
# define IBS_CAPS_RDWROPCNT (1U<<3)
# define IBS_CAPS_OPCNT (1U<<4)
2010-09-21 17:49:31 +04:00
# define IBS_CAPS_BRNTRGT (1U<<5)
2010-09-22 19:45:39 +04:00
# define IBS_CAPS_OPCNTEXT (1U<<6)
2010-09-21 17:58:32 +04:00
# define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \
| IBS_CAPS_FETCHSAM \
| IBS_CAPS_OPSAM )
2010-10-06 14:27:54 +04:00
/*
* IBS APIC setup
*/
# define IBSCTL 0x1cc
# define IBSCTL_LVT_OFFSET_VALID (1ULL<<8)
# define IBSCTL_LVT_OFFSET_MASK 0x0F
2010-02-04 12:57:23 +03:00
2010-02-23 17:46:49 +03:00
/*
* IBS randomization macros
*/
# define IBS_RANDOM_BITS 12
# define IBS_RANDOM_MASK ((1ULL << IBS_RANDOM_BITS) - 1)
# define IBS_RANDOM_MAXCNT_OFFSET (1ULL << (IBS_RANDOM_BITS - 5))
2010-02-04 12:57:23 +03:00
static u32 get_ibs_caps ( void )
{
u32 ibs_caps ;
unsigned int max_level ;
if ( ! boot_cpu_has ( X86_FEATURE_IBS ) )
return 0 ;
/* check IBS cpuid feature flags */
max_level = cpuid_eax ( 0x80000000 ) ;
if ( max_level < IBS_CPUID_FEATURES )
2010-09-21 17:58:32 +04:00
return IBS_CAPS_DEFAULT ;
2010-02-04 12:57:23 +03:00
ibs_caps = cpuid_eax ( IBS_CPUID_FEATURES ) ;
if ( ! ( ibs_caps & IBS_CAPS_AVAIL ) )
/* cpuid flags not valid */
2010-09-21 17:58:32 +04:00
return IBS_CAPS_DEFAULT ;
2010-02-04 12:57:23 +03:00
return ibs_caps ;
}
2010-01-18 20:25:45 +03:00
/*
* 16 - bit Linear Feedback Shift Register ( LFSR )
*
* 16 14 13 11
* Feedback polynomial = X + X + X + X + 1
*/
/*
 * Advance a 16-bit Fibonacci LFSR (taps at bits 0, 2, 3 and 5, which
 * realizes the polynomial x^16 + x^14 + x^13 + x^11 + 1) and return
 * the new register value.  Deterministic pseudo-random sequence,
 * seeded with 0xF00D.
 */
static unsigned int lfsr_random(void)
{
	static unsigned int lfsr_value = 0xF00D;
	unsigned int feedback;

	/* XOR of the tap bits becomes the next input bit */
	feedback = (lfsr_value ^ (lfsr_value >> 2) ^
		    (lfsr_value >> 3) ^ (lfsr_value >> 5)) & 0x0001;

	/* shift right, feed the new bit in at the top */
	lfsr_value = (lfsr_value >> 1) | (feedback << 15);

	return lfsr_value;
}
2010-02-23 17:46:49 +03:00
/*
* IBS software randomization
*
* The IBS periodic op counter is randomized in software . The lower 12
* bits of the 20 bit counter are randomized . IbsOpCurCnt is
* initialized with a 12 bit random value .
*/
/*
 * Apply software randomization to an IbsOpCtl value so that op
 * sampling periods are not perfectly periodic.
 */
static inline u64 op_amd_randomize_ibs_op(u64 val)
{
	unsigned int random = lfsr_random();

	if (ibs_caps & IBS_CAPS_RDWROPCNT) {
		/* hw supports writing IbsOpCurCnt: seed it with 12 random bits */
		val |= (u64)(random & IBS_RANDOM_MASK) << 32;
	} else {
		/*
		 * Work around if the hw can not write to IbsOpCurCnt
		 *
		 * Randomize the lower 8 bits of the 16 bit
		 * IbsOpMaxCnt [15:0] value in the range of -128 to
		 * +127 by adding/subtracting an offset to the
		 * maximum count (IbsOpMaxCnt).
		 *
		 * To avoid over or underflows and protect upper bits
		 * starting at bit 16, the initial value for
		 * IbsOpMaxCnt must fit in the range from 0x0081 to
		 * 0xff80.
		 */
		val += (s8)(random >> 4);
	}

	return val;
}
2009-06-23 23:36:08 +04:00
/*
 * Collect pending IBS samples from the NMI path: if a fetch or op
 * sample is latched in the IBS MSRs, copy the raw register values
 * into the oprofile event buffer and re-arm the sampling unit.
 */
static inline void
op_amd_handle_ibs(struct pt_regs * const regs,
		  struct op_msrs const * const msrs)
{
	u64 val, ctl;
	struct op_entry entry;

	/* nothing to do when IBS is absent or disabled */
	if (!ibs_caps)
		return;

	if (ibs_config.fetch_enabled) {
		rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
		/* IBS_FETCH_VAL set means a fetch sample is latched */
		if (ctl & IBS_FETCH_VAL) {
			rdmsrl(MSR_AMD64_IBSFETCHLINAD, val);
			oprofile_write_reserve(&entry, regs, val,
					       IBS_FETCH_CODE, IBS_FETCH_SIZE);
			oprofile_add_data64(&entry, val);	/* linear address */
			oprofile_add_data64(&entry, ctl);	/* fetch control */
			rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, val);
			oprofile_add_data64(&entry, val);	/* physical address */
			oprofile_write_commit(&entry);

			/* reenable the IRQ */
			ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT);
			ctl |= IBS_FETCH_ENABLE;
			wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
		}
	}

	if (ibs_config.op_enabled) {
		rdmsrl(MSR_AMD64_IBSOPCTL, ctl);
		if (ctl & IBS_OP_VAL) {
			rdmsrl(MSR_AMD64_IBSOPRIP, val);
			/* sample_size includes the optional branch-target word */
			oprofile_write_reserve(&entry, regs, val, IBS_OP_CODE,
					       ibs_state.sample_size);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA2, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA3, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSDCLINAD, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSDCPHYSAD, val);
			oprofile_add_data64(&entry, val);
			if (ibs_state.branch_target) {
				rdmsrl(MSR_AMD64_IBSBRTARGET, val);
				oprofile_add_data(&entry, (unsigned long)val);
			}
			oprofile_write_commit(&entry);

			/* reenable the IRQ with a freshly randomized period */
			ctl = op_amd_randomize_ibs_op(ibs_state.ibs_op_ctl);
			wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
		}
	}
}
2009-03-10 21:15:57 +03:00
/*
 * Program and enable the IBS fetch and op units according to
 * ibs_config, caching the op control template in ibs_state so the
 * NMI handler can re-arm with the same settings.
 */
static inline void op_amd_start_ibs(void)
{
	u64 val;

	if (!ibs_caps)
		return;

	memset(&ibs_state, 0, sizeof(ibs_state));

	/*
	 * Note: Since the max count settings may out of range we
	 * write back the actual used values so that userland can read
	 * it.
	 */

	if (ibs_config.fetch_enabled) {
		/* max count lives in bits 15:0, in units of 16 cycles */
		val = ibs_config.max_cnt_fetch >> 4;
		val = min(val, IBS_FETCH_MAX_CNT);
		ibs_config.max_cnt_fetch = val << 4;
		val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
		val |= IBS_FETCH_ENABLE;
		wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
	}

	if (ibs_config.op_enabled) {
		val = ibs_config.max_cnt_op >> 4;
		if (!(ibs_caps & IBS_CAPS_RDWROPCNT)) {
			/*
			 * IbsOpCurCnt not supported.  See
			 * op_amd_randomize_ibs_op() for details.
			 */
			val = clamp(val, 0x0081ULL, 0xFF80ULL);
			ibs_config.max_cnt_op = val << 4;
		} else {
			/*
			 * The start value is randomized with a
			 * positive offset, we need to compensate it
			 * with the half of the randomized range. Also
			 * avoid underflows.
			 */
			val += IBS_RANDOM_MAXCNT_OFFSET;
			if (ibs_caps & IBS_CAPS_OPCNTEXT)
				val = min(val, IBS_OP_MAX_CNT_EXT);
			else
				val = min(val, IBS_OP_MAX_CNT);
			ibs_config.max_cnt_op =
				(val - IBS_RANDOM_MAXCNT_OFFSET) << 4;
		}
		/* move the extended count bits (OPCNTEXT) into position */
		val = ((val & ~IBS_OP_MAX_CNT) << 4) | (val & IBS_OP_MAX_CNT);
		val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0;
		val |= IBS_OP_ENABLE;
		ibs_state.ibs_op_ctl = val;
		ibs_state.sample_size = IBS_OP_SIZE;
		if (ibs_config.branch_target) {
			ibs_state.branch_target = 1;
			/* one extra data word for IbsBrTarget */
			ibs_state.sample_size++;
		}
		val = op_amd_randomize_ibs_op(ibs_state.ibs_op_ctl);
		wrmsrl(MSR_AMD64_IBSOPCTL, val);
	}
}
/* Disable both IBS sampling units by clearing their control MSRs. */
static void op_amd_stop_ibs(void)
{
	if (!ibs_caps)
		return;

	if (ibs_config.fetch_enabled)
		/* clear max count and enable */
		wrmsrl(MSR_AMD64_IBSFETCHCTL, 0);

	if (ibs_config.op_enabled)
		/* clear max count and enable */
		wrmsrl(MSR_AMD64_IBSOPCTL, 0);
}
2011-05-20 11:46:54 +04:00
static inline int get_eilvt ( int offset )
2010-10-06 14:27:54 +04:00
{
return ! setup_APIC_eilvt ( offset , 0 , APIC_EILVT_MSG_NMI , 1 ) ;
}
2011-05-20 11:46:54 +04:00
/* Release EILVT entry @offset again; 1 on success. */
static inline int put_eilvt(int offset)
{
	return setup_APIC_eilvt(offset, 0, 0, 1) == 0;
}
2010-10-06 14:27:54 +04:00
/*
 * Check that the BIOS-programmed IBS LVT offset in MSR_AMD64_IBSCTL is
 * flagged valid and that the corresponding APIC EILVT entry can be
 * reserved.  Preemption is disabled so the MSR read and the EILVT
 * probe happen on the same CPU.  Returns 1 if usable, 0 otherwise.
 */
static inline int ibs_eilvt_valid(void)
{
	int offset;
	u64 val;
	int valid = 0;

	preempt_disable();

	rdmsrl(MSR_AMD64_IBSCTL, val);
	offset = val & IBSCTL_LVT_OFFSET_MASK;

	if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
		pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	if (!get_eilvt(offset)) {
		pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	valid = 1;
out:
	preempt_enable();
	return valid;
}
/*
 * Read the IBS LVT offset from MSR_AMD64_IBSCTL.
 * Returns the offset, or -EINVAL if the valid bit is not set.
 */
static inline int get_ibs_offset(void)
{
	u64 ctl;

	rdmsrl(MSR_AMD64_IBSCTL, ctl);
	if (ctl & IBSCTL_LVT_OFFSET_VALID)
		return ctl & IBSCTL_LVT_OFFSET_MASK;

	return -EINVAL;
}
/* Program this CPU's EILVT entry to deliver IBS interrupts as NMIs. */
static void setup_APIC_ibs(void)
{
	int offset = get_ibs_offset();

	if (offset >= 0 && !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
		return;

	pr_warn("oprofile: IBS APIC setup failed on cpu #%d\n",
		smp_processor_id());
}
/* Mask this CPU's IBS EILVT entry again. */
static void clear_APIC_ibs(void)
{
	int offset = get_ibs_offset();

	if (offset >= 0)
		setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}
2010-02-26 12:54:56 +03:00
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

/* Reprogram control MSRs when multiplexing rotates the virtual counters. */
static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
			       struct op_msrs const * const msrs)
{
	int i;

	/* enable active counters */
	for (i = 0; i < num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		u64 ctrl;

		if (!reset_value[virt])
			continue;
		rdmsrl(msrs->controls[i].addr, ctrl);
		ctrl &= model->reserved;
		ctrl |= op_x86_get_ctrl(model, &counter_config[virt]);
		wrmsrl(msrs->controls[i].addr, ctrl);
	}
}

#endif
/* functions for op_amd_spec */
static void op_amd_shutdown ( struct op_msrs const * const msrs )
{
int i ;
2010-09-24 17:54:43 +04:00
for ( i = 0 ; i < num_counters ; + + i ) {
2010-02-26 12:54:56 +03:00
if ( ! msrs - > counters [ i ] . addr )
continue ;
release_perfctr_nmi ( MSR_K7_PERFCTR0 + i ) ;
release_evntsel_nmi ( MSR_K7_EVNTSEL0 + i ) ;
}
}
/*
 * Reserve the perfctr/evntsel MSR pairs and record their addresses.
 * Family 15h uses the MSR_F15H_PERF_* pairs (stride of 2 per counter),
 * older families the classic K7 MSRs.  Returns 0 on success, -EBUSY if
 * a counter that is actually enabled could not be reserved.
 */
static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
{
	int i;

	for (i = 0; i < num_counters; i++) {
		if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
			goto fail;
		if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) {
			/* undo the perfctr reservation for this slot */
			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
			goto fail;
		}
		/* both registers must be reserved */
		if (num_counters == NUM_COUNTERS_F15H) {
			msrs->counters[i].addr = MSR_F15H_PERF_CTR + (i << 1);
			msrs->controls[i].addr = MSR_F15H_PERF_CTL + (i << 1);
		} else {
			msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
			msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
		}
		continue;
	fail:
		/* an unused counter failing to reserve is not fatal */
		if (!counter_config[i].enabled)
			continue;
		op_x86_warn_reserved(i);
		op_amd_shutdown(msrs);
		return -EBUSY;
	}

	return 0;
}
/*
 * Program counters for a profiling run: compute per-virtual-counter
 * reload values, quiesce all hardware counters, then arm the active
 * ones.  Also sets up this CPU's IBS APIC vector when IBS is present.
 */
static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
			      struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/* setup reset_value */
	for (i = 0; i < OP_MAX_COUNTER; ++i) {
		if (counter_config[i].enabled
		    && msrs->counters[op_x86_virt_to_phys(i)].addr)
			reset_value[i] = counter_config[i].count;
		else
			reset_value[i] = 0;
	}

	/* clear all counters */
	for (i = 0; i < num_counters; ++i) {
		if (!msrs->controls[i].addr)
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		/* counter already enabled suggests another user of the MSR */
		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
			op_x86_warn_in_use(i);
		val &= model->reserved;
		wrmsrl(msrs->controls[i].addr, val);
		/*
		 * avoid a false detection of ctr overflows in NMI
		 * handler
		 */
		wrmsrl(msrs->counters[i].addr, -1LL);
	}

	/* enable active counters */
	for (i = 0; i < num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (!reset_value[virt])
			continue;

		/* setup counter registers: count up from -reset_value */
		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);

		/* setup control registers */
		rdmsrl(msrs->controls[i].addr, val);
		val &= model->reserved;
		val |= op_x86_get_ctrl(model, &counter_config[virt]);
		wrmsrl(msrs->controls[i].addr, val);
	}

	if (ibs_caps)
		setup_APIC_ibs();
}
/* Per-cpu teardown: mask the IBS APIC vector again. */
static void op_amd_cpu_shutdown(void)
{
	if (!ibs_caps)
		return;

	clear_APIC_ibs();
}
2008-07-22 23:08:56 +04:00
/*
 * NMI handler: record a sample for every overflowed counter and
 * re-arm it, then service pending IBS samples.  Always reports the
 * NMI as handled (returns 1); see op_model_ppro.c.
 */
static int op_amd_check_ctrs(struct pt_regs * const regs,
			     struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	for (i = 0; i < num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (!reset_value[virt])
			continue;
		rdmsrl(msrs->counters[i].addr, val);
		/* bit is clear if overflowed: */
		if (val & OP_CTR_OVERFLOW)
			continue;
		oprofile_add_sample(regs, virt);
		/* reload so the counter overflows again after reset_value events */
		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
	}

	op_amd_handle_ibs(regs, msrs);

	/* See op_model_ppro.c */
	return 1;
}
2008-02-20 01:51:27 +03:00
2008-07-22 23:08:50 +04:00
static void op_amd_start ( struct op_msrs const * const msrs )
2005-04-17 02:20:36 +04:00
{
2009-05-25 20:11:52 +04:00
u64 val ;
2005-04-17 02:20:36 +04:00
int i ;
2009-07-08 15:49:38 +04:00
2010-09-24 17:54:43 +04:00
for ( i = 0 ; i < num_counters ; + + i ) {
2009-07-16 15:04:43 +04:00
if ( ! reset_value [ op_x86_phys_to_virt ( i ) ] )
continue ;
rdmsrl ( msrs - > controls [ i ] . addr , val ) ;
2010-03-01 16:21:23 +03:00
val | = ARCH_PERFMON_EVENTSEL_ENABLE ;
2009-07-16 15:04:43 +04:00
wrmsrl ( msrs - > controls [ i ] . addr , val ) ;
2005-04-17 02:20:36 +04:00
}
2008-07-22 23:09:06 +04:00
2009-03-10 21:15:57 +03:00
op_amd_start_ibs ( ) ;
2005-04-17 02:20:36 +04:00
}
2008-07-22 23:08:50 +04:00
static void op_amd_stop ( struct op_msrs const * const msrs )
2005-04-17 02:20:36 +04:00
{
2009-05-25 20:11:52 +04:00
u64 val ;
2005-04-17 02:20:36 +04:00
int i ;
2008-10-19 23:00:09 +04:00
/*
* Subtle : stop on all counters to avoid race with setting our
* pm callback
*/
2010-09-24 17:54:43 +04:00
for ( i = 0 ; i < num_counters ; + + i ) {
2009-07-16 15:04:43 +04:00
if ( ! reset_value [ op_x86_phys_to_virt ( i ) ] )
2006-09-26 12:52:26 +04:00
continue ;
2009-05-25 20:11:52 +04:00
rdmsrl ( msrs - > controls [ i ] . addr , val ) ;
2010-03-01 16:21:23 +03:00
val & = ~ ARCH_PERFMON_EVENTSEL_ENABLE ;
2009-05-25 20:11:52 +04:00
wrmsrl ( msrs - > controls [ i ] . addr , val ) ;
2005-04-17 02:20:36 +04:00
}
2008-07-22 23:08:55 +04:00
2009-03-10 21:15:57 +03:00
op_amd_stop_ibs ( ) ;
2005-04-17 02:20:36 +04:00
}
2010-10-06 14:27:54 +04:00
/*
 * Write the chosen EILVT offset into the IBS_CTL register of every
 * node's northbridge (one PCI misc device per node) and read it back
 * to verify.  Returns 0 on success, -EINVAL if a node rejected the
 * write, -ENODEV if no node device was found.
 */
static int setup_ibs_ctl(int ibs_eilvt_off)
{
	struct pci_dev *cpu_cfg;
	int nodes;
	u32 value = 0;

	nodes = 0;
	cpu_cfg = NULL;
	do {
		/* pci_get_device() continues the scan from the previous hit */
		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
					 cpu_cfg);
		if (!cpu_cfg)
			break;
		++nodes;
		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
				       | IBSCTL_LVT_OFFSET_VALID);
		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
		if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
			/* drop the reference pci_get_device() gave us */
			pci_dev_put(cpu_cfg);
			printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
			       "IBSCTL = 0x%08x\n", value);
			return -EINVAL;
		}
	} while (1);

	if (!nodes) {
		printk(KERN_DEBUG "No CPU node configured for IBS\n");
		return -ENODEV;
	}

	return 0;
}
2011-05-30 18:31:11 +04:00
/*
 * This runs only on the current cpu. We try to find an LVT offset and
 * setup the local APIC. For this we must disable preemption. On
 * success we initialize all nodes with this offset. This updates then
 * the offset in the IBS_CTL per-node msr. The per-core APIC setup of
 * the IBS interrupt vector is called from op_amd_setup_ctrs()/op_-
 * amd_cpu_shutdown() using the new offset.
 */
static int force_ibs_eilvt_setup(void)
{
	int offset;
	int ret;

	preempt_disable();
	/* find the next free available EILVT entry, skip offset 0 */
	for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
		if (get_eilvt(offset))
			break;
	}
	preempt_enable();

	if (offset == APIC_EILVT_NR_MAX) {
		printk(KERN_DEBUG "No EILVT entry available\n");
		return -EBUSY;
	}

	ret = setup_ibs_ctl(offset);
	if (ret)
		goto out;

	if (!ibs_eilvt_valid()) {
		ret = -EFAULT;
		goto out;
	}

	pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset);
	pr_err(FW_BUG "workaround enabled for IBS LVT offset\n");

	return 0;
out:
	/* on failure release the EILVT entry we reserved above */
	preempt_disable();
	put_eilvt(offset);
	preempt_enable();

	return ret;
}
2011-01-03 14:15:14 +03:00
/*
* check and reserve APIC extended interrupt LVT offset for IBS if
* available
*/
2010-05-05 19:47:17 +04:00
static void init_ibs ( void )
2008-07-22 23:08:55 +04:00
{
2011-01-03 14:15:14 +03:00
ibs_caps = get_ibs_caps ( ) ;
2011-05-20 11:46:54 +04:00
2010-02-04 12:57:23 +03:00
if ( ! ibs_caps )
2011-05-20 11:46:54 +04:00
return ;
if ( ibs_eilvt_valid ( ) )
2011-01-03 14:15:14 +03:00
goto out ;
2008-07-22 23:08:55 +04:00
2011-05-20 11:46:54 +04:00
if ( ! force_ibs_eilvt_setup ( ) )
goto out ;
/* Failed to setup ibs */
ibs_caps = 0 ;
return ;
2008-07-22 23:09:06 +04:00
2011-01-03 14:15:14 +03:00
out :
2011-05-20 11:46:54 +04:00
printk ( KERN_INFO " oprofile: AMD IBS detected (0x%08x) \n " , ibs_caps ) ;
2008-07-22 23:08:55 +04:00
}
2008-09-05 19:12:36 +04:00
static int ( * create_arch_files ) ( struct super_block * sb , struct dentry * root ) ;
2008-07-22 23:09:01 +04:00
2008-09-05 19:12:36 +04:00
/*
 * oprofilefs create_files hook: first create the generic architecture
 * files via the chained callback, then add IBS control files for the
 * capabilities this CPU supports (ibs_fetch/, ibs_op/).  Also resets
 * ibs_config to sane defaults.
 */
static int setup_ibs_files(struct super_block *sb, struct dentry *root)
{
	struct dentry *dir;
	int ret = 0;

	/* architecture specific files */
	if (create_arch_files)
		ret = create_arch_files(sb, root);

	if (ret)
		return ret;

	if (!ibs_caps)
		return ret;

	/* model specific files */

	/* setup some reasonable defaults */
	memset(&ibs_config, 0, sizeof(ibs_config));
	ibs_config.max_cnt_fetch = 250000;
	ibs_config.max_cnt_op = 250000;

	if (ibs_caps & IBS_CAPS_FETCHSAM) {
		dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
		oprofilefs_create_ulong(sb, dir, "enable",
					&ibs_config.fetch_enabled);
		oprofilefs_create_ulong(sb, dir, "max_count",
					&ibs_config.max_cnt_fetch);
		oprofilefs_create_ulong(sb, dir, "rand_enable",
					&ibs_config.rand_en);
	}

	if (ibs_caps & IBS_CAPS_OPSAM) {
		dir = oprofilefs_mkdir(sb, root, "ibs_op");
		oprofilefs_create_ulong(sb, dir, "enable",
					&ibs_config.op_enabled);
		oprofilefs_create_ulong(sb, dir, "max_count",
					&ibs_config.max_cnt_op);
		if (ibs_caps & IBS_CAPS_OPCNT)
			oprofilefs_create_ulong(sb, dir, "dispatched_ops",
						&ibs_config.dispatched_ops);
		if (ibs_caps & IBS_CAPS_BRNTRGT)
			oprofilefs_create_ulong(sb, dir, "branch_target",
						&ibs_config.branch_target);
	}

	return 0;
}
2010-09-24 17:54:43 +04:00
/* forward declaration; counter fields are filled in by op_amd_init() */
struct op_x86_model_spec op_amd_spec;
2008-07-22 23:08:48 +04:00
static int op_amd_init ( struct oprofile_operations * ops )
{
2010-05-05 19:47:17 +04:00
init_ibs ( ) ;
2008-07-22 23:09:01 +04:00
create_arch_files = ops - > create_files ;
ops - > create_files = setup_ibs_files ;
2010-09-24 17:54:43 +04:00
if ( boot_cpu_data . x86 = = 0x15 ) {
num_counters = NUM_COUNTERS_F15H ;
} else {
num_counters = NUM_COUNTERS ;
}
op_amd_spec . num_counters = num_counters ;
op_amd_spec . num_controls = num_counters ;
op_amd_spec . num_virt_counters = max ( num_counters , NUM_VIRT_COUNTERS ) ;
2008-07-22 23:08:48 +04:00
return 0 ;
}
2009-07-09 17:12:35 +04:00
/* AMD model operations table registered with the oprofile x86 core. */
struct op_x86_model_spec op_amd_spec = {
	/* num_counters/num_controls filled in at runtime */
	.reserved		= MSR_AMD_EVENTSEL_RESERVED,
	.event_mask		= OP_EVENT_MASK,
	.init			= op_amd_init,
	.fill_in_addresses	= &op_amd_fill_in_addresses,
	.setup_ctrs		= &op_amd_setup_ctrs,
	.cpu_down		= &op_amd_cpu_shutdown,
	.check_ctrs		= &op_amd_check_ctrs,
	.start			= &op_amd_start,
	.stop			= &op_amd_stop,
	.shutdown		= &op_amd_shutdown,
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
	.switch_ctrl		= &op_mux_switch_ctrl,
#endif
};