/*
 * @file op_model_amd.c
 * athlon / K7 / K8 / Family 10h model-specific MSR operations
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon
 * @author Philippe Elie
 * @author Graydon Hoare
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */
#include <linux/oprofile.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/percpu.h>

#include <asm/ptrace.h>
#include <asm/msr.h>
#include <asm/nmi.h>
#include <asm/apic.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>

#include "op_x86_model.h"
#include "op_counter.h"
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
#define NUM_VIRT_COUNTERS		32
#else
#define NUM_VIRT_COUNTERS		0
#endif

#define OP_EVENT_MASK			0x0FFF
#define OP_CTR_OVERFLOW			(1ULL<<31)

#define MSR_AMD_EVENTSEL_RESERVED	((0xFFFFFCF0ULL<<32)|(1ULL<<21))

static int num_counters;
static unsigned long reset_value[OP_MAX_COUNTER];
#define IBS_FETCH_SIZE			6
#define IBS_OP_SIZE			12

static u32 ibs_caps;

struct ibs_config {
	unsigned long op_enabled;
	unsigned long fetch_enabled;
	unsigned long max_cnt_fetch;
	unsigned long max_cnt_op;
	unsigned long rand_en;
	unsigned long dispatched_ops;
	unsigned long branch_target;
};

struct ibs_state {
	u64		ibs_op_ctl;
	int		branch_target;
	unsigned long	sample_size;
};

static struct ibs_config ibs_config;
static struct ibs_state ibs_state;
/*
 * IBS randomization macros
 */
#define IBS_RANDOM_BITS			12
#define IBS_RANDOM_MASK			((1ULL << IBS_RANDOM_BITS) - 1)
#define IBS_RANDOM_MAXCNT_OFFSET	(1ULL << (IBS_RANDOM_BITS - 5))
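
/*
 * IBS_RANDOM_MAXCNT_OFFSET evaluates to 1 << 7 = 128: the average of
 * a 12-bit random IbsOpCurCnt start value (2^12 / 2 = 2048) expressed
 * in max count units (>> 4). op_amd_start_ibs() adds it to the
 * maximum count to compensate for the shortened sampling period.
 */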
/*
 * 16-bit Linear Feedback Shift Register (LFSR)
 *
 * Feedback polynomial = X^16 + X^14 + X^13 + X^11 + 1
 */
static unsigned int lfsr_random(void)
{
	static unsigned int lfsr_value = 0xF00D;
	unsigned int bit;

	/* Compute next bit to shift in */
	bit = ((lfsr_value >> 0) ^
	       (lfsr_value >> 2) ^
	       (lfsr_value >> 3) ^
	       (lfsr_value >> 5)) & 0x0001;

	/* Advance to next register value */
	lfsr_value = (lfsr_value >> 1) | (bit << 15);

	return lfsr_value;
}
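
/*
 * Example: from the seed 0xF00D, bits 0, 2, 3 and 5 XOR to 1, so the
 * first call returns (0xF00D >> 1) | (1 << 15) = 0xF806; the second
 * call returns 0xFC03. The taps at bit positions 0, 2, 3 and 5 of the
 * right-shifting register correspond to the polynomial terms X^16,
 * X^14, X^13 and X^11.
 */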
/*
 * IBS software randomization
 *
 * The IBS periodic op counter is randomized in software. The lower 12
 * bits of the 20 bit counter are randomized. IbsOpCurCnt is
 * initialized with a 12 bit random value.
 */
static inline u64 op_amd_randomize_ibs_op(u64 val)
{
	unsigned int random = lfsr_random();

	if (!(ibs_caps & IBS_CAPS_RDWROPCNT))
		/*
		 * Work around hardware that cannot write to
		 * IbsOpCurCnt:
		 *
		 * Randomize the lower 8 bits of the 16 bit
		 * IbsOpMaxCnt [15:0] value in the range of -128 to
		 * +127 by adding/subtracting an offset to the
		 * maximum count (IbsOpMaxCnt).
		 *
		 * To avoid over- or underflows and to protect the
		 * upper bits starting at bit 16, the initial value
		 * for IbsOpMaxCnt must fit in the range from 0x0081
		 * to 0xff80.
		 */
		val += (s8)(random >> 4);
	else
		val |= (u64)(random & IBS_RANDOM_MASK) << 32;

	return val;
}
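
/*
 * Example: if lfsr_random() returns 0xF806, then without
 * IBS_CAPS_RDWROPCNT (s8)(0xF806 >> 4) = (s8)0x80 = -128, so the max
 * count is biased down by 128; with the capability,
 * (0xF806 & 0xFFF) << 32 = 0x806 << 32 seeds the low 12 bits of the
 * IbsOpCurCnt field (bits 43:32 of the control value).
 */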
static inline void
op_amd_handle_ibs(struct pt_regs * const regs,
		  struct op_msrs const * const msrs)
{
	u64 val, ctl;
	struct op_entry entry;

	if (!ibs_caps)
		return;

	if (ibs_config.fetch_enabled) {
		rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
		if (ctl & IBS_FETCH_VAL) {
			rdmsrl(MSR_AMD64_IBSFETCHLINAD, val);
			oprofile_write_reserve(&entry, regs, val,
					       IBS_FETCH_CODE, IBS_FETCH_SIZE);
			oprofile_add_data64(&entry, val);
			oprofile_add_data64(&entry, ctl);
			rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, val);
			oprofile_add_data64(&entry, val);
			oprofile_write_commit(&entry);

			/* reenable the IRQ */
			ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT);
			ctl |= IBS_FETCH_ENABLE;
			wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
		}
	}

	if (ibs_config.op_enabled) {
		rdmsrl(MSR_AMD64_IBSOPCTL, ctl);
		if (ctl & IBS_OP_VAL) {
			rdmsrl(MSR_AMD64_IBSOPRIP, val);
			oprofile_write_reserve(&entry, regs, val, IBS_OP_CODE,
					       ibs_state.sample_size);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA2, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA3, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSDCLINAD, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSDCPHYSAD, val);
			oprofile_add_data64(&entry, val);
			if (ibs_state.branch_target) {
				rdmsrl(MSR_AMD64_IBSBRTARGET, val);
				oprofile_add_data(&entry, (unsigned long)val);
			}
			oprofile_write_commit(&entry);

			/* reenable the IRQ */
			ctl = op_amd_randomize_ibs_op(ibs_state.ibs_op_ctl);
			wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
		}
	}
}
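
/*
 * Restart protocol used above: the hardware sets IBS_FETCH_VAL or
 * IBS_OP_VAL when a sample completes and stops counting until the
 * valid bit is cleared and the enable bit is set again. The op
 * control is rebuilt from ibs_state.ibs_op_ctl so that every new
 * period is re-randomized.
 */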
static inline void op_amd_start_ibs(void)
{
	u64 val;

	if (!ibs_caps)
		return;

	memset(&ibs_state, 0, sizeof(ibs_state));

	/*
	 * Note: Since the max count settings may be out of range we
	 * write back the actually used values so that userland can
	 * read them.
	 */

	if (ibs_config.fetch_enabled) {
		val = ibs_config.max_cnt_fetch >> 4;
		val = min(val, IBS_FETCH_MAX_CNT);
		ibs_config.max_cnt_fetch = val << 4;
		val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
		val |= IBS_FETCH_ENABLE;
		wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
	}

	if (ibs_config.op_enabled) {
		val = ibs_config.max_cnt_op >> 4;
		if (!(ibs_caps & IBS_CAPS_RDWROPCNT)) {
			/*
			 * IbsOpCurCnt not supported.  See
			 * op_amd_randomize_ibs_op() for details.
			 */
			val = clamp(val, 0x0081ULL, 0xFF80ULL);
			ibs_config.max_cnt_op = val << 4;
		} else {
			/*
			 * The start value is randomized with a
			 * positive offset, so we compensate with half
			 * of the randomized range. Also avoid
			 * underflows.
			 */
			val += IBS_RANDOM_MAXCNT_OFFSET;
			if (ibs_caps & IBS_CAPS_OPCNTEXT)
				val = min(val, IBS_OP_MAX_CNT_EXT);
			else
				val = min(val, IBS_OP_MAX_CNT);
			ibs_config.max_cnt_op =
				(val - IBS_RANDOM_MAXCNT_OFFSET) << 4;
		}
		/*
		 * IbsOpMaxCnt[15:0] stays in bits 15:0 of the control
		 * value; extended count bits above them belong in
		 * IbsOpCtl[26:20] (IBS_CAPS_OPCNTEXT), hence the
		 * shift by 4.
		 */
		val = ((val & ~IBS_OP_MAX_CNT) << 4) | (val & IBS_OP_MAX_CNT);
		val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0;
		val |= IBS_OP_ENABLE;
		ibs_state.ibs_op_ctl = val;
		ibs_state.sample_size = IBS_OP_SIZE;
		if (ibs_config.branch_target) {
			ibs_state.branch_target = 1;
			ibs_state.sample_size++;
		}
		val = op_amd_randomize_ibs_op(ibs_state.ibs_op_ctl);
		wrmsrl(MSR_AMD64_IBSOPCTL, val);
	}
}
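
/*
 * The hardware max count fields store the count divided by 16 (the
 * low 4 bits are implied zero), hence the >> 4 when building the
 * control values above and the << 4 when writing the clamped counts
 * back to ibs_config.
 */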
static void op_amd_stop_ibs(void)
{
	if (!ibs_caps)
		return;

	if (ibs_config.fetch_enabled)
		/* clear max count and enable */
		wrmsrl(MSR_AMD64_IBSFETCHCTL, 0);

	if (ibs_config.op_enabled)
		/* clear max count and enable */
		wrmsrl(MSR_AMD64_IBSOPCTL, 0);
}
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
			       struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/* enable active counters */
	for (i = 0; i < num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (!reset_value[virt])
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val &= model->reserved;
		val |= op_x86_get_ctrl(model, &counter_config[virt]);
		wrmsrl(msrs->controls[i].addr, val);
	}
}

#endif
/* functions for op_amd_spec */

static void op_amd_shutdown(struct op_msrs const * const msrs)
{
	int i;

	for (i = 0; i < num_counters; ++i) {
		if (!msrs->counters[i].addr)
			continue;
		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}
}

static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
{
	int i;

	for (i = 0; i < num_counters; i++) {
		if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
			goto fail;
		if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) {
			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
			goto fail;
		}
		/* both registers must be reserved */
		if (num_counters == AMD64_NUM_COUNTERS_CORE) {
			/* Family 15h: ctl/ctr MSRs interleave, stride 2 */
			msrs->counters[i].addr = MSR_F15H_PERF_CTR + (i << 1);
			msrs->controls[i].addr = MSR_F15H_PERF_CTL + (i << 1);
		} else {
			msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
			msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
		}
		continue;
	fail:
		if (!counter_config[i].enabled)
			continue;
		op_x86_warn_reserved(i);
		op_amd_shutdown(msrs);
		return -EBUSY;
	}

	return 0;
}
static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
			      struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/* setup reset_value */
	for (i = 0; i < OP_MAX_COUNTER; ++i) {
		if (counter_config[i].enabled
		    && msrs->counters[op_x86_virt_to_phys(i)].addr)
			reset_value[i] = counter_config[i].count;
		else
			reset_value[i] = 0;
	}

	/* clear all counters */
	for (i = 0; i < num_counters; ++i) {
		if (!msrs->controls[i].addr)
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
			op_x86_warn_in_use(i);
		val &= model->reserved;
		wrmsrl(msrs->controls[i].addr, val);
		/*
		 * avoid a false detection of ctr overflows in NMI
		 * handler
		 */
		wrmsrl(msrs->counters[i].addr, -1LL);
	}

	/* enable active counters */
	for (i = 0; i < num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (!reset_value[virt])
			continue;

		/* setup counter registers */
		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);

		/* setup control registers */
		rdmsrl(msrs->controls[i].addr, val);
		val &= model->reserved;
		val |= op_x86_get_ctrl(model, &counter_config[virt]);
		wrmsrl(msrs->controls[i].addr, val);
	}
}
static int op_amd_check_ctrs(struct pt_regs * const regs,
			     struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	for (i = 0; i < num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (!reset_value[virt])
			continue;
		rdmsrl(msrs->counters[i].addr, val);
		/* bit is clear if overflowed: */
		if (val & OP_CTR_OVERFLOW)
			continue;
		oprofile_add_sample(regs, virt);
		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
	}

	op_amd_handle_ibs(regs, msrs);

	/* See op_model_ppro.c */
	return 1;
}
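
/*
 * The counters are programmed with -reset_value so that they
 * overflow, and raise the NMI, after exactly reset_value events;
 * op_amd_check_ctrs() above treats a set OP_CTR_OVERFLOW bit as "not
 * yet overflowed" and rearms the counter with the same negative value
 * after logging a sample.
 */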
static void op_amd_start(struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	for (i = 0; i < num_counters; ++i) {
		if (!reset_value[op_x86_phys_to_virt(i)])
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(msrs->controls[i].addr, val);
	}

	op_amd_start_ibs();
}
static void op_amd_stop(struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/*
	 * Subtle: stop on all counters to avoid a race with setting
	 * our pm callback
	 */
	for (i = 0; i < num_counters; ++i) {
		if (!reset_value[op_x86_phys_to_virt(i)])
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(msrs->controls[i].addr, val);
	}

	op_amd_stop_ibs();
}
/*
 * check and reserve APIC extended interrupt LVT offset for IBS if
 * available
 */
static void init_ibs(void)
{
	ibs_caps = get_ibs_caps();

	if (!ibs_caps)
		return;

	printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
}
static int (*create_arch_files)(struct dentry *root);

static int setup_ibs_files(struct dentry *root)
{
	struct dentry *dir;
	int ret = 0;

	/* architecture specific files */
	if (create_arch_files)
		ret = create_arch_files(root);

	if (ret)
		return ret;

	if (!ibs_caps)
		return ret;

	/* model specific files */

	/* setup some reasonable defaults */
	memset(&ibs_config, 0, sizeof(ibs_config));
	ibs_config.max_cnt_fetch = 250000;
	ibs_config.max_cnt_op = 250000;

	if (ibs_caps & IBS_CAPS_FETCHSAM) {
		dir = oprofilefs_mkdir(root, "ibs_fetch");
		oprofilefs_create_ulong(dir, "enable",
					&ibs_config.fetch_enabled);
		oprofilefs_create_ulong(dir, "max_count",
					&ibs_config.max_cnt_fetch);
		oprofilefs_create_ulong(dir, "rand_enable",
					&ibs_config.rand_en);
	}

	if (ibs_caps & IBS_CAPS_OPSAM) {
		dir = oprofilefs_mkdir(root, "ibs_op");
		oprofilefs_create_ulong(dir, "enable",
					&ibs_config.op_enabled);
		oprofilefs_create_ulong(dir, "max_count",
					&ibs_config.max_cnt_op);
		if (ibs_caps & IBS_CAPS_OPCNT)
			oprofilefs_create_ulong(dir, "dispatched_ops",
						&ibs_config.dispatched_ops);
		if (ibs_caps & IBS_CAPS_BRNTRGT)
			oprofilefs_create_ulong(dir, "branch_target",
						&ibs_config.branch_target);
	}

	return 0;
}
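
/*
 * With oprofilefs mounted (conventionally at /dev/oprofile), the
 * files created above appear as e.g. ibs_fetch/enable and
 * ibs_op/max_count under the mount point. Userspace writes them
 * before starting a profile; op_amd_start_ibs() writes the clamped
 * max counts back so the values actually used can be read again.
 */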
struct op_x86_model_spec op_amd_spec;

static int op_amd_init(struct oprofile_operations *ops)
{
	init_ibs();
	create_arch_files = ops->create_files;
	ops->create_files = setup_ibs_files;

	if (boot_cpu_data.x86 == 0x15) {
		/* Family 15h provides more core performance counters */
		num_counters = AMD64_NUM_COUNTERS_CORE;
	} else {
		num_counters = AMD64_NUM_COUNTERS;
	}

	op_amd_spec.num_counters = num_counters;
	op_amd_spec.num_controls = num_counters;
	op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);

	return 0;
}
struct op_x86_model_spec op_amd_spec = {
	/* num_counters/num_controls filled in at runtime */
	.reserved		= MSR_AMD_EVENTSEL_RESERVED,
	.event_mask		= OP_EVENT_MASK,
	.init			= op_amd_init,
	.fill_in_addresses	= &op_amd_fill_in_addresses,
	.setup_ctrs		= &op_amd_setup_ctrs,
	.check_ctrs		= &op_amd_check_ctrs,
	.start			= &op_amd_start,
	.stop			= &op_amd_stop,
	.shutdown		= &op_amd_shutdown,
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
	.switch_ctrl		= &op_mux_switch_ctrl,
#endif
};