/*
 * @file op_model_amd.c
 * athlon / K7 / K8 / Family 10h model-specific MSR operations
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon
 * @author Philippe Elie
 * @author Graydon Hoare
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */

#include <linux/oprofile.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/percpu.h>

#include <asm/ptrace.h>
#include <asm/msr.h>
#include <asm/nmi.h>

#include "op_x86_model.h"
#include "op_counter.h"

#define NUM_COUNTERS            4
#define NUM_CONTROLS            4
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
#define NUM_VIRT_COUNTERS       32
#define NUM_VIRT_CONTROLS       32
#else
#define NUM_VIRT_COUNTERS       NUM_COUNTERS
#define NUM_VIRT_CONTROLS       NUM_CONTROLS
#endif

#define OP_EVENT_MASK                   0x0FFF
#define OP_CTR_OVERFLOW                 (1ULL<<31)

#define MSR_AMD_EVENTSEL_RESERVED       ((0xFFFFFCF0ULL<<32)|(1ULL<<21))

static unsigned long reset_value[NUM_VIRT_COUNTERS];
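
/*
 * Each enabled counter is armed with the two's complement of its
 * reset_value (see op_amd_setup_ctrs() and op_amd_check_ctrs()), so
 * the hardware counts upwards and wraps after reset_value events.
 * After the wrap, bit 31 (OP_CTR_OVERFLOW) reads as 0, which the NMI
 * handler uses as the overflow indication.
 */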

#ifdef CONFIG_OPROFILE_IBS

/* IbsFetchCtl bits/masks */
#define IBS_FETCH_RAND_EN               (1ULL<<57)
#define IBS_FETCH_VAL                   (1ULL<<49)
#define IBS_FETCH_ENABLE                (1ULL<<48)
#define IBS_FETCH_CNT_MASK              0xFFFF0000ULL

/* IbsOpCtl bits */
#define IBS_OP_CNT_CTL                  (1ULL<<19)
#define IBS_OP_VAL                      (1ULL<<18)
#define IBS_OP_ENABLE                   (1ULL<<17)

#define IBS_FETCH_SIZE                  6
#define IBS_OP_SIZE                     12

static int has_ibs;     /* AMD Family 10h and later */

struct op_ibs_config {
        unsigned long op_enabled;
        unsigned long fetch_enabled;
        unsigned long max_cnt_fetch;
        unsigned long max_cnt_op;
        unsigned long rand_en;
        unsigned long dispatched_ops;
};

static struct op_ibs_config ibs_config;

#endif
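
/*
 * With CONFIG_OPROFILE_EVENT_MULTIPLEX, up to NUM_VIRT_COUNTERS
 * virtual counters are time-multiplexed onto the NUM_COUNTERS
 * hardware counters; op_x86_virt_to_phys() and op_x86_phys_to_virt()
 * translate between the two index spaces.
 */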

#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static void op_mux_fill_in_addresses(struct op_msrs * const msrs)
{
        int i;

        for (i = 0; i < NUM_VIRT_COUNTERS; i++) {
                int hw_counter = op_x86_virt_to_phys(i);
                if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
                        msrs->multiplex[i].addr = MSR_K7_PERFCTR0 + hw_counter;
                else
                        msrs->multiplex[i].addr = 0;
        }
}

static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
                               struct op_msrs const * const msrs)
{
        u64 val;
        int i;

        /* enable active counters */
        for (i = 0; i < NUM_COUNTERS; ++i) {
                int virt = op_x86_phys_to_virt(i);
                if (!counter_config[virt].enabled)
                        continue;
                rdmsrl(msrs->controls[i].addr, val);
                val &= model->reserved;
                val |= op_x86_get_ctrl(model, &counter_config[virt]);
                wrmsrl(msrs->controls[i].addr, val);
        }
}

#else

static inline void op_mux_fill_in_addresses(struct op_msrs * const msrs) { }

#endif

/* functions for op_amd_spec */

static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
{
        int i;

        for (i = 0; i < NUM_COUNTERS; i++) {
                if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
                        msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
                else
                        msrs->counters[i].addr = 0;
        }

        for (i = 0; i < NUM_CONTROLS; i++) {
                if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
                        msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
                else
                        msrs->controls[i].addr = 0;
        }

        op_mux_fill_in_addresses(msrs);
}
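
/*
 * Note: if a counter or control MSR could not be reserved above (for
 * example because the NMI watchdog already owns it), its addr stays 0
 * and the code below simply skips that counter.
 */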

static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
                              struct op_msrs const * const msrs)
{
        u64 val;
        int i;

        /* setup reset_value */
        for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
                if (counter_config[i].enabled)
                        reset_value[i] = counter_config[i].count;
                else
                        reset_value[i] = 0;
        }

        /* clear all counters */
        for (i = 0; i < NUM_CONTROLS; ++i) {
                if (unlikely(!msrs->controls[i].addr))
                        continue;
                rdmsrl(msrs->controls[i].addr, val);
                val &= model->reserved;
                wrmsrl(msrs->controls[i].addr, val);
        }

        /* avoid a false detection of ctr overflows in NMI handler */
        for (i = 0; i < NUM_COUNTERS; ++i) {
                if (unlikely(!msrs->counters[i].addr))
                        continue;
                wrmsrl(msrs->counters[i].addr, -1LL);
        }

        /* enable active counters */
        for (i = 0; i < NUM_COUNTERS; ++i) {
                int virt = op_x86_phys_to_virt(i);
                if (!counter_config[virt].enabled)
                        continue;
                if (!msrs->counters[i].addr)
                        continue;

                /* setup counter registers */
                wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);

                /* setup control registers */
                rdmsrl(msrs->controls[i].addr, val);
                val &= model->reserved;
                val |= op_x86_get_ctrl(model, &counter_config[virt]);
                wrmsrl(msrs->controls[i].addr, val);
        }
}
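
/*
 * Instruction Based Sampling (IBS), available on Family 10h and
 * later.  A fetch sample records the fetch linear address (used as
 * the sample PC), IBSFETCHCTL and the fetch physical address; an op
 * sample records the op RIP followed by IBSOPDATA, IBSOPDATA2,
 * IBSOPDATA3 and the data cache linear/physical addresses.
 * IBS_FETCH_SIZE and IBS_OP_SIZE size the reserved buffer entry
 * accordingly.
 */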

#ifdef CONFIG_OPROFILE_IBS

static inline void
op_amd_handle_ibs(struct pt_regs * const regs,
                  struct op_msrs const * const msrs)
{
        u64 val, ctl;
        struct op_entry entry;

        if (!has_ibs)
                return;

        if (ibs_config.fetch_enabled) {
                rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
                if (ctl & IBS_FETCH_VAL) {
                        rdmsrl(MSR_AMD64_IBSFETCHLINAD, val);
                        oprofile_write_reserve(&entry, regs, val,
                                               IBS_FETCH_CODE, IBS_FETCH_SIZE);
                        oprofile_add_data64(&entry, val);
                        oprofile_add_data64(&entry, ctl);
                        rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, val);
                        oprofile_add_data64(&entry, val);
                        oprofile_write_commit(&entry);

                        /* reenable the IRQ */
                        ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT_MASK);
                        ctl |= IBS_FETCH_ENABLE;
                        wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
                }
        }

        if (ibs_config.op_enabled) {
                rdmsrl(MSR_AMD64_IBSOPCTL, ctl);
                if (ctl & IBS_OP_VAL) {
                        rdmsrl(MSR_AMD64_IBSOPRIP, val);
                        oprofile_write_reserve(&entry, regs, val,
                                               IBS_OP_CODE, IBS_OP_SIZE);
                        oprofile_add_data64(&entry, val);
                        rdmsrl(MSR_AMD64_IBSOPDATA, val);
                        oprofile_add_data64(&entry, val);
                        rdmsrl(MSR_AMD64_IBSOPDATA2, val);
                        oprofile_add_data64(&entry, val);
                        rdmsrl(MSR_AMD64_IBSOPDATA3, val);
                        oprofile_add_data64(&entry, val);
                        rdmsrl(MSR_AMD64_IBSDCLINAD, val);
                        oprofile_add_data64(&entry, val);
                        rdmsrl(MSR_AMD64_IBSDCPHYSAD, val);
                        oprofile_add_data64(&entry, val);
                        oprofile_write_commit(&entry);

                        /* reenable the IRQ */
                        ctl &= ~IBS_OP_VAL & 0xFFFFFFFF;
                        ctl |= IBS_OP_ENABLE;
                        wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
                }
        }
}
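
/*
 * The max_cnt_* values are given in events; the hardware MaxCnt
 * fields hold the count divided by 16, hence the ">> 4" below.
 */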

static inline void op_amd_start_ibs(void)
{
        u64 val;

        if (has_ibs && ibs_config.fetch_enabled) {
                val = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
                val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
                val |= IBS_FETCH_ENABLE;
                wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
        }

        if (has_ibs && ibs_config.op_enabled) {
                val = (ibs_config.max_cnt_op >> 4) & 0xFFFF;
                val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0;
                val |= IBS_OP_ENABLE;
                wrmsrl(MSR_AMD64_IBSOPCTL, val);
        }
}

static void op_amd_stop_ibs(void)
{
        if (has_ibs && ibs_config.fetch_enabled)
                /* clear max count and enable */
                wrmsrl(MSR_AMD64_IBSFETCHCTL, 0);

        if (has_ibs && ibs_config.op_enabled)
                /* clear max count and enable */
                wrmsrl(MSR_AMD64_IBSOPCTL, 0);
}

#else

static inline void op_amd_handle_ibs(struct pt_regs * const regs,
                                     struct op_msrs const * const msrs) { }
static inline void op_amd_start_ibs(void) { }
static inline void op_amd_stop_ibs(void) { }

#endif
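
/*
 * NMI handler: each overflowed physical counter is credited to the
 * virtual counter currently mapped onto it and then re-armed with its
 * negated reset_value.
 */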

static int op_amd_check_ctrs(struct pt_regs * const regs,
                             struct op_msrs const * const msrs)
{
        u64 val;
        int i;

        for (i = 0; i < NUM_COUNTERS; ++i) {
                int virt = op_x86_phys_to_virt(i);
                if (!reset_value[virt])
                        continue;
                rdmsrl(msrs->counters[i].addr, val);
                /* bit is clear if overflowed: */
                if (val & OP_CTR_OVERFLOW)
                        continue;
                oprofile_add_sample(regs, virt);
                wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
        }

        op_amd_handle_ibs(regs, msrs);

        /* See op_model_ppro.c */
        return 1;
}

static void op_amd_start(struct op_msrs const * const msrs)
{
        u64 val;
        int i;

        for (i = 0; i < NUM_COUNTERS; ++i) {
                if (!reset_value[op_x86_phys_to_virt(i)])
                        continue;
                rdmsrl(msrs->controls[i].addr, val);
                val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
                wrmsrl(msrs->controls[i].addr, val);
        }

        op_amd_start_ibs();
}

static void op_amd_stop(struct op_msrs const * const msrs)
{
        u64 val;
        int i;

        /*
         * Subtle: stop on all counters to avoid race with setting our
         * pm callback
         */
        for (i = 0; i < NUM_COUNTERS; ++i) {
                if (!reset_value[op_x86_phys_to_virt(i)])
                        continue;
                rdmsrl(msrs->controls[i].addr, val);
                val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
                wrmsrl(msrs->controls[i].addr, val);
        }

        op_amd_stop_ibs();
}

static void op_amd_shutdown(struct op_msrs const * const msrs)
{
        int i;

        for (i = 0; i < NUM_COUNTERS; ++i) {
                if (msrs->counters[i].addr)
                        release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
        }
        for (i = 0; i < NUM_CONTROLS; ++i) {
                if (msrs->controls[i].addr)
                        release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
        }
}

#ifdef CONFIG_OPROFILE_IBS

static u8 ibs_eilvt_off;

static inline void apic_init_ibs_nmi_per_cpu(void *arg)
{
        ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
}

static inline void apic_clear_ibs_nmi_per_cpu(void *arg)
{
        setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
}
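
/*
 * Route the IBS interrupt as an NMI: program an extended interrupt
 * LVT entry on every CPU and advertise the chosen LVT offset in the
 * IBSCTL register of each node's northbridge device.
 */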

static int init_ibs_nmi(void)
{
#define IBSCTL_LVTOFFSETVAL            (1 << 8)
#define IBSCTL                         0x1cc
        struct pci_dev *cpu_cfg;
        int nodes;
        u32 value = 0;

        /* per CPU setup */
        on_each_cpu(apic_init_ibs_nmi_per_cpu, NULL, 1);

        nodes = 0;
        cpu_cfg = NULL;
        do {
                cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
                                         PCI_DEVICE_ID_AMD_10H_NB_MISC,
                                         cpu_cfg);
                if (!cpu_cfg)
                        break;
                ++nodes;
                pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
                                       | IBSCTL_LVTOFFSETVAL);
                pci_read_config_dword(cpu_cfg, IBSCTL, &value);
                if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) {
                        pci_dev_put(cpu_cfg);
                        printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
                               "IBSCTL = 0x%08x", value);
                        return 1;
                }
        } while (1);

        if (!nodes) {
                printk(KERN_DEBUG "No CPU node configured for IBS");
                return 1;
        }

#ifdef CONFIG_NUMA
        /* Sanity check */
        /* Works only for 64bit with proper numa implementation. */
        if (nodes != num_possible_nodes()) {
                printk(KERN_DEBUG "Failed to setup CPU node(s) for IBS, "
                       "found: %d, expected %d",
                       nodes, num_possible_nodes());
                return 1;
        }
#endif
        return 0;
}

/* uninitialize the APIC for the IBS interrupts if needed */
static void clear_ibs_nmi(void)
{
        if (has_ibs)
                on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
}

/* initialize the APIC for the IBS interrupts if available */
static void ibs_init(void)
{
        has_ibs = boot_cpu_has(X86_FEATURE_IBS);

        if (!has_ibs)
                return;

        if (init_ibs_nmi()) {
                has_ibs = 0;
                return;
        }

        printk(KERN_INFO "oprofile: AMD IBS detected\n");
}

static void ibs_exit(void)
{
        if (!has_ibs)
                return;

        clear_ibs_nmi();
}
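
/*
 * The IBS control files end up under the oprofilefs root, e.g.
 * /dev/oprofile/ibs_fetch/{enable,max_count,rand_enable} and
 * /dev/oprofile/ibs_op/{enable,max_count,dispatched_ops}.
 */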

static int (*create_arch_files)(struct super_block *sb, struct dentry *root);

static int setup_ibs_files(struct super_block *sb, struct dentry *root)
{
        struct dentry *dir;
        int ret = 0;

        /* architecture specific files */
        if (create_arch_files)
                ret = create_arch_files(sb, root);

        if (ret)
                return ret;

        if (!has_ibs)
                return ret;

        /* model specific files */

        /* setup some reasonable defaults */
        ibs_config.max_cnt_fetch = 250000;
        ibs_config.fetch_enabled = 0;
        ibs_config.max_cnt_op = 250000;
        ibs_config.op_enabled = 0;
        ibs_config.dispatched_ops = 1;

        dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
        oprofilefs_create_ulong(sb, dir, "enable",
                                &ibs_config.fetch_enabled);
        oprofilefs_create_ulong(sb, dir, "max_count",
                                &ibs_config.max_cnt_fetch);
        oprofilefs_create_ulong(sb, dir, "rand_enable",
                                &ibs_config.rand_en);

        dir = oprofilefs_mkdir(sb, root, "ibs_op");
        oprofilefs_create_ulong(sb, dir, "enable",
                                &ibs_config.op_enabled);
        oprofilefs_create_ulong(sb, dir, "max_count",
                                &ibs_config.max_cnt_op);
        oprofilefs_create_ulong(sb, dir, "dispatched_ops",
                                &ibs_config.dispatched_ops);

        return 0;
}

static int op_amd_init(struct oprofile_operations *ops)
{
        ibs_init();
        create_arch_files = ops->create_files;
        ops->create_files = setup_ibs_files;
        return 0;
}

static void op_amd_exit(void)
{
        ibs_exit();
}

#else

/* no IBS support */

static int op_amd_init(struct oprofile_operations *ops)
{
        return 0;
}

static void op_amd_exit(void) { }

#endif /* CONFIG_OPROFILE_IBS */

struct op_x86_model_spec op_amd_spec = {
        .num_counters           = NUM_COUNTERS,
        .num_controls           = NUM_CONTROLS,
        .num_virt_counters      = NUM_VIRT_COUNTERS,
        .reserved               = MSR_AMD_EVENTSEL_RESERVED,
        .event_mask             = OP_EVENT_MASK,
        .init                   = op_amd_init,
        .exit                   = op_amd_exit,
        .fill_in_addresses      = &op_amd_fill_in_addresses,
        .setup_ctrs             = &op_amd_setup_ctrs,
        .check_ctrs             = &op_amd_check_ctrs,
        .start                  = &op_amd_start,
        .stop                   = &op_amd_stop,
        .shutdown               = &op_amd_shutdown,
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
        .switch_ctrl            = &op_mux_switch_ctrl,
#endif
};