/*
 * Linux performance counter support for MIPS.
 *
 * Copyright (C) 2010 MIPS Technologies, Inc.
 * Copyright (C) 2011 Cavium Networks, Inc.
 * Author: Deng-Cheng Zhu
 *
 * This code is based on the implementation for ARM, which is in turn
 * based on the sparc64 perf event code and the x86 code. Performance
 * counter access is based on the MIPS Oprofile code. And the callchain
 * support references the code of MIPS stacktrace.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */

#define MIPS_MAX_HWEVENTS 4
#define MIPS_TCS_PER_COUNTER 2
#define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)

struct cpu_hw_events {
	/* Array of events on this cpu. */
	struct perf_event	*events[MIPS_MAX_HWEVENTS];

	/*
	 * Set the bit (indexed by the counter number) when the counter
	 * is used for an event.
	 */
	unsigned long		used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

	/*
	 * Software copy of the control register for each performance
	 * counter. MIPS CPUs vary in their performance counters; they
	 * use this field differently, and some may not use it at all.
	 */
	unsigned int		saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.saved_ctrl = {0},
};

/* The description of MIPS performance events. */
struct mips_perf_event {
	unsigned int event_id;
	/*
	 * MIPS performance counters are indexed starting from 0.
	 * CNTR_EVEN indicates the indexes of the counters to be used are
	 * even numbers.
	 */
	unsigned int cntr_mask;
#define CNTR_EVEN	0x55555555
#define CNTR_ODD	0xaaaaaaaa
#define CNTR_ALL	0xffffffff
#ifdef CONFIG_MIPS_MT_SMP
	enum {
		T  = 0,
		V  = 1,
		P  = 2,
	} range;
#else
	#define T
	#define V
	#define P
#endif
};

static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);

#define C(x) PERF_COUNT_HW_CACHE_##x

struct mips_pmu {
	u64		max_period;
	u64		valid_count;
	u64		overflow;
	const char	*name;
	int		irq;
	u64		(*read_counter)(unsigned int idx);
	void		(*write_counter)(unsigned int idx, u64 val);
	const struct mips_perf_event *(*map_raw_event)(u64 config);
	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
	const struct mips_perf_event (*cache_event_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
	unsigned int	num_counters;
};

static struct mips_pmu mipspmu;

#define M_CONFIG1_PC	(1 << 4)

#define M_PERFCTL_EXL			(1 << 0)
#define M_PERFCTL_KERNEL		(1 << 1)
#define M_PERFCTL_SUPERVISOR		(1 << 2)
#define M_PERFCTL_USER			(1 << 3)
#define M_PERFCTL_INTERRUPT_ENABLE	(1 << 4)
#define M_PERFCTL_EVENT(event)		(((event) & 0x3ff) << 5)
#define M_PERFCTL_VPEID(vpe)		((vpe) << 16)

#ifdef CONFIG_CPU_BMIPS5000
#define M_PERFCTL_MT_EN(filter)		0
#else /* !CONFIG_CPU_BMIPS5000 */
#define M_PERFCTL_MT_EN(filter)		((filter) << 20)
#endif /* CONFIG_CPU_BMIPS5000 */

#define M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
#define M_TC_EN_VPE			M_PERFCTL_MT_EN(1)
#define M_TC_EN_TC			M_PERFCTL_MT_EN(2)
#define M_PERFCTL_TCID(tcid)		((tcid) << 22)
#define M_PERFCTL_WIDE			(1 << 30)
#define M_PERFCTL_MORE			(1 << 31)
#define M_PERFCTL_TC			(1 << 30)

#define M_PERFCTL_COUNT_EVENT_WHENEVER	(M_PERFCTL_EXL |	\
					 M_PERFCTL_KERNEL |	\
					 M_PERFCTL_USER |	\
					 M_PERFCTL_SUPERVISOR | \
					 M_PERFCTL_INTERRUPT_ENABLE)

#ifdef CONFIG_MIPS_MT_SMP
#define M_PERFCTL_CONFIG_MASK		0x3fff801f
#else
#define M_PERFCTL_CONFIG_MASK		0x1f
#endif
#define M_PERFCTL_EVENT_MASK		0xfe0
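
/*
 * Note (informational, based on the MIPS32 PerfCtl layout this file and
 * the oprofile model it borrows from assume): bits 0-4 select the modes
 * in which counting is enabled plus the interrupt enable, bits 5-14 hold
 * the event number, bit 30 (W) flags a 64-bit wide counter and bit 31 (M)
 * indicates that another counter/control pair follows this one.
 */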

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
static int cpu_has_mipsmt_pertccounters;

static DEFINE_RWLOCK(pmuint_rwlock);

#if defined(CONFIG_CPU_BMIPS5000)
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
#else
/*
 * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
 * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
 */
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			 0 : smp_processor_id())
#endif

/* Copied from op_model_mipsxx.c */
static unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

static unsigned int counters_total_to_per_cpu(unsigned int counters)
{
	return counters >> vpe_shift();
}

#else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
#define vpe_id()	0

#endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */

static void resume_local_counters(void);
static void pause_local_counters(void);
static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
static int mipsxx_pmu_handle_shared_irq(void);

static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
{
	if (vpe_id() == 1)
		idx = (idx + 2) & 3;
	return idx;
}
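
/*
 * Illustration (assuming the shared four-counter VSMP layout handled
 * above): on VPE 1 the swizzle maps logical counter 0 to physical
 * counter 2, 1 to 3, 2 to 0 and 3 to 1, so each VPE effectively sees
 * its own half of the counter set.
 */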

static u64 mipsxx_pmu_read_counter(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		/*
		 * The counters are unsigned, we must cast to truncate
		 * off the high bits.
		 */
		return (u32)read_c0_perfcntr0();
	case 1:
		return (u32)read_c0_perfcntr1();
	case 2:
		return (u32)read_c0_perfcntr2();
	case 3:
		return (u32)read_c0_perfcntr3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfcntr0_64();
	case 1:
		return read_c0_perfcntr1_64();
	case 2:
		return read_c0_perfcntr2_64();
	case 3:
		return read_c0_perfcntr3_64();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0(val);
		return;
	case 1:
		write_c0_perfcntr1(val);
		return;
	case 2:
		write_c0_perfcntr2(val);
		return;
	case 3:
		write_c0_perfcntr3(val);
		return;
	}
}

static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0_64(val);
		return;
	case 1:
		write_c0_perfcntr1_64(val);
		return;
	case 2:
		write_c0_perfcntr2_64(val);
		return;
	case 3:
		write_c0_perfcntr3_64(val);
		return;
	}
}

static unsigned int mipsxx_pmu_read_control(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfctrl0();
	case 1:
		return read_c0_perfctrl1();
	case 2:
		return read_c0_perfctrl2();
	case 3:
		return read_c0_perfctrl3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfctrl0(val);
		return;
	case 1:
		write_c0_perfctrl1(val);
		return;
	case 2:
		write_c0_perfctrl2(val);
		return;
	case 3:
		write_c0_perfctrl3(val);
		return;
	}
}

static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
				    struct hw_perf_event *hwc)
{
	int i;

	/*
	 * We only need to care about the counter mask. The range has
	 * already been checked.
	 */
	unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;

	for (i = mipspmu.num_counters - 1; i >= 0; i--) {
		/*
		 * Note that some MIPS perf events can be counted by both
		 * even and odd counters, whereas many others can only be
		 * counted by even _or_ odd counters. This introduces an
		 * issue: when an event of the former kind takes the counter
		 * that an event of the latter kind wants to use, the
		 * "counter allocation" for the latter event will fail. If
		 * they could be swapped dynamically, both would be happy,
		 * but we leave this issue alone for now.
		 */
		if (test_bit(i, &cntr_mask) &&
			!test_and_set_bit(i, cpuc->used_mask))
			return i;
	}

	return -EAGAIN;
}
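
/*
 * Example (illustrative): with four hardware counters and a cntr_mask of
 * CNTR_EVEN, (hwc->event_base >> 8) & 0xffff has bits 0 and 2 set, so the
 * loop above will only hand out counter 2 or counter 0 for that event.
 */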

static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
		(evt->config_base & M_PERFCTL_CONFIG_MASK) |
		/* Make sure interrupt enabled. */
		M_PERFCTL_INTERRUPT_ENABLE;
	if (IS_ENABLED(CONFIG_CPU_BMIPS5000))
		/* enable the counter for the calling thread */
		cpuc->saved_ctrl[idx] |=
			(1 << (12 + vpe_id())) | M_PERFCTL_TC;

	/*
	 * We do not actually let the counter run. Leave it until start().
	 */
}

static void mipsxx_pmu_disable_event(int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	local_irq_save(flags);
	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
		~M_PERFCTL_COUNT_EVENT_WHENEVER;
	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
	local_irq_restore(flags);
}

static int mipspmu_event_set_period(struct perf_event *event,
				    struct hw_perf_event *hwc,
				    int idx)
{
	u64 left = local64_read(&hwc->period_left);
	u64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely((left + period) & (1ULL << 63))) {
		/* left underflowed by more than period. */
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	} else if (unlikely((left + period) <= period)) {
		/* left underflowed by less than period. */
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > mipspmu.max_period) {
		left = mipspmu.max_period;
		local64_set(&hwc->period_left, left);
	}

	local64_set(&hwc->prev_count, mipspmu.overflow - left);

	mipspmu.write_counter(idx, mipspmu.overflow - left);

	perf_event_update_userpage(event);

	return ret;
}
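
/*
 * Note (informational): the counter is programmed to (overflow - left),
 * so it reaches the overflow value, and raises the counter interrupt,
 * after exactly "left" more events; that is how the sample period set
 * above is enforced in hardware.
 */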

static void mipspmu_event_update(struct perf_event *event,
				 struct hw_perf_event *hwc,
				 int idx)
{
	u64 prev_raw_count, new_raw_count;
	u64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = mipspmu.read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
				new_raw_count) != prev_raw_count)
		goto again;

	delta = new_raw_count - prev_raw_count;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}

static void mipspmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* Set the period for the event. */
	mipspmu_event_set_period(event, hwc, hwc->idx);

	/* Enable the event. */
	mipsxx_pmu_enable_event(hwc, hwc->idx);
}

static void mipspmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* We are working on a local event. */
		mipsxx_pmu_disable_event(hwc->idx);
		barrier();
		mipspmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static int mipspmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* Look for a free counter for this event. */
	idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then
	 * make sure it is disabled.
	 */
	event->hw.idx = idx;
	mipsxx_pmu_disable_event(idx);
	cpuc->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		mipspmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static void mipspmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	mipspmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void mipspmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	mipspmu_event_update(event, hwc, hwc->idx);
}

static void mipspmu_enable(struct pmu *pmu)
{
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	write_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
}

/*
 * MIPS performance counters can be per-TC. The control registers can
 * not be directly accessed across CPUs. Hence if we want to do global
 * control, we need cross CPU calls. on_each_cpu() can help us, but we
 * can not make sure this function is called with interrupts enabled. So
 * here we pause local counters and then grab a rwlock and leave the
 * counters on other CPUs alone. If any counter interrupt is raised while
 * we own the write lock, simply pause local counters on that CPU and
 * spin in the handler. Also we know we won't be switched to another
 * CPU after pausing local counters and before grabbing the lock.
 */
static void mipspmu_disable(struct pmu *pmu)
{
	pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	write_lock(&pmuint_rwlock);
#endif
}
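
/*
 * Summary of the scheme above (informational): pmu_disable() pauses the
 * local counters and then takes pmuint_rwlock for writing, while the
 * shared interrupt handler takes it for reading with its own counters
 * already paused, so the handler simply waits until pmu_enable() drops
 * the write lock.
 */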

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);

static int mipspmu_get_irq(void)
{
	int err;

	if (mipspmu.irq >= 0) {
		/* Request my own irq handler. */
		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
			IRQF_PERCPU | IRQF_NOBALANCING,
			"mips_perf_pmu", NULL);
		if (err) {
			pr_warning("Unable to request IRQ%d for MIPS "
				   "performance counters!\n", mipspmu.irq);
		}
	} else if (cp0_perfcount_irq < 0) {
		/*
		 * We are sharing the irq number with the timer interrupt.
		 */
		save_perf_irq = perf_irq;
		perf_irq = mipsxx_pmu_handle_shared_irq;
		err = 0;
	} else {
		pr_warning("The platform hasn't properly defined its "
			"interrupt controller.\n");
		err = -ENOENT;
	}

	return err;
}

static void mipspmu_free_irq(void)
{
	if (mipspmu.irq >= 0)
		free_irq(mipspmu.irq, NULL);
	else if (cp0_perfcount_irq < 0)
		perf_irq = save_perf_irq;
}

/*
 * mipsxx/rm9000/loongson2 have different performance counters, so they
 * have their own specific low-level init routines.
 */
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events,
				&pmu_reserve_mutex)) {
		/*
		 * We must not call the destroy function with interrupts
		 * disabled.
		 */
		on_each_cpu(reset_counters,
			(void *)(long)mipspmu.num_counters, 1);
		mipspmu_free_irq();
		mutex_unlock(&pmu_reserve_mutex);
	}
}

static int mipspmu_event_init(struct perf_event *event)
{
	int err = 0;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (event->cpu >= nr_cpumask_bits ||
	    (event->cpu >= 0 && !cpu_online(event->cpu)))
		return -ENODEV;

	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0)
			err = mipspmu_get_irq();

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return err;

	return __hw_perf_event_init(event);
}

static struct pmu pmu = {
	.pmu_enable	= mipspmu_enable,
	.pmu_disable	= mipspmu_disable,
	.event_init	= mipspmu_event_init,
	.add		= mipspmu_add,
	.del		= mipspmu_del,
	.start		= mipspmu_start,
	.stop		= mipspmu_stop,
	.read		= mipspmu_read,
};

static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
	/*
	 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits
	 * for event_id.
	 */
#ifdef CONFIG_MIPS_MT_SMP
	return ((unsigned int)pev->range << 24) |
		(pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#else
	return (pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#endif
}
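
/*
 * Worked example (for illustration only): an event with range P (2),
 * cntr_mask CNTR_ODD and event_id 0x15 encodes, on an MT_SMP kernel, to
 * (2 << 24) | (0xaaaaaaaa & 0xffff00) | 0x15 == 0x02aaaa15; the decode
 * side in mipsxx_pmu_alloc_counter() and mipsxx_pmu_enable_event() picks
 * those fields back out of hwc->event_base.
 */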

static const struct mips_perf_event *mipspmu_map_general_event(int idx)
{
	if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
		return ERR_PTR(-EOPNOTSUPP);
	return &(*mipspmu.general_event_map)[idx];
}

static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct mips_perf_event *pev;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pev = &((*mipspmu.cache_event_map)
					[cache_type]
					[cache_op]
					[cache_result]);

	if (pev->cntr_mask == 0)
		return ERR_PTR(-EOPNOTSUPP);

	return pev;
}

static int validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_cpuc;

	memset(&fake_cpuc, 0, sizeof(fake_cpuc));

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
			return -EINVAL;
	}

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
		return -EINVAL;

	return 0;
}

/* This is needed by specific irq handlers in perf_event_*.c */
static void handle_associated_event(struct cpu_hw_events *cpuc,
				    int idx, struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct perf_event *event = cpuc->events[idx];
	struct hw_perf_event *hwc = &event->hw;

	mipspmu_event_update(event, hwc, idx);
	data->period = event->hw.last_period;
	if (!mipspmu_event_set_period(event, hwc, idx))
		return;

	if (perf_event_overflow(event, data, regs))
		mipsxx_pmu_disable_event(idx);
}

static int __n_counters(void)
{
	if (!(read_c0_config1() & M_CONFIG1_PC))
		return 0;
	if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
		return 1;
	if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
		return 2;
	if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
		return 3;

	return 4;
}

static int n_counters(void)
{
	int counters;

	switch (current_cpu_type()) {
	case CPU_R10000:
		counters = 2;
		break;

	case CPU_R12000:
	case CPU_R14000:
		counters = 4;
		break;

	default:
		counters = __n_counters();
	}

	return counters;
}

static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;

	/* The cases below deliberately fall through to clear every
	 * implemented counter. */
	switch (counters) {
	case 4:
		mipsxx_pmu_write_control(3, 0);
		mipspmu.write_counter(3, 0);
	case 3:
		mipsxx_pmu_write_control(2, 0);
		mipspmu.write_counter(2, 0);
	case 2:
		mipsxx_pmu_write_control(1, 0);
		mipspmu.write_counter(1, 0);
	case 1:
		mipsxx_pmu_write_control(0, 0);
		mipspmu.write_counter(0, 0);
	}
}

/* 24K/34K/1004K cores can share the same event map. */
static const struct mips_perf_event mipsxxcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};

/* 74K core has different branch event code. */
static const struct mips_perf_event mipsxx74Kcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
};

static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
	[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
};

static const struct mips_perf_event bmips5000_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};

static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x18, CNTR_ALL }, /* PAPI_TOT_INS */
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL }, /* PAPI_BR_CN */
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */
};

/* 24K/34K/1004K cores can share the same cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x14, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
},
};

/* 74K core has completely different cache event map. */
static const struct mips_perf_event mipsxx74Kcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x34, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
},
};

/* BMIPS5000 */
static const struct mips_perf_event bmips5000_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 23, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
},
};

static const struct mips_perf_event octeon_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x2b, CNTR_ALL },
		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x30, CNTR_ALL },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x18, CNTR_ALL },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x19, CNTR_ALL },
	},
},
[C(DTLB)] = {
	/*
	 * Only general DTLB misses are counted; use the same event for
	 * read and write.
	 */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL },
	},
},
};

static const struct mips_perf_event xlp_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x31, CNTR_ALL }, /* PAPI_L1_DCR */
		[C(RESULT_MISS)]	= { 0x30, CNTR_ALL }, /* PAPI_L1_LDM */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x2f, CNTR_ALL }, /* PAPI_L1_DCW */
		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL }, /* PAPI_L1_STM */
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
		[C(RESULT_MISS)]	= { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x35, CNTR_ALL }, /* PAPI_L2_DCR */
		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL }, /* PAPI_L2_LDM */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x34, CNTR_ALL }, /* PAPI_L2_DCA */
		[C(RESULT_MISS)]	= { 0x36, CNTR_ALL }, /* PAPI_L2_DCM */
	},
},
[C(DTLB)] = {
	/*
	 * Only general DTLB misses are counted; use the same event for
	 * read and write.
	 */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x25, CNTR_ALL },
	},
},
};

#ifdef CONFIG_MIPS_MT_SMP
static void check_and_calc_range(struct perf_event *event,
				 const struct mips_perf_event *pev)
{
	struct hw_perf_event *hwc = &event->hw;

	if (event->cpu >= 0) {
		if (pev->range > V) {
			/*
			 * The user selected an event that is processor
			 * wide, while expecting it to be VPE wide.
			 */
			hwc->config_base |= M_TC_EN_ALL;
		} else {
			/*
			 * FIXME: cpu_data[event->cpu].vpe_id reports 0
			 * for both CPUs.
			 */
			hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
			hwc->config_base |= M_TC_EN_VPE;
		}
	} else
		hwc->config_base |= M_TC_EN_ALL;
}
#else
static void check_and_calc_range(struct perf_event *event,
				 const struct mips_perf_event *pev)
{
}
#endif

static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	const struct mips_perf_event *pev;
	int err;

	/* Returning MIPS event descriptor for generic perf event. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		pev = mipspmu_map_general_event(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		pev = mipspmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		/* We are working on the global raw event. */
		mutex_lock(&raw_event_mutex);
		pev = mipspmu.map_raw_event(event->attr.config);
	} else {
		/* The event type is not (yet) supported. */
		return -EOPNOTSUPP;
	}

	if (IS_ERR(pev)) {
		if (PERF_TYPE_RAW == event->attr.type)
			mutex_unlock(&raw_event_mutex);
		return PTR_ERR(pev);
	}

	/*
	 * We allow max flexibility on how each individual counter shared
	 * by the single CPU operates (the mode exclusion and the range).
	 */
	hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;

	/* Calculate range bits and validate it. */
	if (num_possible_cpus() > 1)
		check_and_calc_range(event, pev);

	hwc->event_base = mipspmu_perf_event_encode(pev);
	if (PERF_TYPE_RAW == event->attr.type)
		mutex_unlock(&raw_event_mutex);

	if (!attr->exclude_user)
		hwc->config_base |= M_PERFCTL_USER;
	if (!attr->exclude_kernel) {
		hwc->config_base |= M_PERFCTL_KERNEL;
		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
		hwc->config_base |= M_PERFCTL_EXL;
	}
	if (!attr->exclude_hv)
		hwc->config_base |= M_PERFCTL_SUPERVISOR;

	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
	/*
	 * The event can belong to another cpu. We do not assign a local
	 * counter for it for now.
	 */
	hwc->idx = -1;
	hwc->config = 0;

	if (!hwc->sample_period) {
		hwc->sample_period = mipspmu.max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event)
		err = validate_group(event);

	event->destroy = hw_perf_event_destroy;

	if (err)
		event->destroy(event);

	return err;
}

static void pause_local_counters(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int ctr = mipspmu.num_counters;
	unsigned long flags;

	local_irq_save(flags);
	do {
		ctr--;
		cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
	} while (ctr > 0);
	local_irq_restore(flags);
}

static void resume_local_counters(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int ctr = mipspmu.num_counters;

	do {
		ctr--;
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
	} while (ctr > 0);
}

static int mipsxx_pmu_handle_shared_irq(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_sample_data data;
	unsigned int counters = mipspmu.num_counters;
	u64 counter;
	int handled = IRQ_NONE;
	struct pt_regs *regs;

	if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
		return handled;
	/*
	 * First we pause the local counters, so that when we are locked
	 * here, the counters are all paused. When it gets locked due to
	 * perf_disable(), the timer interrupt handler will be delayed.
	 *
	 * See also mipsxx_pmu_start().
	 */
	pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	read_lock(&pmuint_rwlock);
#endif

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0, 0);

	/*
	 * The cases below deliberately fall through, so every counter
	 * implemented on this CPU gets checked for an overflow.
	 */
	switch (counters) {
#define HANDLE_COUNTER(n)						\
	case n + 1:							\
		if (test_bit(n, cpuc->used_mask)) {			\
			counter = mipspmu.read_counter(n);		\
			if (counter & mipspmu.overflow) {		\
				handle_associated_event(cpuc, n, &data, regs); \
				handled = IRQ_HANDLED;			\
			}						\
		}
	HANDLE_COUNTER(3)
	HANDLE_COUNTER(2)
	HANDLE_COUNTER(1)
	HANDLE_COUNTER(0)
	}

	/*
	 * Do all the work for the pending perf events. We can do this
	 * in here because the performance counter interrupt is a regular
	 * interrupt, not NMI.
	 */
	if (handled == IRQ_HANDLED)
		irq_work_run();

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	read_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();

	return handled;
}

static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
{
	return mipsxx_pmu_handle_shared_irq();
}

/* 24K */
#define IS_BOTH_COUNTERS_24K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)

/* 34K */
#define IS_BOTH_COUNTERS_34K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_34K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_34K_EVENT(r)	((r) == 47)
#endif

/* 74K */
#define IS_BOTH_COUNTERS_74K_EVENT(b)					\
	((b) == 0 || (b) == 1)

/* 1004K */
#define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_1004K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
#endif

/* BMIPS5000 */
#define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b)				\
	((b) == 0 || (b) == 1)

/*
 * User can use 0-255 raw events, where 0-127 for the events of even
 * counters, and 128-255 for odd counters. Note that bit 7 is used to
 * indicate the parity. So, for example, when user wants to take the
 * Event Num of 15 for odd counters (by referring to the user manual),
 * then 128 needs to be added to 15 as the input for the event config,
 * i.e., 143 (0x8F) to be used.
 */
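/*
 * For example (illustrative, using perf's generic raw-event syntax),
 * "perf stat -e r8f ..." would request raw event 15 on an odd counter
 * on these cores.
 */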

static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	raw_event.event_id = base_id;

	switch (current_cpu_type()) {
	case CPU_24K:
		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		/*
		 * This is actually doing nothing. Non-multithreading
		 * CPUs will not check and calculate the range.
		 */
		raw_event.range = P;
#endif
		break;
	case CPU_34K:
		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_74K:
		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_1004K:
		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_BMIPS5000:
		if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
	}

	return &raw_event;
}

static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	raw_event.cntr_mask = CNTR_ALL;
	raw_event.event_id = base_id;

	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
		if (base_id > 0x42)
			return ERR_PTR(-EOPNOTSUPP);
	} else {
		if (base_id > 0x3a)
			return ERR_PTR(-EOPNOTSUPP);
	}

	switch (base_id) {
	case 0x00:
	case 0x0f:
	case 0x1e:
	case 0x1f:
	case 0x2f:
	case 0x34:
	case 0x3b ... 0x3f:
		return ERR_PTR(-EOPNOTSUPP);
	default:
		break;
	}

	return &raw_event;
}

static const struct mips_perf_event *xlp_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;

	/* Only 1-63 are defined */
	if ((raw_id < 0x01) || (raw_id > 0x3f))
		return ERR_PTR(-EOPNOTSUPP);

	raw_event.cntr_mask = CNTR_ALL;
	raw_event.event_id = raw_id;

	return &raw_event;
}

static int __init
init_hw_perf_events(void)
{
	int counters, irq;
	int counter_bits;

	pr_info("Performance counters: ");

	counters = n_counters();
	if (counters == 0) {
		pr_cont("No available PMU.\n");
		return -ENODEV;
	}

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1 << 19);
	if (!cpu_has_mipsmt_pertccounters)
		counters = counters_total_to_per_cpu(counters);
#endif

#ifdef MSC01E_INT_BASE
	if (cpu_has_veic) {
		/*
		 * Using platform specific interrupt controller defines.
		 */
		irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
	} else {
#endif
	if ((cp0_perfcount_irq >= 0) &&
			(cp0_compare_irq != cp0_perfcount_irq))
		irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	else
		irq = -1;
#ifdef MSC01E_INT_BASE
	}
#endif

	mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;

	switch (current_cpu_type()) {
	case CPU_24K:
		mipspmu.name = "mips/24K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_34K:
		mipspmu.name = "mips/34K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_74K:
		mipspmu.name = "mips/74K";
		mipspmu.general_event_map = &mipsxx74Kcore_event_map;
		mipspmu.cache_event_map = &mipsxx74Kcore_cache_map;
		break;
	case CPU_1004K:
		mipspmu.name = "mips/1004K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_LOONGSON1:
		mipspmu.name = "mips/loongson1";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
		mipspmu.name = "octeon";
		mipspmu.general_event_map = &octeon_event_map;
		mipspmu.cache_event_map = &octeon_cache_map;
		mipspmu.map_raw_event = octeon_pmu_map_raw_event;
		break;
	case CPU_BMIPS5000:
		mipspmu.name = "BMIPS5000";
		mipspmu.general_event_map = &bmips5000_event_map;
		mipspmu.cache_event_map = &bmips5000_cache_map;
		break;
	case CPU_XLP:
		mipspmu.name = "xlp";
		mipspmu.general_event_map = &xlp_event_map;
		mipspmu.cache_event_map = &xlp_cache_map;
		mipspmu.map_raw_event = xlp_pmu_map_raw_event;
		break;
	default:
		pr_cont("Either hardware does not support performance "
			"counters, or not yet implemented.\n");
		return -ENODEV;
	}

	mipspmu.num_counters = counters;
	mipspmu.irq = irq;

	if (read_c0_perfctrl0() & M_PERFCTL_WIDE) {
		mipspmu.max_period = (1ULL << 63) - 1;
		mipspmu.valid_count = (1ULL << 63) - 1;
		mipspmu.overflow = 1ULL << 63;
		mipspmu.read_counter = mipsxx_pmu_read_counter_64;
		mipspmu.write_counter = mipsxx_pmu_write_counter_64;
		counter_bits = 64;
	} else {
		mipspmu.max_period = (1ULL << 31) - 1;
		mipspmu.valid_count = (1ULL << 31) - 1;
		mipspmu.overflow = 1ULL << 31;
		mipspmu.read_counter = mipsxx_pmu_read_counter;
		mipspmu.write_counter = mipsxx_pmu_write_counter;
		counter_bits = 32;
	}

	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	pr_cont("%s PMU enabled, %d %d-bit counters available to each "
		"CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
		irq < 0 ? " (share with timer interrupt)" : "");

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
early_initcall(init_hw_perf_events);