#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
 * code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>

/*
 * ARMv6 supports a maximum of 3 events, starting from index 0. If we add
 * another platform that supports more, we need to increase this to be the
 * largest of all platforms.
 *
 * ARMv7 supports up to 32 events:
 *  cycle counter CCNT + 31 events counters CNT0..30.
 *  Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
 */
#define ARMPMU_MAX_HWEVENTS	32

/* The events for a given CPU. */
struct cpu_hw_events {
	/*
	 * The events that are active on the CPU for the given index.
	 */
	struct perf_event *events[ARMPMU_MAX_HWEVENTS];

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];

	/*
	 * Hardware lock to serialize accesses to PMU registers. Needed for the
	 * read/modify/write sequences.
	 */
	raw_spinlock_t pmu_lock;
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

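/*
 * Per-CPU-type PMU description. The architecture back-ends included at
 * the bottom of this file (xscale, ARMv6, ARMv7) fill in these callbacks
 * and the common code below drives whichever one was probed, via the
 * global 'armpmu' pointer.
 */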
struct arm_pmu {
	enum arm_perf_pmu_ids id;
	cpumask_t	active_irqs;
	const char	*name;
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
	void		(*enable)(struct hw_perf_event *evt, int idx);
	void		(*disable)(struct hw_perf_event *evt, int idx);
	int		(*get_event_idx)(struct cpu_hw_events *cpuc,
					 struct hw_perf_event *hwc);
	int		(*set_event_filter)(struct hw_perf_event *evt,
					    struct perf_event_attr *attr);
	u32		(*read_counter)(int idx);
	void		(*write_counter)(int idx, u32 val);
	void		(*start)(void);
	void		(*stop)(void);
	void		(*reset)(void *);
	const unsigned	(*cache_map)[PERF_COUNT_HW_CACHE_MAX]
				    [PERF_COUNT_HW_CACHE_OP_MAX]
				    [PERF_COUNT_HW_CACHE_RESULT_MAX];
	const unsigned	(*event_map)[PERF_COUNT_HW_MAX];
	u32		raw_event_mask;
	int		num_events;
	atomic_t	active_events;
	struct mutex	reserve_mutex;
	u64		max_period;
	struct platform_device	*plat_device;
	struct cpu_hw_events	*(*get_hw_events)(void);
};

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *armpmu;

enum arm_perf_pmu_ids
armpmu_get_pmu_id(void)
{
	int id = -ENODEV;

	if (armpmu != NULL)
		id = armpmu->id;

	return id;
}
EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);

int
armpmu_get_max_events(void)
{
	int max_events = 0;

	if (armpmu != NULL)
		max_events = armpmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(armpmu_get_max_events);

int perf_num_counters(void)
{
	return armpmu_get_max_events();
}
EXPORT_SYMBOL_GPL(perf_num_counters);

#define HW_OP_UNSUPPORTED		0xFFFF

#define C(_x) \
	PERF_COUNT_HW_CACHE_##_x

#define CACHE_OP_UNSUPPORTED		0xFFFF

static int
armpmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*armpmu->cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_event(u64 config)
{
	int mapping = (*armpmu->event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : mapping;
}

static int
armpmu_map_raw_event(u64 config)
{
	return (int)(config & armpmu->raw_event_mask);
}

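/*
 * Program the counter to overflow after 'left' more events: the counter
 * counts up, so we load it with -left (truncated to 32 bits) and take
 * the overflow interrupt when it wraps. 'left' is clamped to max_period.
 */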
static int
armpmu_event_set_period(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)armpmu->max_period)
		left = armpmu->max_period;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

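/*
 * Fold the delta since the last read into the event count. The cmpxchg
 * loop retries if prev_count was updated underneath us (e.g. by the
 * overflow handler); when called for an overflow, the delta accounts
 * for the counter having wrapped past max_period.
 */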
static u64
armpmu_event_update(struct perf_event *event,
		    struct hw_perf_event *hwc,
		    int idx, int overflow)
{
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	new_raw_count &= armpmu->max_period;
	prev_raw_count &= armpmu->max_period;

	if (overflow)
		delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
	else
		delta = new_raw_count - prev_raw_count;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	armpmu_event_update(event, hwc, hwc->idx, 0);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(hwc, hwc->idx);
		barrier(); /* why? */
		armpmu_event_update(event, hwc, hwc->idx, 0);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void
armpmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event, hwc, hwc->idx);
	armpmu->enable(hwc, hwc->idx);
}

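/*
 * pmu::del callback: stop the event, then release its counter index and
 * remove it from the per-CPU event table.
 */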
static void
armpmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0);

	armpmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static int
armpmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(hwc, idx);
	cpuc->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static struct pmu pmu;

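/*
 * Group validation: simulate scheduling the leader, its siblings and the
 * new event onto an empty, fake set of hardware counters. The group only
 * fits if get_event_idx() can place every member.
 */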
static int
validate_event(struct cpu_hw_events *cpuc,
	       struct perf_event *event)
{
	struct hw_perf_event fake_event = event->hw;
	struct pmu *leader_pmu = event->group_leader->pmu;

	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
		return 1;

	return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_pmu;

	memset(&fake_pmu, 0, sizeof(fake_pmu));

	if (!validate_event(&fake_pmu, leader))
		return -ENOSPC;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -ENOSPC;
	}

	if (!validate_event(&fake_pmu, event))
		return -ENOSPC;

	return 0;
}

static irqreturn_t armpmu_platform_irq(int irq, void *dev)
{
	struct platform_device *plat_device = armpmu->plat_device;
	struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);

	return plat->handle_irq(irq, dev, armpmu->handle_irq);
}

static void
armpmu_release_hardware(void)
{
	int i, irq, irqs;
	struct platform_device *pmu_device = armpmu->plat_device;

	irqs = min(pmu_device->num_resources, num_possible_cpus());

	for (i = 0; i < irqs; ++i) {
		if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
			continue;
		irq = platform_get_irq(pmu_device, i);
		if (irq >= 0)
			free_irq(irq, NULL);
	}

	release_pmu(ARM_PMU_DEVICE_CPU);
}

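/*
 * Claim the PMU and request one interrupt per possible CPU (bounded by
 * the platform resources). Each IRQ is affined to its CPU; a single IRQ
 * whose affinity cannot be set is tolerated on the assumption that we
 * are running on a uniprocessor machine.
 */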
static int
armpmu_reserve_hardware(void)
{
	struct arm_pmu_platdata *plat;
	irq_handler_t handle_irq;
	int i, err, irq, irqs;
	struct platform_device *pmu_device = armpmu->plat_device;

	err = reserve_pmu(ARM_PMU_DEVICE_CPU);
	if (err) {
		pr_warning("unable to reserve pmu\n");
		return err;
	}

	plat = dev_get_platdata(&pmu_device->dev);
	if (plat && plat->handle_irq)
		handle_irq = armpmu_platform_irq;
	else
		handle_irq = armpmu->handle_irq;

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (irqs < 1) {
		pr_err("no irqs for PMUs defined\n");
		return -ENODEV;
	}

	for (i = 0; i < irqs; ++i) {
		err = 0;
		irq = platform_get_irq(pmu_device, i);
		if (irq < 0)
			continue;

		/*
		 * If we have a single PMU interrupt that we can't shift,
		 * assume that we're running on a uniprocessor machine and
		 * continue. Otherwise, continue without this interrupt.
		 */
		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
				   irq, i);
			continue;
		}

		err = request_irq(irq, handle_irq,
				  IRQF_DISABLED | IRQF_NOBALANCING,
				  "arm-pmu", NULL);
		if (err) {
			pr_err("unable to request IRQ%d for ARM PMU counters\n",
			       irq);
			armpmu_release_hardware();
			return err;
		}

		cpumask_set_cpu(i, &armpmu->active_irqs);
	}

	return 0;
}

static void
hw_perf_event_destroy(struct perf_event *event)
{
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware();
		mutex_unlock(pmu_reserve_mutex);
	}
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

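/*
 * Translate the generic perf config into a hardware event number via the
 * hardware, cache or raw event maps, apply any mode-exclusion filter and
 * fall back to a sample period of max_period if none was requested.
 */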
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int mapping, err;

	/* Decode the generic type into an ARM event identifier. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		mapping = armpmu_map_event(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		mapping = armpmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		mapping = armpmu_map_raw_event(event->attr.config);
	} else {
		pr_debug("event type %x not supported\n", event->attr.type);
		return -EOPNOTSUPP;
	}

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;
	hwc->config		= 0;
	hwc->event_base		= 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EPERM;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!hwc->sample_period) {
		hwc->sample_period  = armpmu->max_period;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event) {
		err = validate_group(event);
		if (err)
			return -EINVAL;
	}

	return err;
}

static int armpmu_event_init(struct perf_event *event)
{
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware();

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

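/*
 * pmu::pmu_enable/pmu_disable callbacks: enabling walks the active
 * counters and re-enables each of them before starting the PMU;
 * disabling simply stops the whole PMU.
 */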
static void armpmu_enable(struct pmu *pmu)
{
	/* Enable all of the perf events on hardware. */
	int idx, enabled = 0;
	struct cpu_hw_events *cpuc = armpmu->get_hw_events();

	for (idx = 0; idx < armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];

		if (!event)
			continue;

		armpmu->enable(&event->hw, idx);
		enabled = 1;
	}

	if (enabled)
		armpmu->start();
}

static void armpmu_disable(struct pmu *pmu)
{
	armpmu->stop();
}

static struct pmu pmu = {
	.pmu_enable	= armpmu_enable,
	.pmu_disable	= armpmu_disable,
	.event_init	= armpmu_event_init,
	.add		= armpmu_add,
	.del		= armpmu_del,
	.start		= armpmu_start,
	.stop		= armpmu_stop,
	.read		= armpmu_read,
};

static void __init armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);
}

/* Include the PMU-specific implementations. */
#include "perf_event_xscale.c"
#include "perf_event_v6.c"
#include "perf_event_v7.c"

/*
 * Ensure the PMU has sane values out of reset.
 * This requires SMP to be available, so exists as a separate initcall.
 */
static int __init
armpmu_reset(void)
{
	if (armpmu && armpmu->reset)
		return on_each_cpu(armpmu->reset, NULL, 1);
	return 0;
}
arch_initcall(armpmu_reset);

/*
 * PMU platform driver and devicetree bindings.
 */
static struct of_device_id armpmu_of_device_ids[] = {
	{.compatible = "arm,cortex-a9-pmu"},
	{.compatible = "arm,cortex-a8-pmu"},
	{.compatible = "arm,arm1136-pmu"},
	{.compatible = "arm,arm1176-pmu"},
	{},
};

static struct platform_device_id armpmu_plat_device_ids[] = {
	{.name = "arm-pmu"},
	{},
};

static int __devinit armpmu_device_probe(struct platform_device *pdev)
{
	armpmu->plat_device = pdev;
	return 0;
}

static struct platform_driver armpmu_driver = {
	.driver		= {
		.name	= "arm-pmu",
		.of_match_table = armpmu_of_device_ids,
	},
	.probe		= armpmu_device_probe,
	.id_table	= armpmu_plat_device_ids,
};

static int __init register_pmu_driver(void)
{
	return platform_driver_register(&armpmu_driver);
}
device_initcall(register_pmu_driver);

static struct cpu_hw_events *armpmu_get_cpu_events(void)
{
	return &__get_cpu_var(cpu_hw_events);
}

static void __init cpu_pmu_init(struct arm_pmu *armpmu)
{
	int cpu;
	for_each_possible_cpu(cpu) {
		struct cpu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
		raw_spin_lock_init(&events->pmu_lock);
	}
	armpmu->get_hw_events = armpmu_get_cpu_events;
}

/*
 * CPU PMU identification and registration.
 */

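/*
 * Probe the CPU type from the CPUID register, pick the matching back-end
 * init routine and, if one succeeds, register the "cpu" PMU with the
 * perf core.
 */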
static int __init
init_hw_perf_events(void)
{
	unsigned long cpuid = read_cpuid_id();
	unsigned long implementor = (cpuid & 0xFF000000) >> 24;
	unsigned long part_number = (cpuid & 0xFFF0);

	/* ARM Ltd CPUs. */
	if (0x41 == implementor) {
		switch (part_number) {
		case 0xB360:	/* ARM1136 */
		case 0xB560:	/* ARM1156 */
		case 0xB760:	/* ARM1176 */
			armpmu = armv6pmu_init();
			break;
		case 0xB020:	/* ARM11mpcore */
			armpmu = armv6mpcore_pmu_init();
			break;
		case 0xC080:	/* Cortex-A8 */
			armpmu = armv7_a8_pmu_init();
			break;
		case 0xC090:	/* Cortex-A9 */
			armpmu = armv7_a9_pmu_init();
			break;
		case 0xC050:	/* Cortex-A5 */
			armpmu = armv7_a5_pmu_init();
			break;
		case 0xC0F0:	/* Cortex-A15 */
			armpmu = armv7_a15_pmu_init();
			break;
		}
	/* Intel CPUs [xscale]. */
	} else if (0x69 == implementor) {
		part_number = (cpuid >> 13) & 0x7;
		switch (part_number) {
		case 1:
			armpmu = xscale1pmu_init();
			break;
		case 2:
			armpmu = xscale2pmu_init();
			break;
		}
	}

	if (armpmu) {
		pr_info("enabled with %s PMU driver, %d counters available\n",
			armpmu->name, armpmu->num_events);
		cpu_pmu_init(armpmu);
		armpmu_init(armpmu);
		perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
	} else {
		pr_info("no hardware support available\n");
	}

	return 0;
}
early_initcall(init_hw_perf_events);

/*
 * Callchain handling code.
 */

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long sp;
	unsigned long lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry *entry)
{
	struct frame_tail buftail;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;
	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= buftail.fp)
		return NULL;

	return buftail.fp - 1;
}

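/*
 * Walk the user stack via the saved frame pointers, stopping at the
 * maximum callchain depth or when the frame pointer becomes NULL or
 * misaligned.
 */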
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct frame_tail __user *tail;

	tail = (struct frame_tail __user *)regs->ARM_fp - 1;

	while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
	       tail && !((unsigned long)tail & 0x3))
		tail = user_backtrace(tail, entry);
}

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int
callchain_trace(struct stackframe *fr,
		void *data)
{
	struct perf_callchain_entry *entry = data;
	perf_callchain_store(entry, fr->pc);
	return 0;
}

void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct stackframe fr;

	fr.fp = regs->ARM_fp;
	fr.sp = regs->ARM_sp;
	fr.lr = regs->ARM_lr;
	fr.pc = regs->ARM_pc;
	walk_stackframe(&fr, callchain_trace, entry);
}