#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
 * code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>

#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>
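
/*
 * Map a PERF_TYPE_HW_CACHE config onto a PMU-specific event number. The
 * generic perf encoding is used: cache type in bits 0-7, operation in
 * bits 8-15 and result in bits 16-23, so (as an illustrative example) a
 * config of 0x10002 selects an LL-cache read miss.
 */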
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}
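
/*
 * Reprogram the counter for the next sample period. The counter is loaded
 * with -left so that it overflows after "left" further increments; e.g.
 * left == 1000 on a 32-bit counter is written back as 0xfffffc18. Returns
 * nonzero when a new period was started.
 */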
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	/* The period may have been changed by PERF_EVENT_IOC_PERIOD */
	if (unlikely(period != hwc->last_period))
		left = period - (hwc->last_period - left);

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)armpmu->max_period)
		left = armpmu->max_period;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
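
/*
 * Fold the delta since the last read into event->count. The cmpxchg loop
 * guards against a racing update of prev_count (e.g. from the overflow
 * interrupt), and masking the delta with max_period handles the counter
 * wrapping around.
 */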
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}
static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
	perf_event_update_userpage(event);
}
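
/*
 * Add an event to the PMU: claim a free hardware counter for it and, when
 * PERF_EF_START is given, start it immediately; otherwise the event is
 * left stopped until armpmu_start() is called.
 */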
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;
	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}
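
/*
 * Group validation: check whether a whole event group could be scheduled
 * at once by dry-running the counter allocation against a fake
 * pmu_hw_events that only tracks the used_mask.
 */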
static int
validate_event(struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu *leader_pmu = event->group_leader->pmu;

	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
		return 1;

	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;
	DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(fake_used_mask, 0, sizeof(fake_used_mask));
	fake_pmu.used_mask = fake_used_mask;

	if (!validate_event(&fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(&fake_pmu, event))
		return -EINVAL;

	return 0;
}
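
/*
 * Common PMU interrupt entry point. If the platform supplied a handle_irq
 * hook (for example, to do platform-specific work around a shared or muxed
 * interrupt), let it wrap the PMU driver's handler; otherwise call the
 * driver's handler directly.
 */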
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu = (struct arm_pmu *) dev;
	struct platform_device *plat_device = armpmu->plat_device;
	struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);

	if (plat && plat->handle_irq)
		return plat->handle_irq(irq, dev, armpmu->handle_irq);
	else
		return armpmu->handle_irq(irq, dev);
}

static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	armpmu->free_irq(armpmu);
	pm_runtime_put_sync(&armpmu->plat_device->dev);
}

static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	int err;
	struct platform_device *pmu_device = armpmu->plat_device;

	if (!pmu_device)
		return -ENODEV;

	pm_runtime_get_sync(&pmu_device->dev);
	err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
	if (err) {
		armpmu_release_hardware(armpmu);
		return err;
	}

	return 0;
}

static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!hwc->sample_period) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period = armpmu->max_period >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}
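
/*
 * pmu_enable()/pmu_disable() callbacks used by the perf core. Note that the
 * hardware is only started when at least one event is currently scheduled
 * on a counter (i.e. used_mask is non-empty).
 */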
static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (enabled)
		armpmu->start(armpmu);
}
static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	armpmu->stop(armpmu);
}

#ifdef CONFIG_PM_RUNTIME
static int armpmu_runtime_resume(struct device *dev)
{
	struct arm_pmu_platdata *plat = dev_get_platdata(dev);

	if (plat && plat->runtime_resume)
		return plat->runtime_resume(dev);

	return 0;
}

static int armpmu_runtime_suspend(struct device *dev)
{
	struct arm_pmu_platdata *plat = dev_get_platdata(dev);

	if (plat && plat->runtime_suspend)
		return plat->runtime_suspend(dev);

	return 0;
}
#endif

const struct dev_pm_ops armpmu_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
};

static void armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
	};
}
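
/*
 * Register an ARM PMU with the perf core. A CPU PMU back-end would
 * typically call this from its probe path once the arm_pmu callbacks are
 * filled in, e.g. armpmu_register(cpu_pmu, PERF_TYPE_RAW); the exact call
 * site and type argument depend on the driver.
 */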
int armpmu_register(struct arm_pmu *armpmu, int type)
{
	armpmu_init(armpmu);
	pm_runtime_enable(&armpmu->plat_device->dev);
	pr_info("enabled with %s PMU driver, %d counters available\n",
			armpmu->name, armpmu->num_events);
	return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
}

/*
 * Callchain handling code.
 */

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long sp;
	unsigned long lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry *entry)
{
	struct frame_tail buftail;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;
	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= buftail.fp)
		return NULL;

	return buftail.fp - 1;
}

void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct frame_tail __user *tail;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest os callchain now */
		return;
	}

	tail = (struct frame_tail __user *)regs->ARM_fp - 1;

	while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
	       tail && !((unsigned long)tail & 0x3))
		tail = user_backtrace(tail, entry);
}

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int
callchain_trace(struct stackframe *fr,
		void *data)
{
	struct perf_callchain_entry *entry = data;
	perf_callchain_store(entry, fr->pc);
	return 0;
}

void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct stackframe fr;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest os callchain now */
		return;
	}

	fr.fp = regs->ARM_fp;
	fr.sp = regs->ARM_sp;
	fr.lr = regs->ARM_lr;
	fr.pc = regs->ARM_pc;
	walk_stackframe(&fr, callchain_trace, entry);
}

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		return perf_guest_cbs->get_guest_ip();

	return instruction_pointer(regs);
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
	int misc = 0;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		if (perf_guest_cbs->is_user_mode())
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (user_mode(regs))
			misc |= PERF_RECORD_MISC_USER;
		else
			misc |= PERF_RECORD_MISC_KERNEL;
	}

	return misc;
}