/*
 * linux/arch/arm/include/asm/pmu.h
 *
 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#ifndef __ARM_PMU_H__
#define __ARM_PMU_H__

#include <linux/interrupt.h>
#include <linux/perf_event.h>
2011-02-08 09:24:36 +05:30
/*
* struct arm_pmu_platdata - ARM PMU platform data
*
2011-03-02 15:00:08 +08:00
* @ handle_irq : an optional handler which will be called from the
* interrupt and passed the address of the low level handler ,
* and can be used to implement any platform specific handling
* before or after calling it .
2012-05-31 13:05:20 -05:00
* @ runtime_resume : an optional handler which will be called by the
* runtime PM framework following a call to pm_runtime_get ( ) .
* Note that if pm_runtime_get ( ) is called more than once in
* succession this handler will only be called once .
* @ runtime_suspend : an optional handler which will be called by the
* runtime PM framework following a call to pm_runtime_put ( ) .
* Note that if pm_runtime_get ( ) is called more than once in
* succession this handler will only be called following the
* final call to pm_runtime_put ( ) that actually disables the
* hardware .
2011-02-08 09:24:36 +05:30
*/
struct arm_pmu_platdata {
irqreturn_t ( * handle_irq ) ( int irq , void * dev ,
irq_handler_t pmu_handler ) ;
2012-05-31 13:05:20 -05:00
int ( * runtime_resume ) ( struct device * dev ) ;
int ( * runtime_suspend ) ( struct device * dev ) ;
2011-02-08 09:24:36 +05:30
} ;
#ifdef CONFIG_HW_PERF_EVENTS
/*
 * The ARMv7 CPU PMU supports up to 32 event counters.
 */
#define ARMPMU_MAX_HWEVENTS		32

/* Sentinel values marking table entries with no hardware mapping. */
#define HW_OP_UNSUPPORTED		0xFFFF
#define C(_x)				PERF_COUNT_HW_CACHE_##_x
#define CACHE_OP_UNSUPPORTED		0xFFFF

/*
 * Initializers that mark every entry of an event map (or cache event
 * map) as unsupported; PMU drivers then override the entries they
 * actually implement. Uses the GNU `[first ... last]` range designator.
 */
#define PERF_MAP_ALL_UNSUPPORTED					\
	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED

#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
[0 ... C(MAX) - 1] = {							\
	[0 ... C(OP_MAX) - 1] = {					\
		[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED,	\
	},								\
}
2011-05-19 10:07:57 +01:00
/* The events for a given PMU register set. */
struct pmu_hw_events {
/*
* The events that are active on the PMU for the given index .
*/
struct perf_event * * events ;
/*
* A 1 bit for an index indicates that the counter is being used for
* an event . A 0 means that the counter can be used .
*/
unsigned long * used_mask ;
/*
* Hardware lock to serialize accesses to PMU registers . Needed for the
* read / modify / write sequences .
*/
raw_spinlock_t pmu_lock ;
} ;
struct arm_pmu {
struct pmu pmu ;
cpumask_t active_irqs ;
2012-07-06 15:45:00 +01:00
char * name ;
2011-05-19 10:07:57 +01:00
irqreturn_t ( * handle_irq ) ( int irq_num , void * dev ) ;
2012-07-30 12:00:02 +01:00
void ( * enable ) ( struct perf_event * event ) ;
void ( * disable ) ( struct perf_event * event ) ;
2011-05-19 10:07:57 +01:00
int ( * get_event_idx ) ( struct pmu_hw_events * hw_events ,
2012-07-30 12:00:02 +01:00
struct perf_event * event ) ;
2014-02-07 21:01:22 +00:00
void ( * clear_event_idx ) ( struct pmu_hw_events * hw_events ,
struct perf_event * event ) ;
2011-05-19 10:07:57 +01:00
int ( * set_event_filter ) ( struct hw_perf_event * evt ,
struct perf_event_attr * attr ) ;
2012-07-30 12:00:02 +01:00
u32 ( * read_counter ) ( struct perf_event * event ) ;
void ( * write_counter ) ( struct perf_event * event , u32 val ) ;
void ( * start ) ( struct arm_pmu * ) ;
void ( * stop ) ( struct arm_pmu * ) ;
2011-05-19 10:07:57 +01:00
void ( * reset ) ( void * ) ;
2012-07-30 12:00:02 +01:00
int ( * request_irq ) ( struct arm_pmu * , irq_handler_t handler ) ;
void ( * free_irq ) ( struct arm_pmu * ) ;
2011-05-19 10:07:57 +01:00
int ( * map_event ) ( struct perf_event * event ) ;
int num_events ;
atomic_t active_events ;
struct mutex reserve_mutex ;
u64 max_period ;
struct platform_device * plat_device ;
struct pmu_hw_events * ( * get_hw_events ) ( void ) ;
} ;
# define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
2012-07-29 12:36:28 +01:00
extern const struct dev_pm_ops armpmu_dev_pm_ops ;
2012-09-21 14:23:47 +01:00
int armpmu_register ( struct arm_pmu * armpmu , int type ) ;
2011-05-19 10:07:57 +01:00
2012-07-30 12:00:02 +01:00
u64 armpmu_event_update ( struct perf_event * event ) ;
2011-05-19 10:07:57 +01:00
2012-07-30 12:00:02 +01:00
int armpmu_event_set_period ( struct perf_event * event ) ;
2011-05-19 10:07:57 +01:00
2012-07-29 12:36:28 +01:00
int armpmu_map_event ( struct perf_event * event ,
const unsigned ( * event_map ) [ PERF_COUNT_HW_MAX ] ,
const unsigned ( * cache_map ) [ PERF_COUNT_HW_CACHE_MAX ]
[ PERF_COUNT_HW_CACHE_OP_MAX ]
[ PERF_COUNT_HW_CACHE_RESULT_MAX ] ,
u32 raw_event_mask ) ;
#endif /* CONFIG_HW_PERF_EVENTS */

#endif /* __ARM_PMU_H__ */