/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * Copied from ARMv6 code, with the low level code inspired
 *  by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 *  a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 *  a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 *  counter and all 4 performance counters together can be reset separately.
 */

#ifdef CONFIG_CPU_V7

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/vfp.h>
#include "../vfp/vfpinstr.h"

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

/*
 * Common ARMv7 event types
 *
 * Note: An implementation may not be able to count all of these events
 * but the encodings are considered to be `reserved' in the case that
 * they are not available.
 */
#define ARMV7_PERFCTR_PMNC_SW_INCR		0x00
#define ARMV7_PERFCTR_L1_ICACHE_REFILL		0x01
#define ARMV7_PERFCTR_ITLB_REFILL		0x02
#define ARMV7_PERFCTR_L1_DCACHE_REFILL		0x03
#define ARMV7_PERFCTR_L1_DCACHE_ACCESS		0x04
#define ARMV7_PERFCTR_DTLB_REFILL		0x05
#define ARMV7_PERFCTR_MEM_READ			0x06
#define ARMV7_PERFCTR_MEM_WRITE			0x07
#define ARMV7_PERFCTR_INSTR_EXECUTED		0x08
#define ARMV7_PERFCTR_EXC_TAKEN			0x09
#define ARMV7_PERFCTR_EXC_EXECUTED		0x0A
#define ARMV7_PERFCTR_CID_WRITE			0x0B

/*
 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
 * It counts:
 *  - all (taken) branch instructions,
 *  - instructions that explicitly write the PC,
 *  - exception generating instructions.
 */
#define ARMV7_PERFCTR_PC_WRITE			0x0C
#define ARMV7_PERFCTR_PC_IMM_BRANCH		0x0D
#define ARMV7_PERFCTR_PC_PROC_RETURN		0x0E
#define ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS	0x0F
#define ARMV7_PERFCTR_PC_BRANCH_MIS_PRED	0x10
#define ARMV7_PERFCTR_CLOCK_CYCLES		0x11
#define ARMV7_PERFCTR_PC_BRANCH_PRED		0x12

/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
#define ARMV7_PERFCTR_MEM_ACCESS		0x13
#define ARMV7_PERFCTR_L1_ICACHE_ACCESS		0x14
#define ARMV7_PERFCTR_L1_DCACHE_WB		0x15
#define ARMV7_PERFCTR_L2_CACHE_ACCESS		0x16
#define ARMV7_PERFCTR_L2_CACHE_REFILL		0x17
#define ARMV7_PERFCTR_L2_CACHE_WB		0x18
#define ARMV7_PERFCTR_BUS_ACCESS		0x19
#define ARMV7_PERFCTR_MEM_ERROR			0x1A
#define ARMV7_PERFCTR_INSTR_SPEC		0x1B
#define ARMV7_PERFCTR_TTBR_WRITE		0x1C
#define ARMV7_PERFCTR_BUS_CYCLES		0x1D

#define ARMV7_PERFCTR_CPU_CYCLES		0xFF

/* ARMv7 Cortex-A8 specific event types */
#define ARMV7_A8_PERFCTR_L2_CACHE_ACCESS	0x43
#define ARMV7_A8_PERFCTR_L2_CACHE_REFILL	0x44
#define ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS	0x50
#define ARMV7_A8_PERFCTR_STALL_ISIDE		0x56

/* ARMv7 Cortex-A9 specific event types */
#define ARMV7_A9_PERFCTR_INSTR_CORE_RENAME	0x68
#define ARMV7_A9_PERFCTR_STALL_ICACHE		0x60
#define ARMV7_A9_PERFCTR_STALL_DISPATCH		0x66

/* ARMv7 Cortex-A5 specific event types */
#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL	0xc2
#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP	0xc3

/* ARMv7 Cortex-A15 specific event types */
#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ		0x40
#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE	0x41
#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ		0x42
#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE	0x43

#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ		0x4C
#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE		0x4D

#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ		0x50
#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE		0x51
#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ		0x52
#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE		0x53

#define ARMV7_A15_PERFCTR_PC_WRITE_SPEC			0x76

/* ARMv7 Cortex-A12 specific event types */
#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ		0x40
#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE	0x41

#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ		0x50
#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE		0x51

#define ARMV7_A12_PERFCTR_PC_WRITE_SPEC			0x76

#define ARMV7_A12_PERFCTR_PF_TLB_REFILL			0xe7

/* ARMv7 Krait specific event types */
#define KRAIT_PMRESR0_GROUP0			0xcc
#define KRAIT_PMRESR1_GROUP0			0xd0
#define KRAIT_PMRESR2_GROUP0			0xd4
#define KRAIT_VPMRESR0_GROUP0			0xd8

#define KRAIT_PERFCTR_L1_ICACHE_ACCESS		0x10011
#define KRAIT_PERFCTR_L1_ICACHE_MISS		0x10010

#define KRAIT_PERFCTR_L1_ITLB_ACCESS		0x12222
#define KRAIT_PERFCTR_L1_DTLB_ACCESS		0x12210

/* ARMv7 Scorpion specific event types */
#define SCORPION_LPM0_GROUP0			0x4c
#define SCORPION_LPM1_GROUP0			0x50
#define SCORPION_LPM2_GROUP0			0x54
#define SCORPION_L2LPM_GROUP0			0x58
#define SCORPION_VLPM_GROUP0			0x5c

#define SCORPION_ICACHE_ACCESS			0x10053
#define SCORPION_ICACHE_MISS			0x10052

#define SCORPION_DTLB_ACCESS			0x12013
#define SCORPION_DTLB_MISS			0x12012

#define SCORPION_ITLB_MISS			0x12021

/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A8_PERFCTR_STALL_ISIDE,
};

static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A9_PERFCTR_STALL_ICACHE,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV7_A9_PERFCTR_STALL_DISPATCH,
};

static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A5 HW events mapping
 */
static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
	/*
	 * The prefetch counters don't differentiate between the I side and the
	 * D side.
	 */
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A15 HW events mapping
 */
static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
};

static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,

	/*
	 * Not all performance counters differentiate between read and write
	 * accesses/misses so we're not always strictly correct, but it's the
	 * best we can do. Writes and reads get combined in these cases.
	 */
	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A7 HW events mapping
 */
static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
};

static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A12 HW events mapping
 */
static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
};

static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	/*
	 * Not all performance counters differentiate between read and write
	 * accesses/misses so we're not always strictly correct, but it's the
	 * best we can do. Writes and reads get combined in these cases.
	 */
	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A12_PERFCTR_PF_TLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Krait HW events mapping
 */
static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= KRAIT_PERFCTR_L1_ICACHE_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,

	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Scorpion HW events mapping
 */
static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= SCORPION_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= SCORPION_ICACHE_MISS,
	/*
	 * Only ITLB misses and DTLB refills are supported. If users want the
	 * DTLB refill misses, a raw counter must be used.
	 */
	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= SCORPION_DTLB_ACCESS,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= SCORPION_DTLB_MISS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)]	= SCORPION_DTLB_ACCESS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= SCORPION_DTLB_MISS,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= SCORPION_ITLB_MISS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= SCORPION_ITLB_MISS,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
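
/*
 * A note on the ARMV7_EVENT_ATTR() helper just below (a sketch of the
 * expansion, not new behaviour): the extra ARMV7_EVENT_ATTR_RESOLVE() level
 * forces the config argument to be macro-expanded before it is stringified,
 * so e.g. ARMV7_EVENT_ATTR(sw_incr, ARMV7_PERFCTR_PMNC_SW_INCR) exposes the
 * string "event=0x00" through sysfs rather than the literal macro name.
 */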
#define ARMV7_EVENT_ATTR_RESOLVE(m) #m
#define ARMV7_EVENT_ATTR(name, config) \
	PMU_EVENT_ATTR_STRING(name, armv7_event_attr_##name, \
			      "event=" ARMV7_EVENT_ATTR_RESOLVE(config))

ARMV7_EVENT_ATTR(sw_incr, ARMV7_PERFCTR_PMNC_SW_INCR);
ARMV7_EVENT_ATTR(l1i_cache_refill, ARMV7_PERFCTR_L1_ICACHE_REFILL);
ARMV7_EVENT_ATTR(l1i_tlb_refill, ARMV7_PERFCTR_ITLB_REFILL);
ARMV7_EVENT_ATTR(l1d_cache_refill, ARMV7_PERFCTR_L1_DCACHE_REFILL);
ARMV7_EVENT_ATTR(l1d_cache, ARMV7_PERFCTR_L1_DCACHE_ACCESS);
ARMV7_EVENT_ATTR(l1d_tlb_refill, ARMV7_PERFCTR_DTLB_REFILL);
ARMV7_EVENT_ATTR(ld_retired, ARMV7_PERFCTR_MEM_READ);
ARMV7_EVENT_ATTR(st_retired, ARMV7_PERFCTR_MEM_WRITE);
ARMV7_EVENT_ATTR(inst_retired, ARMV7_PERFCTR_INSTR_EXECUTED);
ARMV7_EVENT_ATTR(exc_taken, ARMV7_PERFCTR_EXC_TAKEN);
ARMV7_EVENT_ATTR(exc_return, ARMV7_PERFCTR_EXC_EXECUTED);
ARMV7_EVENT_ATTR(cid_write_retired, ARMV7_PERFCTR_CID_WRITE);
ARMV7_EVENT_ATTR(pc_write_retired, ARMV7_PERFCTR_PC_WRITE);
ARMV7_EVENT_ATTR(br_immed_retired, ARMV7_PERFCTR_PC_IMM_BRANCH);
ARMV7_EVENT_ATTR(br_return_retired, ARMV7_PERFCTR_PC_PROC_RETURN);
ARMV7_EVENT_ATTR(unaligned_ldst_retired, ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS);
ARMV7_EVENT_ATTR(br_mis_pred, ARMV7_PERFCTR_PC_BRANCH_MIS_PRED);
ARMV7_EVENT_ATTR(cpu_cycles, ARMV7_PERFCTR_CLOCK_CYCLES);
ARMV7_EVENT_ATTR(br_pred, ARMV7_PERFCTR_PC_BRANCH_PRED);

static struct attribute *armv7_pmuv1_event_attrs[] = {
	&armv7_event_attr_sw_incr.attr.attr,
	&armv7_event_attr_l1i_cache_refill.attr.attr,
	&armv7_event_attr_l1i_tlb_refill.attr.attr,
	&armv7_event_attr_l1d_cache_refill.attr.attr,
	&armv7_event_attr_l1d_cache.attr.attr,
	&armv7_event_attr_l1d_tlb_refill.attr.attr,
	&armv7_event_attr_ld_retired.attr.attr,
	&armv7_event_attr_st_retired.attr.attr,
	&armv7_event_attr_inst_retired.attr.attr,
	&armv7_event_attr_exc_taken.attr.attr,
	&armv7_event_attr_exc_return.attr.attr,
	&armv7_event_attr_cid_write_retired.attr.attr,
	&armv7_event_attr_pc_write_retired.attr.attr,
	&armv7_event_attr_br_immed_retired.attr.attr,
	&armv7_event_attr_br_return_retired.attr.attr,
	&armv7_event_attr_unaligned_ldst_retired.attr.attr,
	&armv7_event_attr_br_mis_pred.attr.attr,
	&armv7_event_attr_cpu_cycles.attr.attr,
	&armv7_event_attr_br_pred.attr.attr,
	NULL
};

static struct attribute_group armv7_pmuv1_events_attr_group = {
	.name = "events",
	.attrs = armv7_pmuv1_event_attrs,
};

static const struct attribute_group *armv7_pmuv1_attr_groups[] = {
	&armv7_pmuv1_events_attr_group,
	NULL
};

ARMV7_EVENT_ATTR(mem_access, ARMV7_PERFCTR_MEM_ACCESS);
ARMV7_EVENT_ATTR(l1i_cache, ARMV7_PERFCTR_L1_ICACHE_ACCESS);
ARMV7_EVENT_ATTR(l1d_cache_wb, ARMV7_PERFCTR_L1_DCACHE_WB);
ARMV7_EVENT_ATTR(l2d_cache, ARMV7_PERFCTR_L2_CACHE_ACCESS);
ARMV7_EVENT_ATTR(l2d_cache_refill, ARMV7_PERFCTR_L2_CACHE_REFILL);
ARMV7_EVENT_ATTR(l2d_cache_wb, ARMV7_PERFCTR_L2_CACHE_WB);
ARMV7_EVENT_ATTR(bus_access, ARMV7_PERFCTR_BUS_ACCESS);
ARMV7_EVENT_ATTR(memory_error, ARMV7_PERFCTR_MEM_ERROR);
ARMV7_EVENT_ATTR(inst_spec, ARMV7_PERFCTR_INSTR_SPEC);
ARMV7_EVENT_ATTR(ttbr_write_retired, ARMV7_PERFCTR_TTBR_WRITE);
ARMV7_EVENT_ATTR(bus_cycles, ARMV7_PERFCTR_BUS_CYCLES);

static struct attribute *armv7_pmuv2_event_attrs[] = {
	&armv7_event_attr_sw_incr.attr.attr,
	&armv7_event_attr_l1i_cache_refill.attr.attr,
	&armv7_event_attr_l1i_tlb_refill.attr.attr,
	&armv7_event_attr_l1d_cache_refill.attr.attr,
	&armv7_event_attr_l1d_cache.attr.attr,
	&armv7_event_attr_l1d_tlb_refill.attr.attr,
	&armv7_event_attr_ld_retired.attr.attr,
	&armv7_event_attr_st_retired.attr.attr,
	&armv7_event_attr_inst_retired.attr.attr,
	&armv7_event_attr_exc_taken.attr.attr,
	&armv7_event_attr_exc_return.attr.attr,
	&armv7_event_attr_cid_write_retired.attr.attr,
	&armv7_event_attr_pc_write_retired.attr.attr,
	&armv7_event_attr_br_immed_retired.attr.attr,
	&armv7_event_attr_br_return_retired.attr.attr,
	&armv7_event_attr_unaligned_ldst_retired.attr.attr,
	&armv7_event_attr_br_mis_pred.attr.attr,
	&armv7_event_attr_cpu_cycles.attr.attr,
	&armv7_event_attr_br_pred.attr.attr,
	&armv7_event_attr_mem_access.attr.attr,
	&armv7_event_attr_l1i_cache.attr.attr,
	&armv7_event_attr_l1d_cache_wb.attr.attr,
	&armv7_event_attr_l2d_cache.attr.attr,
	&armv7_event_attr_l2d_cache_refill.attr.attr,
	&armv7_event_attr_l2d_cache_wb.attr.attr,
	&armv7_event_attr_bus_access.attr.attr,
	&armv7_event_attr_memory_error.attr.attr,
	&armv7_event_attr_inst_spec.attr.attr,
	&armv7_event_attr_ttbr_write_retired.attr.attr,
	&armv7_event_attr_bus_cycles.attr.attr,
	NULL
};

static struct attribute_group armv7_pmuv2_events_attr_group = {
	.name = "events",
	.attrs = armv7_pmuv2_event_attrs,
};

static const struct attribute_group *armv7_pmuv2_attr_groups[] = {
	&armv7_pmuv2_events_attr_group,
	NULL
};

/*
 * Perf Events' indices
 */
#define ARMV7_IDX_CYCLE_COUNTER	0
#define ARMV7_IDX_COUNTER0	1
#define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define ARMV7_MAX_COUNTERS	32
#define ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)

/*
 * ARMv7 low level PMNC access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV7_IDX_TO_COUNTER(x) \
	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
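
/*
 * Worked example of the mapping above (no new behaviour): the cycle counter
 * is perf index 0 (ARMV7_IDX_CYCLE_COUNTER) and is handled specially, while
 * event counter n is perf index n + 1, so ARMV7_IDX_TO_COUNTER(1) selects
 * hardware counter 0 and ARMV7_IDX_TO_COUNTER(3) selects hardware counter 2.
 */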

/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
#define ARMV7_PMNC_N_MASK	0x1f
#define ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */
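
/*
 * Illustration of how the N field is used (this mirrors what
 * armv7_read_num_pmnc_events() does further down): the number of event
 * counters implemented, excluding the cycle counter, can be read back as
 *
 *	(armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK
 */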

/*
 * FLAG: counters overflow flag status reg
 */
#define ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
#define ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define ARMV7_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
#define ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */

/*
 * Event filters for PMUv2
 */
#define ARMV7_EXCLUDE_PL1	(1 << 31)
#define ARMV7_EXCLUDE_USER	(1 << 30)
#define ARMV7_INCLUDE_HYP	(1 << 27)
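
/*
 * Sketch of how these bits combine with an event number: on PMUv2,
 * armv7pmu_set_event_filter() below ORs them into hwc->config_base, so a
 * user-space-only event ends up programmed via PMXEVTYPER as
 * (event | ARMV7_EXCLUDE_PL1), while a kernel-only event uses
 * (event | ARMV7_EXCLUDE_USER).
 */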

static inline u32 armv7_pmnc_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	return val;
}

static inline void armv7_pmnc_write(u32 val)
{
	val &= ARMV7_PMNC_MASK;
	isb();
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (val));
}

static inline int armv7_pmnc_has_overflowed(u32 pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}

static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
	return idx >= ARMV7_IDX_CYCLE_COUNTER &&
		idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
}

static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
}

static inline void armv7_pmnc_select_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
	isb();
}

static inline u32 armv7pmu_read_counter(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u32 value = 0;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	} else {
		armv7_pmnc_select_counter(idx);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
	}

	return value;
}

static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
	} else {
		armv7_pmnc_select_counter(idx);
		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
	}
}

static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
{
	armv7_pmnc_select_counter(idx);
	val &= ARMV7_EVTYPE_MASK;
	asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
}

static inline void armv7_pmnc_enable_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
}

static inline void armv7_pmnc_disable_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
}

static inline void armv7_pmnc_enable_intens(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
}

static inline void armv7_pmnc_disable_intens(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
	isb();
}

static inline u32 armv7_pmnc_getreset_flags(void)
{
	u32 val;

	/* Read */
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write to clear flags */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

	return val;
}

#ifdef DEBUG
static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
{
	u32 val;
	unsigned int cnt;

	pr_info("PMNC registers dump:\n");

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	pr_info("PMNC  =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
	pr_info("CNTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
	pr_info("INTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
	pr_info("FLAGS =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
	pr_info("SELECT=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
	pr_info("CCNT  =0x%08x\n", val);

	for (cnt = ARMV7_IDX_COUNTER0;
			cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		pr_info("CNT[%d] count =0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		pr_info("CNT[%d] evtsel=0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
	}
}
#endif

static void armv7pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return;
	}

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We only need to set the event for the cycle counter if we
	 * have the ability to perform event filtering.
	 */
	if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv7_pmnc_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return;
	}

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmnc;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmnc = armv7_pmnc_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv7_pmnc_has_overflowed(pmnc))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}

static void armv7pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	int idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;

	/* Always place a cycle counter into the cycle counter. */
	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV7_IDX_CYCLE_COUNTER;
	}

	/*
	 * For anything other than a cycle counter, try and use
	 * the events counters
	 */
	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}

	/* The counters are all in use. */
	return -EAGAIN;
}

/*
 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
 */
static int armv7pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;
	if (attr->exclude_user)
		config_base |= ARMV7_EXCLUDE_USER;
	if (attr->exclude_kernel)
		config_base |= ARMV7_EXCLUDE_PL1;
	if (!attr->exclude_hv)
		config_base |= ARMV7_INCLUDE_HYP;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}

static void armv7pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv7_pmnc_disable_counter(idx);
		armv7_pmnc_disable_intens(idx);
	}

	/* Initialize & Reset PMNC: C and P bits */
	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}

static int armv7_a8_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a8_perf_map,
				&armv7_a8_perf_cache_map, 0xFF);
}

static int armv7_a9_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a9_perf_map,
				&armv7_a9_perf_cache_map, 0xFF);
}

static int armv7_a5_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a5_perf_map,
				&armv7_a5_perf_cache_map, 0xFF);
}

static int armv7_a15_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a15_perf_map,
				&armv7_a15_perf_cache_map, 0xFF);
}

static int armv7_a7_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a7_perf_map,
				&armv7_a7_perf_cache_map, 0xFF);
}

static int armv7_a12_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a12_perf_map,
				&armv7_a12_perf_cache_map, 0xFF);
}

static int krait_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &krait_perf_map,
				&krait_perf_cache_map, 0xFFFFF);
}

static int krait_map_event_no_branch(struct perf_event *event)
{
	return armpmu_map_event(event, &krait_perf_map_no_branch,
				&krait_perf_cache_map, 0xFFFFF);
}

static int scorpion_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &scorpion_perf_map,
				&scorpion_perf_cache_map, 0xFFFFF);
}

static void armv7pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->handle_irq	= armv7pmu_handle_irq;
	cpu_pmu->enable		= armv7pmu_enable_event;
	cpu_pmu->disable	= armv7pmu_disable_event;
	cpu_pmu->read_counter	= armv7pmu_read_counter;
	cpu_pmu->write_counter	= armv7pmu_write_counter;
	cpu_pmu->get_event_idx	= armv7pmu_get_event_idx;
	cpu_pmu->start		= armv7pmu_start;
	cpu_pmu->stop		= armv7pmu_stop;
	cpu_pmu->reset		= armv7pmu_reset;
	cpu_pmu->max_period	= (1LLU << 32) - 1;
}

static void armv7_read_num_pmnc_events(void *info)
{
	int *nb_cnt = info;

	/* Read the nb of CNTx counters supported from PMNC */
	*nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

	/* Add the CPU cycles counter */
	*nb_cnt += 1;
}

static int armv7_probe_num_events(struct arm_pmu *arm_pmu)
{
	return smp_call_function_any(&arm_pmu->supported_cpus,
				     armv7_read_num_pmnc_events,
				     &arm_pmu->num_events, 1);
}

static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a8";
	cpu_pmu->map_event	= armv7_a8_map_event;
	cpu_pmu->pmu.attr_groups = armv7_pmuv1_attr_groups;
	return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a9";
	cpu_pmu->map_event	= armv7_a9_map_event;
	cpu_pmu->pmu.attr_groups = armv7_pmuv1_attr_groups;
	return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a5";
	cpu_pmu->map_event	= armv7_a5_map_event;
	cpu_pmu->pmu.attr_groups = armv7_pmuv1_attr_groups;
	return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a15";
	cpu_pmu->map_event	= armv7_a15_map_event;
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	cpu_pmu->pmu.attr_groups = armv7_pmuv2_attr_groups;
	return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a7";
	cpu_pmu->map_event	= armv7_a7_map_event;
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	cpu_pmu->pmu.attr_groups = armv7_pmuv2_attr_groups;
	return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a12";
	cpu_pmu->map_event	= armv7_a12_map_event;
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	cpu_pmu->pmu.attr_groups = armv7_pmuv2_attr_groups;
	return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv7_a12_pmu_init(cpu_pmu);
	cpu_pmu->name = "armv7_cortex_a17";
	cpu_pmu->pmu.attr_groups = armv7_pmuv2_attr_groups;
	return ret;
}
2014-02-08 01:01:23 +04:00
/*
 * Krait Performance Monitor Region Event Selection Register (PMRESRn)
 *
 *            31   30     24     16     8      0
 *            +--------------------------------+
 *  PMRESR0   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
 *            +--------------------------------+
 *  PMRESR1   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
 *            +--------------------------------+
 *  PMRESR2   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
 *            +--------------------------------+
 *  VPMRESR0  | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
 *            +--------------------------------+
 *              EN | G=3  | G=2  | G=1  | G=0
 *
 *  Event Encoding:
 *
 *      hwc->config_base = 0xNRCCG
 *
 *      N  = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
 *      R  = region register
 *      CC = class of events the group G is choosing from
 *      G  = group or particular event
 *
 *  Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
 *
 *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
 *  unit, etc.) while the event code (CC) corresponds to a particular class of
 *  events (interrupts for example). An event code is broken down into
 *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
 *  example).
 */
#define KRAIT_EVENT		(1 << 16)
#define VENUM_EVENT		(2 << 16)
#define KRAIT_EVENT_MASK	(KRAIT_EVENT | VENUM_EVENT)
#define PMRESRn_EN		BIT(31)

#define EVENT_REGION(event)	(((event) >> 12) & 0xf)		/* R */
#define EVENT_GROUP(event)	((event) & 0xf)			/* G */
#define EVENT_CODE(event)	(((event) >> 4) & 0xff)		/* CC */
#define EVENT_VENUM(event)	(!!(event & VENUM_EVENT))	/* N=2 */
#define EVENT_CPU(event)	(!!(event & KRAIT_EVENT))	/* N=1 */
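
/*
 * Worked example of the 0xNRCCG encoding described above. This is purely
 * illustrative: the function name is hypothetical, it is never called by the
 * driver, and the checks only restate the documented decode of 0x12021.
 */
static inline void __maybe_unused krait_decode_example(void)
{
	/* 0x12021: Krait CPU event, PMRESR2, group 1, code 2 */
	BUILD_BUG_ON(EVENT_REGION(0x12021) != 2);	/* R  -> PMRESR2 */
	BUILD_BUG_ON(EVENT_CODE(0x12021) != 0x02);	/* CC -> code 2 */
	BUILD_BUG_ON(EVENT_GROUP(0x12021) != 1);	/* G  -> group 1 */
	BUILD_BUG_ON(!EVENT_CPU(0x12021));		/* N = 1, CPU-side event */
	BUILD_BUG_ON(EVENT_VENUM(0x12021));		/* not a Venum VFP event */
}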

static u32 krait_read_pmresrn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
		break;
	case 1:
		asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
		break;
	case 2:
		asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
		break;
	default:
		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
	}

	return val;
}

static void krait_write_pmresrn(int n, u32 val)
{
	switch (n) {
	case 0:
		asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
		break;
	case 1:
		asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
		break;
	case 2:
		asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
		break;
	default:
		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
	}
}

static u32 venum_read_pmresr(void)
{
	u32 val;

	asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
	return val;
}

static void venum_write_pmresr(u32 val)
{
	asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
}

static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val)
{
	u32 venum_new_val;
	u32 fp_new_val;

	BUG_ON(preemptible());
	/* CPACR Enable CP10 and CP11 access */
	*venum_orig_val = get_copro_access();
	venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
	set_copro_access(venum_new_val);

	/* Enable FPEXC */
	*fp_orig_val = fmrx(FPEXC);
	fp_new_val = *fp_orig_val | FPEXC_EN;
	fmxr(FPEXC, fp_new_val);
}

static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val)
{
	BUG_ON(preemptible());
	/* Restore FPEXC */
	fmxr(FPEXC, fp_orig_val);
	isb();
	/* Restore CPACR */
	set_copro_access(venum_orig_val);
}

static u32 krait_get_pmresrn_event(unsigned int region)
{
	static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
					     KRAIT_PMRESR1_GROUP0,
					     KRAIT_PMRESR2_GROUP0 };
	return pmresrn_table[region];
}

static void krait_evt_setup(int idx, u32 config_base)
{
	u32 val;
	u32 mask;
	u32 vval, fval;
	unsigned int region = EVENT_REGION(config_base);
	unsigned int group = EVENT_GROUP(config_base);
	unsigned int code = EVENT_CODE(config_base);
	unsigned int group_shift;
	bool venum_event = EVENT_VENUM(config_base);

	group_shift = group * 8;
	mask = 0xff << group_shift;

	/* Configure evtsel for the region and group */
	if (venum_event)
		val = KRAIT_VPMRESR0_GROUP0;
	else
		val = krait_get_pmresrn_event(region);
	val += group;
	/* Mix in mode-exclusion bits */
	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
	armv7_pmnc_write_evtsel(idx, val);

	if (venum_event) {
		venum_pre_pmresr(&vval, &fval);
		val = venum_read_pmresr();
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		venum_write_pmresr(val);
		venum_post_pmresr(vval, fval);
	} else {
		val = krait_read_pmresrn(region);
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		krait_write_pmresrn(region, val);
	}
}
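
/*
 * Worked example for the setup above (illustrative values only): for
 * config_base 0x12021 the fields are region 2, group 1, code 2, so
 * group_shift is 8 and mask is 0x0000ff00. The code therefore lands in
 * bits [15:8] of PMRESR2 together with the enable bit:
 * new PMRESR2 = (old & ~0x0000ff00) | (0x02 << 8) | PMRESRn_EN.
 */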

static u32 clear_pmresrn_group(u32 val, int group)
{
	u32 mask;
	int group_shift;

	group_shift = group * 8;
	mask = 0xff << group_shift;
	val &= ~mask;

	/* Don't clear enable bit if entire region isn't disabled */
	if (val & ~PMRESRn_EN)
		return val |= PMRESRn_EN;

	return 0;
}
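
/*
 * For example (illustrative register values): clearing group 1 of a register
 * holding 0x80000203 (EN set, group 1 code 2, group 0 code 3) yields
 * 0x80000003 and keeps EN because group 0 is still programmed, while clearing
 * group 1 of 0x80000200 leaves no codes in use and so returns 0, dropping EN.
 */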

static void krait_clearpmu(u32 config_base)
{
	u32 val;
	u32 vval, fval;
	unsigned int region = EVENT_REGION(config_base);
	unsigned int group = EVENT_GROUP(config_base);
	bool venum_event = EVENT_VENUM(config_base);

	if (venum_event) {
		venum_pre_pmresr(&vval, &fval);
		val = venum_read_pmresr();
		val = clear_pmresrn_group(val, group);
		venum_write_pmresr(val);
		venum_post_pmresr(vval, fval);
	} else {
		val = krait_read_pmresrn(region);
		val = clear_pmresrn_group(val, group);
		krait_write_pmresrn(region, val);
	}
}

static void krait_pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/* Disable counter and interrupt */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Clear pmresr code (if destined for PMNx counters)
	 */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		krait_clearpmu(hwc->config_base);

	/* Disable interrupt for this counter */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void krait_pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We set the event for the cycle counter because we
	 * have the ability to perform event filtering.
	 */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		krait_evt_setup(idx, hwc->config_base);
	else
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/* Enable interrupt for this counter */
	armv7_pmnc_enable_intens(idx);

	/* Enable counter */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void krait_pmu_reset(void *info)
{
	u32 vval, fval;
	struct arm_pmu *cpu_pmu = info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	armv7pmu_reset(info);

	/* Clear all pmresrs */
	krait_write_pmresrn(0, 0);
	krait_write_pmresrn(1, 0);
	krait_write_pmresrn(2, 0);

	venum_pre_pmresr(&vval, &fval);
	venum_write_pmresr(0);
	venum_post_pmresr(vval, fval);

	/* Reset PMxEVNCTCR to sane default */
	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv7_pmnc_select_counter(idx);
		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
	}
}

static int krait_event_to_bit(struct perf_event *event, unsigned int region,
			      unsigned int group)
{
	int bit;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);

	if (hwc->config_base & VENUM_EVENT)
		bit = KRAIT_VPMRESR0_GROUP0;
	else
		bit = krait_get_pmresrn_event(region);
	bit -= krait_get_pmresrn_event(0);
	bit += group;
	/*
	 * Lower bits are reserved for use by the counters (see
	 * armv7pmu_get_event_idx() for more info)
	 */
	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;

	return bit;
}
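
/*
 * In other words (assuming the KRAIT_*_GROUP0 constants defined earlier in
 * this file are spaced so that neighbouring regions cannot collide), every
 * PMRESRn group and every VPMRESR0 group maps to its own bit in
 * cpuc->used_mask above the bits used for counter indices, so taking that
 * bit reserves the group for a single event at a time.
 */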

/*
 * We check for column exclusion constraints here.
 * Two events can't use the same group within a pmresr register.
 */
static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
				   struct perf_event *event)
{
	int idx;
	int bit = -1;
	struct hw_perf_event *hwc = &event->hw;
	unsigned int region = EVENT_REGION(hwc->config_base);
	unsigned int code = EVENT_CODE(hwc->config_base);
	unsigned int group = EVENT_GROUP(hwc->config_base);
	bool venum_event = EVENT_VENUM(hwc->config_base);
	bool krait_event = EVENT_CPU(hwc->config_base);

	if (venum_event || krait_event) {
		/* Ignore invalid events */
		if (group > 3 || region > 2)
			return -EINVAL;
		if (venum_event && (code & 0xe0))
			return -EINVAL;

		bit = krait_event_to_bit(event, region, group);
		if (test_and_set_bit(bit, cpuc->used_mask))
			return -EAGAIN;
	}

	idx = armv7pmu_get_event_idx(cpuc, event);
	if (idx < 0 && bit >= 0)
		clear_bit(bit, cpuc->used_mask);

	return idx;
}
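
/*
 * Column exclusion example (illustrative config_base values): 0x12021 and
 * 0x12051 both decode to region 2, group 1, so whichever of the two events
 * is scheduled second gets -EAGAIN even though the codes (2 and 5) differ;
 * 0x12012 (region 2, group 2) can be scheduled alongside either of them.
 */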

static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
				      struct perf_event *event)
{
	int bit;
	struct hw_perf_event *hwc = &event->hw;
	unsigned int region = EVENT_REGION(hwc->config_base);
	unsigned int group = EVENT_GROUP(hwc->config_base);
	bool venum_event = EVENT_VENUM(hwc->config_base);
	bool krait_event = EVENT_CPU(hwc->config_base);

	if (venum_event || krait_event) {
		bit = krait_event_to_bit(event, region, group);
		clear_bit(bit, cpuc->used_mask);
	}
}

static int krait_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "armv7_krait";
	/* Some early versions of Krait don't support PC write events */
	if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
				  "qcom,no-pc-write"))
		cpu_pmu->map_event = krait_map_event_no_branch;
	else
		cpu_pmu->map_event = krait_map_event;
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	cpu_pmu->reset = krait_pmu_reset;
	cpu_pmu->enable = krait_pmu_enable_event;
	cpu_pmu->disable = krait_pmu_disable_event;
	cpu_pmu->get_event_idx = krait_pmu_get_event_idx;
	cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
	return armv7_probe_num_events(cpu_pmu);
}

/*
 * Scorpion Local Performance Monitor Register (LPMn)
 *
 *            31   30     24     16     8      0
 *            +--------------------------------+
 *  LPM0      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
 *            +--------------------------------+
 *  LPM1      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
 *            +--------------------------------+
 *  LPM2      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
 *            +--------------------------------+
 *  L2LPM     | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 3
 *            +--------------------------------+
 *  VLPM      | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
 *            +--------------------------------+
 *              EN | G=3  | G=2  | G=1  | G=0
 *
 *
 *  Event Encoding:
 *
 *      hwc->config_base = 0xNRCCG
 *
 *      N  = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM)
 *      R  = region register
 *      CC = class of events the group G is choosing from
 *      G  = group or particular event
 *
 *  Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
 *
 *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
 *  unit, etc.) while the event code (CC) corresponds to a particular class of
 *  events (interrupts for example). An event code is broken down into
 *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
 *  example).
 */
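
/*
 * The field layout is identical to the Krait encoding above, so the same
 * EVENT_REGION/EVENT_CODE/EVENT_GROUP/EVENT_VENUM/EVENT_CPU helpers are
 * reused below; only the region count (four LPM registers plus VLPM) and
 * the system-register accessors differ.
 */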

static u32 scorpion_read_pmresrn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
		break;
	case 1:
		asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
		break;
	case 2:
		asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
		break;
	case 3:
		asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
		break;
	default:
		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
	}

	return val;
}

static void scorpion_write_pmresrn(int n, u32 val)
{
	switch (n) {
	case 0:
		asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
		break;
	case 1:
		asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
		break;
	case 2:
		asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
		break;
	case 3:
		asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
		break;
	default:
		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
	}
}

static u32 scorpion_get_pmresrn_event(unsigned int region)
{
	static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0,
					     SCORPION_LPM1_GROUP0,
					     SCORPION_LPM2_GROUP0,
					     SCORPION_L2LPM_GROUP0 };
	return pmresrn_table[region];
}

static void scorpion_evt_setup(int idx, u32 config_base)
{
	u32 val;
	u32 mask;
	u32 vval, fval;
	unsigned int region = EVENT_REGION(config_base);
	unsigned int group = EVENT_GROUP(config_base);
	unsigned int code = EVENT_CODE(config_base);
	unsigned int group_shift;
	bool venum_event = EVENT_VENUM(config_base);

	group_shift = group * 8;
	mask = 0xff << group_shift;

	/* Configure evtsel for the region and group */
	if (venum_event)
		val = SCORPION_VLPM_GROUP0;
	else
		val = scorpion_get_pmresrn_event(region);
	val += group;
	/* Mix in mode-exclusion bits */
	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
	armv7_pmnc_write_evtsel(idx, val);

	asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));

	if (venum_event) {
		venum_pre_pmresr(&vval, &fval);
		val = venum_read_pmresr();
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		venum_write_pmresr(val);
		venum_post_pmresr(vval, fval);
	} else {
		val = scorpion_read_pmresrn(region);
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		scorpion_write_pmresrn(region, val);
	}
}

static void scorpion_clearpmu(u32 config_base)
{
	u32 val;
	u32 vval, fval;
	unsigned int region = EVENT_REGION(config_base);
	unsigned int group = EVENT_GROUP(config_base);
	bool venum_event = EVENT_VENUM(config_base);

	if (venum_event) {
		venum_pre_pmresr(&vval, &fval);
		val = venum_read_pmresr();
		val = clear_pmresrn_group(val, group);
		venum_write_pmresr(val);
		venum_post_pmresr(vval, fval);
	} else {
		val = scorpion_read_pmresrn(region);
		val = clear_pmresrn_group(val, group);
		scorpion_write_pmresrn(region, val);
	}
}

static void scorpion_pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/* Disable counter and interrupt */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Clear pmresr code (if destined for PMNx counters)
	 */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		scorpion_clearpmu(hwc->config_base);

	/* Disable interrupt for this counter */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void scorpion_pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We don't set the event for the cycle counter because we
	 * don't have the ability to perform event filtering.
	 */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		scorpion_evt_setup(idx, hwc->config_base);
	else if (idx != ARMV7_IDX_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/* Enable interrupt for this counter */
	armv7_pmnc_enable_intens(idx);

	/* Enable counter */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void scorpion_pmu_reset(void *info)
{
	u32 vval, fval;
	struct arm_pmu *cpu_pmu = info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	armv7pmu_reset(info);

	/* Clear all pmresrs */
	scorpion_write_pmresrn(0, 0);
	scorpion_write_pmresrn(1, 0);
	scorpion_write_pmresrn(2, 0);
	scorpion_write_pmresrn(3, 0);

	venum_pre_pmresr(&vval, &fval);
	venum_write_pmresr(0);
	venum_post_pmresr(vval, fval);

	/* Reset PMxEVNCTCR to sane default */
	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv7_pmnc_select_counter(idx);
		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
	}
}

static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
				 unsigned int group)
{
	int bit;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);

	if (hwc->config_base & VENUM_EVENT)
		bit = SCORPION_VLPM_GROUP0;
	else
		bit = scorpion_get_pmresrn_event(region);
	bit -= scorpion_get_pmresrn_event(0);
	bit += group;
	/*
	 * Lower bits are reserved for use by the counters (see
	 * armv7pmu_get_event_idx() for more info)
	 */
	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;

	return bit;
}

/*
 * We check for column exclusion constraints here.
 * Two events can't use the same group within a pmresr register.
 */
static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc,
				      struct perf_event *event)
{
	int idx;
	int bit = -1;
	struct hw_perf_event *hwc = &event->hw;
	unsigned int region = EVENT_REGION(hwc->config_base);
	unsigned int group = EVENT_GROUP(hwc->config_base);
	bool venum_event = EVENT_VENUM(hwc->config_base);
	bool scorpion_event = EVENT_CPU(hwc->config_base);

	if (venum_event || scorpion_event) {
		/* Ignore invalid events */
		if (group > 3 || region > 3)
			return -EINVAL;

		bit = scorpion_event_to_bit(event, region, group);
		if (test_and_set_bit(bit, cpuc->used_mask))
			return -EAGAIN;
	}

	idx = armv7pmu_get_event_idx(cpuc, event);
	if (idx < 0 && bit >= 0)
		clear_bit(bit, cpuc->used_mask);

	return idx;
}

static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
					 struct perf_event *event)
{
	int bit;
	struct hw_perf_event *hwc = &event->hw;
	unsigned int region = EVENT_REGION(hwc->config_base);
	unsigned int group = EVENT_GROUP(hwc->config_base);
	bool venum_event = EVENT_VENUM(hwc->config_base);
	bool scorpion_event = EVENT_CPU(hwc->config_base);

	if (venum_event || scorpion_event) {
		bit = scorpion_event_to_bit(event, region, group);
		clear_bit(bit, cpuc->used_mask);
	}
}

static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "armv7_scorpion";
	cpu_pmu->map_event = scorpion_map_event;
	cpu_pmu->reset = scorpion_pmu_reset;
	cpu_pmu->enable = scorpion_pmu_enable_event;
	cpu_pmu->disable = scorpion_pmu_disable_event;
	cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx;
	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
	return armv7_probe_num_events(cpu_pmu);
}

static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "armv7_scorpion_mp";
	cpu_pmu->map_event = scorpion_map_event;
	cpu_pmu->reset = scorpion_pmu_reset;
	cpu_pmu->enable = scorpion_pmu_enable_event;
	cpu_pmu->disable = scorpion_pmu_disable_event;
	cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx;
	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
	return armv7_probe_num_events(cpu_pmu);
}

static const struct of_device_id armv7_pmu_of_device_ids[] = {
	{.compatible = "arm,cortex-a17-pmu",	.data = armv7_a17_pmu_init},
	{.compatible = "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init},
	{.compatible = "arm,cortex-a12-pmu",	.data = armv7_a12_pmu_init},
	{.compatible = "arm,cortex-a9-pmu",	.data = armv7_a9_pmu_init},
	{.compatible = "arm,cortex-a8-pmu",	.data = armv7_a8_pmu_init},
	{.compatible = "arm,cortex-a7-pmu",	.data = armv7_a7_pmu_init},
	{.compatible = "arm,cortex-a5-pmu",	.data = armv7_a5_pmu_init},
	{.compatible = "qcom,krait-pmu",	.data = krait_pmu_init},
	{.compatible = "qcom,scorpion-pmu",	.data = scorpion_pmu_init},
	{.compatible = "qcom,scorpion-mp-pmu",	.data = scorpion_mp_pmu_init},
	{},
};
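
/*
 * For reference, a devicetree node matched by this table typically looks
 * like the sketch below; the interrupt specifier is board specific and is
 * shown only as a placeholder:
 *
 *	pmu {
 *		compatible = "arm,cortex-a9-pmu";
 *		interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
 *	};
 */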

static const struct pmu_probe_info armv7_pmu_probe_table[] = {
	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A8, armv7_a8_pmu_init),
	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init),
	{ /* sentinel value */ }
};

static int armv7_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids,
				    armv7_pmu_probe_table);
}

static struct platform_driver armv7_pmu_driver = {
	.driver		= {
		.name	= "armv7-pmu",
		.of_match_table = armv7_pmu_of_device_ids,
	},
	.probe		= armv7_pmu_device_probe,
};

static int __init register_armv7_pmu_driver(void)
{
	return platform_driver_register(&armv7_pmu_driver);
}
device_initcall(register_armv7_pmu_driver);

#endif	/* CONFIG_CPU_V7 */