/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * Copied from ARMv6 code, with the low level code inspired
 * by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 * a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 * a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 * counter and all 4 performance counters together can be reset separately.
 */
#ifdef CONFIG_CPU_V7
#include <asm/cp15.h>
#include <asm/vfp.h>
#include "../vfp/vfpinstr.h"
/*
 * Common ARMv7 event types
 *
 * Note: An implementation may not be able to count all of these events
 * but the encodings are considered to be `reserved' in the case that
 * they are not available.
 */
enum armv7_perf_types {
	ARMV7_PERFCTR_PMNC_SW_INCR			= 0x00,
	ARMV7_PERFCTR_L1_ICACHE_REFILL			= 0x01,
	ARMV7_PERFCTR_ITLB_REFILL			= 0x02,
	ARMV7_PERFCTR_L1_DCACHE_REFILL			= 0x03,
	ARMV7_PERFCTR_L1_DCACHE_ACCESS			= 0x04,
	ARMV7_PERFCTR_DTLB_REFILL			= 0x05,
	ARMV7_PERFCTR_MEM_READ				= 0x06,
	ARMV7_PERFCTR_MEM_WRITE				= 0x07,
	ARMV7_PERFCTR_INSTR_EXECUTED			= 0x08,
	ARMV7_PERFCTR_EXC_TAKEN				= 0x09,
	ARMV7_PERFCTR_EXC_EXECUTED			= 0x0A,
	ARMV7_PERFCTR_CID_WRITE				= 0x0B,

	/*
	 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
	 * It counts:
	 *  - all (taken) branch instructions,
	 *  - instructions that explicitly write the PC,
	 *  - exception generating instructions.
	 */
	ARMV7_PERFCTR_PC_WRITE				= 0x0C,
	ARMV7_PERFCTR_PC_IMM_BRANCH			= 0x0D,
	ARMV7_PERFCTR_PC_PROC_RETURN			= 0x0E,
	ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS		= 0x0F,
	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED		= 0x10,
	ARMV7_PERFCTR_CLOCK_CYCLES			= 0x11,
	ARMV7_PERFCTR_PC_BRANCH_PRED			= 0x12,

	/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
	ARMV7_PERFCTR_MEM_ACCESS			= 0x13,
	ARMV7_PERFCTR_L1_ICACHE_ACCESS			= 0x14,
	ARMV7_PERFCTR_L1_DCACHE_WB			= 0x15,
	ARMV7_PERFCTR_L2_CACHE_ACCESS			= 0x16,
	ARMV7_PERFCTR_L2_CACHE_REFILL			= 0x17,
	ARMV7_PERFCTR_L2_CACHE_WB			= 0x18,
	ARMV7_PERFCTR_BUS_ACCESS			= 0x19,
	ARMV7_PERFCTR_MEM_ERROR				= 0x1A,
	ARMV7_PERFCTR_INSTR_SPEC			= 0x1B,
	ARMV7_PERFCTR_TTBR_WRITE			= 0x1C,
	ARMV7_PERFCTR_BUS_CYCLES			= 0x1D,

	ARMV7_PERFCTR_CPU_CYCLES			= 0xFF
};
/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
	ARMV7_A8_PERFCTR_L2_CACHE_ACCESS	= 0x43,
	ARMV7_A8_PERFCTR_L2_CACHE_REFILL	= 0x44,
	ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS	= 0x50,
	ARMV7_A8_PERFCTR_STALL_ISIDE		= 0x56,
};

/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
	ARMV7_A9_PERFCTR_INSTR_CORE_RENAME	= 0x68,
	ARMV7_A9_PERFCTR_STALL_ICACHE		= 0x60,
	ARMV7_A9_PERFCTR_STALL_DISPATCH		= 0x66,
};
/* ARMv7 Cortex-A5 specific event types */
enum armv7_a5_perf_types {
	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL	= 0xc2,
	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP	= 0xc3,
};
/* ARMv7 Cortex-A15 specific event types */
enum armv7_a15_perf_types {
	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ	 = 0x40,
	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41,
	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ	 = 0x42,
	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE = 0x43,

	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ	 = 0x4C,
	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE	 = 0x4D,

	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ	 = 0x50,
	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE	 = 0x51,
	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ	 = 0x52,
	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE	 = 0x53,

	ARMV7_A15_PERFCTR_PC_WRITE_SPEC		 = 0x76,
};
/* ARMv7 Cortex-A12 specific event types */
enum armv7_a12_perf_types {
	ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ	 = 0x40,
	ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41,

	ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ	 = 0x50,
	ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE	 = 0x51,

	ARMV7_A12_PERFCTR_PC_WRITE_SPEC		 = 0x76,

	ARMV7_A12_PERFCTR_PF_TLB_REFILL		 = 0xe7,
};
/* ARMv7 Krait specific event types */
enum krait_perf_types {
	KRAIT_PMRESR0_GROUP0			= 0xcc,
	KRAIT_PMRESR1_GROUP0			= 0xd0,
	KRAIT_PMRESR2_GROUP0			= 0xd4,
	KRAIT_VPMRESR0_GROUP0			= 0xd8,

	KRAIT_PERFCTR_L1_ICACHE_ACCESS		= 0x10011,
	KRAIT_PERFCTR_L1_ICACHE_MISS		= 0x10010,

	KRAIT_PERFCTR_L1_ITLB_ACCESS		= 0x12222,
	KRAIT_PERFCTR_L1_DTLB_ACCESS		= 0x12210,
};
/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A8_PERFCTR_STALL_ISIDE,
};

static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A9_PERFCTR_STALL_ICACHE,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV7_A9_PERFCTR_STALL_DISPATCH,
};

static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
 * Cortex-A5 HW events mapping
 */
static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
	/*
	 * The prefetch counters don't differentiate between the I side and the
	 * D side.
	 */
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
 * Cortex-A15 HW events mapping
 */
static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
};

static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,

	/*
	 * Not all performance counters differentiate between read and write
	 * accesses/misses so we're not always strictly correct, but it's the
	 * best we can do. Writes and reads get combined in these cases.
	 */
	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
 * Cortex-A7 HW events mapping
 */
static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
};

static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
 * Cortex-A12 HW events mapping
 */
static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
};

static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	/*
	 * Not all performance counters differentiate between read and write
	 * accesses/misses so we're not always strictly correct, but it's the
	 * best we can do. Writes and reads get combined in these cases.
	 */
	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A12_PERFCTR_PF_TLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
 * Krait HW events mapping
 */
static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= KRAIT_PERFCTR_L1_ICACHE_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS,

	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
 * Perf Events' indices
 */
#define	ARMV7_IDX_CYCLE_COUNTER	0
#define	ARMV7_IDX_COUNTER0	1
#define	ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define	ARMV7_MAX_COUNTERS	32
#define	ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)

/*
 * ARMv7 low level PMNC access
 */

/*
 * Perf Event to low level counters mapping
 */
#define	ARMV7_IDX_TO_COUNTER(x)	\
	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
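
/*
 * Illustrative note (not part of the original source): perf index 1
 * (ARMV7_IDX_COUNTER0) maps to hardware event counter 0, index 2 to
 * counter 1, and so on. The cycle counter (index 0) wraps around to 31,
 * which matches the CCNT bit position in the enable and overflow
 * registers.
 */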
/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
#define	ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
#define	ARMV7_PMNC_N_MASK	0x1f
#define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */

/*
 * FLAG: counters overflow flag status reg
 */
#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define	ARMV7_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
#define	ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */

/*
 * Event filters for PMUv2
 */
#define	ARMV7_EXCLUDE_PL1	(1 << 31)
#define	ARMV7_EXCLUDE_USER	(1 << 30)
#define	ARMV7_INCLUDE_HYP	(1 << 27)
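
/*
 * Illustrative note (not part of the original source): with PMUv2-style
 * filtering, the value programmed into PMXEVTYPER is the event number
 * combined with the exclusion bits above. Counting L1 D-cache refills in
 * user space only would roughly amount to:
 *
 *	armv7_pmnc_write_evtsel(idx,
 *		ARMV7_PERFCTR_L1_DCACHE_REFILL | ARMV7_EXCLUDE_PL1);
 *
 * armv7pmu_set_event_filter() builds the exclusion part, and the write is
 * masked with ARMV7_EVTYPE_MASK before it reaches the register.
 */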
static inline u32 armv7_pmnc_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	return val;
}

static inline void armv7_pmnc_write(u32 val)
{
	val &= ARMV7_PMNC_MASK;
	isb();
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (val));
}

static inline int armv7_pmnc_has_overflowed(u32 pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}

static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
	return idx >= ARMV7_IDX_CYCLE_COUNTER &&
		idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
}

static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
}

static inline void armv7_pmnc_select_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
	isb();
}
static inline u32 armv7pmu_read_counter(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u32 value = 0;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	} else {
		armv7_pmnc_select_counter(idx);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
	}

	return value;
}

static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
	} else {
		armv7_pmnc_select_counter(idx);
		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
	}
}
static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
{
	armv7_pmnc_select_counter(idx);
	val &= ARMV7_EVTYPE_MASK;
	asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
}

static inline void armv7_pmnc_enable_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
}

static inline void armv7_pmnc_disable_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
}

static inline void armv7_pmnc_enable_intens(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
}

static inline void armv7_pmnc_disable_intens(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
	isb();
}

static inline u32 armv7_pmnc_getreset_flags(void)
{
	u32 val;

	/* Read */
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write to clear flags */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

	return val;
}
#ifdef DEBUG
static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
{
	u32 val;
	unsigned int cnt;

	pr_info("PMNC registers dump:\n");

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	pr_info("PMNC  =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
	pr_info("CNTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
	pr_info("INTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
	pr_info("FLAGS =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
	pr_info("SELECT=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
	pr_info("CCNT  =0x%08x\n", val);

	for (cnt = ARMV7_IDX_COUNTER0;
			cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		pr_info("CNT[%d] count =0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		pr_info("CNT[%d] evtsel=0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
	}
}
#endif
static void armv7pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return;
	}

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We only need to set the event for the cycle counter if we
	 * have the ability to perform event filtering.
	 */
	if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv7_pmnc_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv7pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return;
	}

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmnc;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmnc = armv7_pmnc_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv7_pmnc_has_overflowed(pmnc))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}
static void armv7pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	int idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;

	/* Always place a cycle counter into the cycle counter. */
	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV7_IDX_CYCLE_COUNTER;
	}

	/*
	 * For anything other than a cycle counter, try and use
	 * the events counters
	 */
	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}

	/* The counters are all in use. */
	return -EAGAIN;
}
/*
 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
 */
static int armv7pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;
	if (attr->exclude_user)
		config_base |= ARMV7_EXCLUDE_USER;
	if (attr->exclude_kernel)
		config_base |= ARMV7_EXCLUDE_PL1;
	if (!attr->exclude_hv)
		config_base |= ARMV7_INCLUDE_HYP;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}
static void armv7pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv7_pmnc_disable_counter(idx);
		armv7_pmnc_disable_intens(idx);
	}

	/* Initialize & Reset PMNC: C and P bits */
	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}
static int armv7_a8_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a8_perf_map,
				&armv7_a8_perf_cache_map, 0xFF);
}

static int armv7_a9_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a9_perf_map,
				&armv7_a9_perf_cache_map, 0xFF);
}

static int armv7_a5_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a5_perf_map,
				&armv7_a5_perf_cache_map, 0xFF);
}

static int armv7_a15_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a15_perf_map,
				&armv7_a15_perf_cache_map, 0xFF);
}

static int armv7_a7_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a7_perf_map,
				&armv7_a7_perf_cache_map, 0xFF);
}

static int armv7_a12_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a12_perf_map,
				&armv7_a12_perf_cache_map, 0xFF);
}

static int krait_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &krait_perf_map,
				&krait_perf_cache_map, 0xFFFFF);
}

static int krait_map_event_no_branch(struct perf_event *event)
{
	return armpmu_map_event(event, &krait_perf_map_no_branch,
				&krait_perf_cache_map, 0xFFFFF);
}
static void armv7pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->handle_irq	= armv7pmu_handle_irq;
	cpu_pmu->enable		= armv7pmu_enable_event;
	cpu_pmu->disable	= armv7pmu_disable_event;
	cpu_pmu->read_counter	= armv7pmu_read_counter;
	cpu_pmu->write_counter	= armv7pmu_write_counter;
	cpu_pmu->get_event_idx	= armv7pmu_get_event_idx;
	cpu_pmu->start		= armv7pmu_start;
	cpu_pmu->stop		= armv7pmu_stop;
	cpu_pmu->reset		= armv7pmu_reset;
	cpu_pmu->max_period	= (1LLU << 32) - 1;
}

static u32 armv7_read_num_pmnc_events(void)
{
	u32 nb_cnt;

	/* Read the nb of CNTx counters supported from PMNC */
	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

	/* Add the CPU cycles counter and return */
	return nb_cnt + 1;
}
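
/*
 * Illustrative note (not part of the original source, figures assumed):
 * an implementation with six event counters reports N = 6 in PMNC[15:11],
 * so this helper returns 7, i.e. the event counters plus the dedicated
 * cycle counter.
 */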
static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a8";
	cpu_pmu->map_event	= armv7_a8_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	return 0;
}

static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a9";
	cpu_pmu->map_event	= armv7_a9_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	return 0;
}

static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a5";
	cpu_pmu->map_event	= armv7_a5_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	return 0;
}

static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a15";
	cpu_pmu->map_event	= armv7_a15_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	return 0;
}

static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a7";
	cpu_pmu->map_event	= armv7_a7_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	return 0;
}

static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a12";
	cpu_pmu->map_event	= armv7_a12_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	return 0;
}

static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7_a12_pmu_init(cpu_pmu);
	cpu_pmu->name = "armv7_cortex_a17";
	return 0;
}
/*
 * Krait Performance Monitor Region Event Selection Register (PMRESRn)
 *
 *            31   30     24     16     8      0
 *            +--------------------------------+
 *  PMRESR0   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
 *            +--------------------------------+
 *  PMRESR1   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
 *            +--------------------------------+
 *  PMRESR2   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
 *            +--------------------------------+
 *  VPMRESR0  | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
 *            +--------------------------------+
 *              EN | G=3  | G=2  | G=1  | G=0
 *
 *  Event Encoding:
 *
 *      hwc->config_base = 0xNRCCG
 *
 *      N  = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
 *      R  = region register
 *      CC = class of events the group G is choosing from
 *      G  = group or particular event
 *
 *  Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
 *
 *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
 *  unit, etc.) while the event code (CC) corresponds to a particular class of
 *  events (interrupts for example). An event code is broken down into
 *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
 *  example).
 */
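
/*
 * Worked example of the encoding above (added for illustration): the
 * config_base value 0x12021 decodes as N = 1 (Krait CPU, PMRESRn),
 * R = 2 (PMRESR2), CC = 0x02 and G = 1, i.e. the class-2 event routed
 * through group 1 of PMRESR2, matching the example given above.
 */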
#define KRAIT_EVENT		(1 << 16)
#define VENUM_EVENT		(2 << 16)
#define KRAIT_EVENT_MASK	(KRAIT_EVENT | VENUM_EVENT)
#define PMRESRn_EN		BIT(31)

static u32 krait_read_pmresrn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
		break;
	case 1:
		asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
		break;
	case 2:
		asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
		break;
	default:
		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
	}

	return val;
}

static void krait_write_pmresrn(int n, u32 val)
{
	switch (n) {
	case 0:
		asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
		break;
	case 1:
		asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
		break;
	case 2:
		asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
		break;
	default:
		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
	}
}

static u32 krait_read_vpmresr0(void)
{
	u32 val;
	asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
	return val;
}

static void krait_write_vpmresr0(u32 val)
{
	asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
}

static void krait_pre_vpmresr0(u32 *venum_orig_val, u32 *fp_orig_val)
{
	u32 venum_new_val;
	u32 fp_new_val;

	BUG_ON(preemptible());
	/* CPACR Enable CP10 and CP11 access */
	*venum_orig_val = get_copro_access();
	venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
	set_copro_access(venum_new_val);

	/* Enable FPEXC */
	*fp_orig_val = fmrx(FPEXC);
	fp_new_val = *fp_orig_val | FPEXC_EN;
	fmxr(FPEXC, fp_new_val);
}

static void krait_post_vpmresr0(u32 venum_orig_val, u32 fp_orig_val)
{
	BUG_ON(preemptible());
	/* Restore FPEXC */
	fmxr(FPEXC, fp_orig_val);
	isb();
	/* Restore CPACR */
	set_copro_access(venum_orig_val);
}

static u32 krait_get_pmresrn_event(unsigned int region)
{
	static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
					     KRAIT_PMRESR1_GROUP0,
					     KRAIT_PMRESR2_GROUP0 };
	return pmresrn_table[region];
}
static void krait_evt_setup(int idx, u32 config_base)
{
	u32 val;
	u32 mask;
	u32 vval, fval;
	unsigned int region;
	unsigned int group;
	unsigned int code;
	unsigned int group_shift;
	bool venum_event;

	venum_event = !!(config_base & VENUM_EVENT);
	region = (config_base >> 12) & 0xf;
	code   = (config_base >> 4) & 0xff;
	group  = (config_base >> 0) & 0xf;

	group_shift = group * 8;
	mask = 0xff << group_shift;

	/* Configure evtsel for the region and group */
	if (venum_event)
		val = KRAIT_VPMRESR0_GROUP0;
	else
		val = krait_get_pmresrn_event(region);
	val += group;
	/* Mix in mode-exclusion bits */
	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
	armv7_pmnc_write_evtsel(idx, val);

	asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));

	if (venum_event) {
		krait_pre_vpmresr0(&vval, &fval);
		val = krait_read_vpmresr0();
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		krait_write_vpmresr0(val);
		krait_post_vpmresr0(vval, fval);
	} else {
		val = krait_read_pmresrn(region);
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		krait_write_pmresrn(region, val);
	}
}

static u32 krait_clear_pmresrn_group(u32 val, int group)
{
	u32 mask;
	int group_shift;

	group_shift = group * 8;
	mask = 0xff << group_shift;
	val &= ~mask;

	/* Don't clear enable bit if entire region isn't disabled */
	if (val & ~PMRESRn_EN)
		return val |= PMRESRn_EN;

	return 0;
}

static void krait_clearpmu(u32 config_base)
{
	u32 val;
	u32 vval, fval;
	unsigned int region;
	unsigned int group;
	bool venum_event;

	venum_event = !!(config_base & VENUM_EVENT);
	region = (config_base >> 12) & 0xf;
	group  = (config_base >> 0) & 0xf;

	if (venum_event) {
		krait_pre_vpmresr0(&vval, &fval);
		val = krait_read_vpmresr0();
		val = krait_clear_pmresrn_group(val, group);
		krait_write_vpmresr0(val);
		krait_post_vpmresr0(vval, fval);
	} else {
		val = krait_read_pmresrn(region);
		val = krait_clear_pmresrn_group(val, group);
		krait_write_pmresrn(region, val);
	}
}
static void krait_pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/* Disable counter and interrupt */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Clear pmresr code (if destined for PMNx counters)
	 */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		krait_clearpmu(hwc->config_base);

	/* Disable interrupt for this counter */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void krait_pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We set the event for the cycle counter because we
	 * have the ability to perform event filtering.
	 */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		krait_evt_setup(idx, hwc->config_base);
	else
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/* Enable interrupt for this counter */
	armv7_pmnc_enable_intens(idx);

	/* Enable counter */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void krait_pmu_reset(void *info)
{
	u32 vval, fval;

	armv7pmu_reset(info);

	/* Clear all pmresrs */
	krait_write_pmresrn(0, 0);
	krait_write_pmresrn(1, 0);
	krait_write_pmresrn(2, 0);

	krait_pre_vpmresr0(&vval, &fval);
	krait_write_vpmresr0(0);
	krait_post_vpmresr0(vval, fval);
}
static int krait_event_to_bit(struct perf_event *event, unsigned int region,
			      unsigned int group)
{
	int bit;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);

	if (hwc->config_base & VENUM_EVENT)
		bit = KRAIT_VPMRESR0_GROUP0;
	else
		bit = krait_get_pmresrn_event(region);
	bit -= krait_get_pmresrn_event(0);
	bit += group;
	/*
	 * Lower bits are reserved for use by the counters (see
	 * armv7pmu_get_event_idx() for more info)
	 */
	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;

	return bit;
}
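
/*
 * Illustrative note (not part of the original source, figures assumed):
 * on a PMU with four event counters plus the cycle counter
 * (num_events == 5, so ARMV7_IDX_COUNTER_LAST() == 4), a PMRESR1 group-2
 * event maps to bit (0xd0 - 0xcc) + 2 + 5 = 11 of cpuc->used_mask,
 * safely above the bits used for the counters themselves.
 */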
/*
 * We check for column exclusion constraints here.
 * Two events can't use the same group within a pmresr register.
 */
static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
				   struct perf_event *event)
{
	int idx;
	int bit = -1;
	unsigned int prefix;
	unsigned int region;
	unsigned int code;
	unsigned int group;
	bool krait_event;
	struct hw_perf_event *hwc = &event->hw;

	region = (hwc->config_base >> 12) & 0xf;
	code   = (hwc->config_base >> 4) & 0xff;
	group  = (hwc->config_base >> 0) & 0xf;
	krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);

	if (krait_event) {
		/* Ignore invalid events */
		if (group > 3 || region > 2)
			return -EINVAL;
		prefix = hwc->config_base & KRAIT_EVENT_MASK;
		if (prefix != KRAIT_EVENT && prefix != VENUM_EVENT)
			return -EINVAL;
		if (prefix == VENUM_EVENT && (code & 0xe0))
			return -EINVAL;

		bit = krait_event_to_bit(event, region, group);
		if (test_and_set_bit(bit, cpuc->used_mask))
			return -EAGAIN;
	}

	idx = armv7pmu_get_event_idx(cpuc, event);
	if (idx < 0 && bit >= 0)
		clear_bit(bit, cpuc->used_mask);

	return idx;
}

static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
				      struct perf_event *event)
{
	int bit;
	struct hw_perf_event *hwc = &event->hw;
	unsigned int region;
	unsigned int group;
	bool krait_event;

	region = (hwc->config_base >> 12) & 0xf;
	group  = (hwc->config_base >> 0) & 0xf;
	krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);

	if (krait_event) {
		bit = krait_event_to_bit(event, region, group);
		clear_bit(bit, cpuc->used_mask);
	}
}
static int krait_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_krait";
	/* Some early versions of Krait don't support PC write events */
	if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
				  "qcom,no-pc-write"))
		cpu_pmu->map_event = krait_map_event_no_branch;
	else
		cpu_pmu->map_event = krait_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	cpu_pmu->reset		= krait_pmu_reset;
	cpu_pmu->enable		= krait_pmu_enable_event;
	cpu_pmu->disable	= krait_pmu_disable_event;
	cpu_pmu->get_event_idx	= krait_pmu_get_event_idx;
	cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
	return 0;
}
# else
2012-07-31 10:11:23 +01:00
static inline int armv7_a8_pmu_init ( struct arm_pmu * cpu_pmu )
2010-11-13 19:04:32 +00:00
{
2012-07-31 10:11:23 +01:00
return - ENODEV ;
2010-11-13 19:04:32 +00:00
}
2012-07-31 10:11:23 +01:00
static inline int armv7_a9_pmu_init ( struct arm_pmu * cpu_pmu )
2010-11-13 19:04:32 +00:00
{
2012-07-31 10:11:23 +01:00
return - ENODEV ;
2010-11-13 19:04:32 +00:00
}
2011-06-03 17:40:15 +01:00
2012-07-31 10:11:23 +01:00
static inline int armv7_a5_pmu_init ( struct arm_pmu * cpu_pmu )
2011-06-03 17:40:15 +01:00
{
2012-07-31 10:11:23 +01:00
return - ENODEV ;
2011-06-03 17:40:15 +01:00
}
2011-01-19 14:24:38 +00:00
2012-07-31 10:11:23 +01:00
static inline int armv7_a15_pmu_init ( struct arm_pmu * cpu_pmu )
2011-01-19 14:24:38 +00:00
{
2012-07-31 10:11:23 +01:00
return - ENODEV ;
2011-01-19 14:24:38 +00:00
}
2012-02-03 14:46:01 +01:00
2012-07-31 10:11:23 +01:00
static inline int armv7_a7_pmu_init ( struct arm_pmu * cpu_pmu )
2012-02-03 14:46:01 +01:00
{
2012-07-31 10:11:23 +01:00
return - ENODEV ;
2012-02-03 14:46:01 +01:00
}
2014-02-07 21:01:21 +00:00
2014-01-29 14:28:57 +00:00
static inline int armv7_a12_pmu_init ( struct arm_pmu * cpu_pmu )
{
return - ENODEV ;
}
2014-05-09 18:34:19 +01:00
static inline int armv7_a17_pmu_init ( struct arm_pmu * cpu_pmu )
{
return - ENODEV ;
}
2014-02-07 21:01:21 +00:00
static inline int krait_pmu_init ( struct arm_pmu * cpu_pmu )
{
return - ENODEV ;
}
2010-11-13 19:04:32 +00:00
# endif /* CONFIG_CPU_V7 */