/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * Copied from ARMv6 code, with the low level code inspired
 * by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 * a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 * a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 * counter and all 4 performance counters together can be reset separately.
 */
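
/*
 * Editorial note: the mrc/mcr sequences below access the CP15 c9 PMU
 * registers of the ARMv7-A architecture. For reference:
 *
 *	c9, c12, 0	PMCR       (PMNC: control register)
 *	c9, c12, 1	PMCNTENSET (counter enable set)
 *	c9, c12, 2	PMCNTENCLR (counter enable clear)
 *	c9, c12, 3	PMOVSR     (overflow flag status)
 *	c9, c12, 5	PMSELR     (event counter selection)
 *	c9, c13, 0	PMCCNTR    (cycle counter)
 *	c9, c13, 1	PMXEVTYPER (selected event type)
 *	c9, c13, 2	PMXEVCNTR  (selected event counter)
 *	c9, c14, 1	PMINTENSET (overflow interrupt enable set)
 *	c9, c14, 2	PMINTENCLR (overflow interrupt enable clear)
 */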

#ifdef CONFIG_CPU_V7

/*
 * Common ARMv7 event types
 *
 * Note: An implementation may not be able to count all of these events
 * but the encodings are considered to be `reserved' in the case that
 * they are not available.
 */
enum armv7_perf_types {
	ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
	ARMV7_PERFCTR_L1_ICACHE_REFILL = 0x01,
	ARMV7_PERFCTR_ITLB_REFILL = 0x02,
	ARMV7_PERFCTR_L1_DCACHE_REFILL = 0x03,
	ARMV7_PERFCTR_L1_DCACHE_ACCESS = 0x04,
	ARMV7_PERFCTR_DTLB_REFILL = 0x05,
	ARMV7_PERFCTR_MEM_READ = 0x06,
	ARMV7_PERFCTR_MEM_WRITE = 0x07,
	ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
	ARMV7_PERFCTR_EXC_TAKEN = 0x09,
	ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
	ARMV7_PERFCTR_CID_WRITE = 0x0B,

	/*
	 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
	 * It counts:
	 *  - all (taken) branch instructions,
	 *  - instructions that explicitly write the PC,
	 *  - exception generating instructions.
	 */
	ARMV7_PERFCTR_PC_WRITE = 0x0C,
	ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
	ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
	ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
	ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
	ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12,

	/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
	ARMV7_PERFCTR_MEM_ACCESS = 0x13,
	ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14,
	ARMV7_PERFCTR_L1_DCACHE_WB = 0x15,
	ARMV7_PERFCTR_L2_CACHE_ACCESS = 0x16,
	ARMV7_PERFCTR_L2_CACHE_REFILL = 0x17,
	ARMV7_PERFCTR_L2_CACHE_WB = 0x18,
	ARMV7_PERFCTR_BUS_ACCESS = 0x19,
	ARMV7_PERFCTR_MEM_ERROR = 0x1A,
	ARMV7_PERFCTR_INSTR_SPEC = 0x1B,
	ARMV7_PERFCTR_TTBR_WRITE = 0x1C,
	ARMV7_PERFCTR_BUS_CYCLES = 0x1D,

	ARMV7_PERFCTR_CPU_CYCLES = 0xFF
};
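
/*
 * Editorial example (assuming the standard perf tool, which is not part
 * of this file): these encodings are what userspace supplies as a raw
 * event number, so on an ARMv7 CPU "perf stat -e r3" requests raw event
 * 0x03, i.e. ARMV7_PERFCTR_L1_DCACHE_REFILL, on one of the PMNx counters.
 */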

/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
	ARMV7_A8_PERFCTR_L2_CACHE_ACCESS = 0x43,
	ARMV7_A8_PERFCTR_L2_CACHE_REFILL = 0x44,
	ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS = 0x50,
	ARMV7_A8_PERFCTR_STALL_ISIDE = 0x56,
};

/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
	ARMV7_A9_PERFCTR_INSTR_CORE_RENAME = 0x68,
	ARMV7_A9_PERFCTR_STALL_ICACHE = 0x60,
	ARMV7_A9_PERFCTR_STALL_DISPATCH = 0x66,
};

/* ARMv7 Cortex-A5 specific event types */
enum armv7_a5_perf_types {
	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL = 0xc2,
	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3,
};

/* ARMv7 Cortex-A15 specific event types */
enum armv7_a15_perf_types {
	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ = 0x40,
	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41,
	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ = 0x42,
	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE = 0x43,

	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ = 0x4C,
	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE = 0x4D,

	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ = 0x50,
	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE = 0x51,
	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ = 0x52,
	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE = 0x53,

	ARMV7_A15_PERFCTR_PC_WRITE_SPEC = 0x76,
};

/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};

static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV7_A9_PERFCTR_STALL_DISPATCH,
};

static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A5 HW events mapping
 */
static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};

static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
			[C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		/*
		 * The prefetch counters don't differentiate between the I
		 * side and the D side.
		 */
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
			[C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A15 HW events mapping
 */
static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};

static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		/*
		 * Not all performance counters differentiate between read
		 * and write accesses/misses so we're not always strictly
		 * correct, but it's the best we can do. Writes and reads get
		 * combined in these cases.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A7 HW events mapping
 */
static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};

static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Perf Events' indices
 */
#define ARMV7_IDX_CYCLE_COUNTER	0
#define ARMV7_IDX_COUNTER0	1
#define ARMV7_IDX_COUNTER_LAST	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define ARMV7_MAX_COUNTERS	32
#define ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)

/*
 * ARMv7 low level PMNC access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV7_IDX_TO_COUNTER(x)	\
	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
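
/*
 * Editorial worked example: the cycle counter sits at perf index 0 and
 * is handled specially, so the event counters start at perf index 1.
 * Thus ARMV7_IDX_TO_COUNTER(1) == 0 (hardware counter PMN0),
 * ARMV7_IDX_TO_COUNTER(2) == 1 (PMN1), and so on.
 */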

/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
#define ARMV7_PMNC_N_MASK	0x1f
#define ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */
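
/*
 * Typical PMNC usage, as implemented by armv7pmu_reset() and
 * armv7pmu_start() further down (editorial sketch, not extra logic):
 *
 *	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);       reset counters
 *	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);  start counting
 */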

/*
 * FLAG: counters overflow flag status reg
 */
#define ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
#define ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define ARMV7_EVTYPE_MASK	0xc00000ff	/* Mask for writable bits */
#define ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */

/*
 * Event filters for PMUv2
 */
#define ARMV7_EXCLUDE_PL1	(1 << 31)
#define ARMV7_EXCLUDE_USER	(1 << 30)
#define ARMV7_INCLUDE_HYP	(1 << 27)
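
/*
 * Editorial example: counting INSTR_EXECUTED (0x08) with the kernel
 * excluded composes to (ARMV7_EXCLUDE_PL1 | 0x08), which is what ends up
 * written to PMXEVTYPER; see armv7pmu_set_event_filter() below for how
 * the exclude bits are derived from the perf_event_attr.
 */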

static inline u32 armv7_pmnc_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	return val;
}

static inline void armv7_pmnc_write(u32 val)
{
	val &= ARMV7_PMNC_MASK;
	isb();
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (val));
}

static inline int armv7_pmnc_has_overflowed(u32 pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}

static inline int armv7_pmnc_counter_valid(int idx)
{
	return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST;
}

static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
{
	int ret = 0;
	u32 counter;

	if (!armv7_pmnc_counter_valid(idx)) {
		pr_err("CPU%u checking wrong counter %d overflow status\n",
			smp_processor_id(), idx);
	} else {
		counter = ARMV7_IDX_TO_COUNTER(idx);
		ret = pmnc & BIT(counter);
	}

	return ret;
}

static inline int armv7_pmnc_select_counter(int idx)
{
	u32 counter;

	if (!armv7_pmnc_counter_valid(idx)) {
		pr_err("CPU%u selecting wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
	isb();

	return idx;
}

static inline u32 armv7pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u32 value = 0;

	if (!armv7_pmnc_counter_valid(idx))
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	else if (armv7_pmnc_select_counter(idx) == idx)
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));

	return value;
}

static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(idx))
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
	else if (armv7_pmnc_select_counter(idx) == idx)
		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
}

static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
{
	if (armv7_pmnc_select_counter(idx) == idx) {
		val &= ARMV7_EVTYPE_MASK;
		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
	}
}

static inline int armv7_pmnc_enable_counter(int idx)
{
	u32 counter;

	if (!armv7_pmnc_counter_valid(idx)) {
		pr_err("CPU%u enabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
	return idx;
}

static inline int armv7_pmnc_disable_counter(int idx)
{
	u32 counter;

	if (!armv7_pmnc_counter_valid(idx)) {
		pr_err("CPU%u disabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
	return idx;
}

static inline int armv7_pmnc_enable_intens(int idx)
{
	u32 counter;

	if (!armv7_pmnc_counter_valid(idx)) {
		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
	return idx;
}

static inline int armv7_pmnc_disable_intens(int idx)
{
	u32 counter;

	if (!armv7_pmnc_counter_valid(idx)) {
		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
	isb();

	return idx;
}

static inline u32 armv7_pmnc_getreset_flags(void)
{
	u32 val;

	/* Read */
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write to clear flags */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

	return val;
}

#ifdef DEBUG
static void armv7_pmnc_dump_regs(void)
{
	u32 val;
	unsigned int cnt;

	printk(KERN_INFO "PMNC registers dump:\n");

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	printk(KERN_INFO "PMNC  =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
	printk(KERN_INFO "CNTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
	printk(KERN_INFO "INTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
	printk(KERN_INFO "FLAGS =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
	printk(KERN_INFO "SELECT=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
	printk(KERN_INFO "CCNT  =0x%08x\n", val);

	for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
	}
}
#endif

static void armv7pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	int idx = hwc->idx;

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We only need to set the event for the cycle counter if we
	 * have the ability to perform event filtering.
	 */
	if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv7_pmnc_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	int idx = hwc->idx;

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmnc;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmnc = armv7_pmnc_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv7_pmnc_has_overflowed(pmnc))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}

static void armv7pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	int idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;

	/* Always place a cycle counter into the cycle counter. */
	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV7_IDX_CYCLE_COUNTER;
	}

	/*
	 * For anything other than a cycle counter, try and use
	 * the events counters
	 */
	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}

	/* The counters are all in use. */
	return -EAGAIN;
}
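
/*
 * Editorial note: the effect of the allocation above is that a cycles
 * event always claims the dedicated cycle counter (index 0), while any
 * other event takes the first free index in [ARMV7_IDX_COUNTER0,
 * num_events). With 4 event counters, a fifth concurrent non-cycles
 * event therefore fails with -EAGAIN.
 */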

/*
 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
 */
static int armv7pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;
	if (attr->exclude_user)
		config_base |= ARMV7_EXCLUDE_USER;
	if (attr->exclude_kernel)
		config_base |= ARMV7_EXCLUDE_PL1;
	if (!attr->exclude_hv)
		config_base |= ARMV7_INCLUDE_HYP;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}
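
/*
 * Editorial example: an event opened with attr.exclude_kernel set (e.g.
 * "perf stat -e cycles:u") gets ARMV7_EXCLUDE_PL1 merged into its
 * config_base, which armv7pmu_enable_event() later writes out through
 * armv7_pmnc_write_evtsel(). exclude_idle has no hardware equivalent,
 * hence the -EPERM above.
 */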

static void armv7pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv7_pmnc_disable_counter(idx);
		armv7_pmnc_disable_intens(idx);
	}

	/* Initialize & Reset PMNC: C and P bits */
	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}

static int armv7_a8_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a8_perf_map,
				&armv7_a8_perf_cache_map, 0xFF);
}

static int armv7_a9_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a9_perf_map,
				&armv7_a9_perf_cache_map, 0xFF);
}

static int armv7_a5_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a5_perf_map,
				&armv7_a5_perf_cache_map, 0xFF);
}

static int armv7_a15_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a15_perf_map,
				&armv7_a15_perf_cache_map, 0xFF);
}

static int armv7_a7_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a7_perf_map,
				&armv7_a7_perf_cache_map, 0xFF);
}

static void armv7pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->handle_irq	= armv7pmu_handle_irq;
	cpu_pmu->enable		= armv7pmu_enable_event;
	cpu_pmu->disable	= armv7pmu_disable_event;
	cpu_pmu->read_counter	= armv7pmu_read_counter;
	cpu_pmu->write_counter	= armv7pmu_write_counter;
	cpu_pmu->get_event_idx	= armv7pmu_get_event_idx;
	cpu_pmu->start		= armv7pmu_start;
	cpu_pmu->stop		= armv7pmu_stop;
	cpu_pmu->reset		= armv7pmu_reset;
	cpu_pmu->max_period	= (1LLU << 32) - 1;
}

static u32 __devinit armv7_read_num_pmnc_events(void)
{
	u32 nb_cnt;

	/* Read the nb of CNTx counters supported from PMNC */
	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

	/* Add the CPU cycles counter and return */
	return nb_cnt + 1;
}
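
/*
 * Editorial example: PMCR.N (bits [15:11]) reports the number of event
 * counters the implementation provides. A Cortex-A8, with its 4 event
 * counters, therefore makes this function return 5 once the cycle
 * counter is added.
 */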

static int __devinit armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "ARMv7 Cortex-A8";
	cpu_pmu->map_event	= armv7_a8_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	return 0;
}

static int __devinit armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "ARMv7 Cortex-A9";
	cpu_pmu->map_event	= armv7_a9_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	return 0;
}

static int __devinit armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "ARMv7 Cortex-A5";
	cpu_pmu->map_event	= armv7_a5_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	return 0;
}

static int __devinit armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "ARMv7 Cortex-A15";
	cpu_pmu->map_event	= armv7_a15_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	return 0;
}

static int __devinit armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "ARMv7 Cortex-A7";
	cpu_pmu->map_event	= armv7_a7_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	return 0;
}

#else

static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}
#endif	/* CONFIG_CPU_V7 */