/*
 * PMU support
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <asm/irq_regs.h>

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

/*
 * ARMv8 PMUv3 Performance Events handling code.
 * Common event types.
 */

/* Required events. */
#define ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR		0x00
#define ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL		0x03
#define ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS		0x04
#define ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED		0x10
#define ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES		0x11
#define ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED		0x12

/* At least one of the following is required. */
#define ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED		0x08
#define ARMV8_PMUV3_PERFCTR_OP_SPEC			0x1B

/* Common architectural events. */
#define ARMV8_PMUV3_PERFCTR_MEM_READ			0x06
#define ARMV8_PMUV3_PERFCTR_MEM_WRITE			0x07
#define ARMV8_PMUV3_PERFCTR_EXC_TAKEN			0x09
#define ARMV8_PMUV3_PERFCTR_EXC_EXECUTED		0x0A
#define ARMV8_PMUV3_PERFCTR_CID_WRITE			0x0B
#define ARMV8_PMUV3_PERFCTR_PC_WRITE			0x0C
#define ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH		0x0D
#define ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN		0x0E
#define ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS	0x0F
#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE			0x1C

/* Common microarchitectural events. */
#define ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL		0x01
#define ARMV8_PMUV3_PERFCTR_ITLB_REFILL			0x02
#define ARMV8_PMUV3_PERFCTR_DTLB_REFILL			0x05
#define ARMV8_PMUV3_PERFCTR_MEM_ACCESS			0x13
#define ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS		0x14
#define ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB		0x15
#define ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS		0x16
#define ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL		0x17
#define ARMV8_PMUV3_PERFCTR_L2_CACHE_WB			0x18
#define ARMV8_PMUV3_PERFCTR_BUS_ACCESS			0x19
#define ARMV8_PMUV3_PERFCTR_MEM_ERROR			0x1A
#define ARMV8_PMUV3_PERFCTR_BUS_CYCLES			0x1D

/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREFETCH_LINEFILL		0xC2

/* ARMv8 Cortex-A57 specific event types. */
#define ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD		0x40
#define ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST		0x41
#define ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD		0x42
#define ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST		0x43
#define ARMV8_A57_PERFCTR_DTLB_REFILL_LD		0x4c
#define ARMV8_A57_PERFCTR_DTLB_REFILL_ST		0x4d

/* PMUv3 HW events mapping. */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};
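
/*
 * Illustrative only, not part of this driver: a userspace counting event
 * opened as PERF_TYPE_HARDWARE / PERF_COUNT_HW_CPU_CYCLES resolves through
 * the map above to the architected CPU cycles event before it is programmed
 * into a counter. A minimal sketch, assuming a Linux host with the
 * perf_event_open() syscall:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.size	= sizeof(attr),
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 */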

/* ARM Cortex-A53 HW events mapping. */
static const unsigned armv8_a53_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV8_PMUV3_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
};

/* ARM Cortex-A57 HW events mapping. */
static const unsigned armv8_a57_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
};

static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};

static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREFETCH_LINEFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};

static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_A57_PERFCTR_DTLB_REFILL_LD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_A57_PERFCTR_DTLB_REFILL_ST,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Perf Events' indices
 */
#define	ARMV8_IDX_CYCLE_COUNTER	0
#define	ARMV8_IDX_COUNTER0	1
#define	ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
	(ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define	ARMV8_MAX_COUNTERS	32
#define	ARMV8_COUNTER_MASK	(ARMV8_MAX_COUNTERS - 1)

/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define	ARMV8_IDX_TO_COUNTER(x)	\
	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)
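
/*
 * Index 0 is reserved for the dedicated cycle counter, so perf event
 * indices are offset by one from the hardware event counters: for
 * example, ARMV8_IDX_TO_COUNTER(ARMV8_IDX_COUNTER0) selects hardware
 * event counter 0.
 */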

/*
 * Per-CPU PMCR: config reg
 */
#define ARMV8_PMCR_E		(1 << 0) /* Enable all counters */
#define ARMV8_PMCR_P		(1 << 1) /* Reset all counters */
#define ARMV8_PMCR_C		(1 << 2) /* Cycle counter reset */
#define ARMV8_PMCR_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV8_PMCR_X		(1 << 4) /* Export to ETM */
#define ARMV8_PMCR_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
#define	ARMV8_PMCR_N_SHIFT	11	 /* Number of counters supported */
#define	ARMV8_PMCR_N_MASK	0x1f
#define	ARMV8_PMCR_MASK		0x3f	 /* Mask for writable bits */

/*
 * PMOVSR: counters overflow flag status reg
 */
#define	ARMV8_OVSR_MASK		0xffffffff	/* Mask for writable bits */
#define	ARMV8_OVERFLOWED_MASK	ARMV8_OVSR_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define	ARMV8_EVTYPE_MASK	0xc80003ff	/* Mask for writable bits */
#define	ARMV8_EVTYPE_EVENT	0x3ff		/* Mask for EVENT bits */

/*
 * Event filters for PMUv3
 */
#define	ARMV8_EXCLUDE_EL1	(1 << 31)
#define	ARMV8_EXCLUDE_EL0	(1 << 30)
#define	ARMV8_INCLUDE_EL2	(1 << 27)

static inline u32 armv8pmu_pmcr_read(void)
{
	u32 val;

	asm volatile("mrs %0, pmcr_el0" : "=r" (val));
	return val;
}

static inline void armv8pmu_pmcr_write(u32 val)
{
	val &= ARMV8_PMCR_MASK;
	isb();
	asm volatile("msr pmcr_el0, %0" :: "r" (val));
}

static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
	return pmovsr & ARMV8_OVERFLOWED_MASK;
}

static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
	return idx >= ARMV8_IDX_CYCLE_COUNTER &&
		idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu);
}

static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}
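
/*
 * The PMXEVCNTR_EL0 and PMXEVTYPER_EL0 accessors are indirect: they operate
 * on whichever event counter PMSELR_EL0 currently selects. The isb() in
 * armv8pmu_select_counter() ensures the new selection is visible to the
 * subsequent indirect register access.
 */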
static inline int armv8pmu_select_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	asm volatile("msr pmselr_el0, %0" :: "r" (counter));
	isb();

	return idx;
}

static inline u32 armv8pmu_read_counter(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u32 value = 0;

	if (!armv8pmu_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
		asm volatile("mrs %0, pmccntr_el0" : "=r" (value));
	else if (armv8pmu_select_counter(idx) == idx)
		asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value));

	return value;
}

static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv8pmu_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
		asm volatile("msr pmccntr_el0, %0" :: "r" (value));
	else if (armv8pmu_select_counter(idx) == idx)
		asm volatile("msr pmxevcntr_el0, %0" :: "r" (value));
}
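
/*
 * The event type register carries both the event number and the EL filter
 * bits built by armv8pmu_set_event_filter(), so the full hwc->config_base
 * value is written here, masked to the architecturally writable bits.
 */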
static inline void armv8pmu_write_evtype(int idx, u32 val)
{
	if (armv8pmu_select_counter(idx) == idx) {
		val &= ARMV8_EVTYPE_MASK;
		asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
	}
}

static inline int armv8pmu_enable_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter)));
	return idx;
}

static inline int armv8pmu_disable_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter)));
	return idx;
}

static inline int armv8pmu_enable_intens(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter)));
	return idx;
}

static inline int armv8pmu_disable_intens(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter)));
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter)));
	isb();

	return idx;
}

static inline u32 armv8pmu_getreset_flags(void)
{
	u32 value;

	/* Read */
	asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));

	/* Write to clear flags */
	value &= ARMV8_OVSR_MASK;
	asm volatile("msr pmovsclr_el0, %0" :: "r" (value));

	return value;
}

static void armv8pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv8pmu_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters).
	 */
	armv8pmu_write_evtype(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv8pmu_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv8pmu_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv8pmu_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv8pmu_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmovsr;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmovsr = armv8pmu_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv8pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}

static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	int idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV8_EVTYPE_EVENT;

	/* Always place a cycle counter into the cycle counter. */
	if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
		if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV8_IDX_CYCLE_COUNTER;
	}

	/*
	 * For anything other than a cycle counter, try and use
	 * the events counters
	 */
	for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}

	/* The counters are all in use. */
	return -EAGAIN;
}

/*
 * Add an event filter to a given event. This will only work for PMUv3 PMUs.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;
	if (attr->exclude_user)
		config_base |= ARMV8_EXCLUDE_EL0;
	if (attr->exclude_kernel)
		config_base |= ARMV8_EXCLUDE_EL1;
	if (!attr->exclude_hv)
		config_base |= ARMV8_INCLUDE_EL2;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}
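
/*
 * For example, an event opened with attr.exclude_user = 1 and
 * attr.exclude_hv = 0 yields config_base = ARMV8_EXCLUDE_EL0 |
 * ARMV8_INCLUDE_EL2, which armv8pmu_write_evtype() later merges into
 * PMXEVTYPER_EL0 alongside the event number.
 */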

static void armv8pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv8pmu_disable_counter(idx);
		armv8pmu_disable_intens(idx);
	}

	/* Initialize & Reset PMNC: C and P bits. */
	armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);

	/* Disable access from userspace. */
	asm volatile("msr pmuserenr_el0, %0" :: "r" (0));
}

static int armv8_pmuv3_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv8_pmuv3_perf_map,
				&armv8_pmuv3_perf_cache_map,
				ARMV8_EVTYPE_EVENT);
}

static int armv8_a53_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv8_a53_perf_map,
				&armv8_a53_perf_cache_map,
				ARMV8_EVTYPE_EVENT);
}

static int armv8_a57_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv8_a57_perf_map,
				&armv8_a57_perf_cache_map,
				ARMV8_EVTYPE_EVENT);
}

static void armv8pmu_read_num_pmnc_events(void *info)
{
	int *nb_cnt = info;

	/* Read the nb of CNTx counters supported from PMNC */
	*nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;

	/* Add the CPU cycles counter */
	*nb_cnt += 1;
}
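
/*
 * PMCR_EL0 is banked per CPU, so the read above must run on a CPU that this
 * PMU instance actually supports; smp_call_function_any() below guarantees
 * that, and the result lands in arm_pmu->num_events.
 */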
static int armv8pmu_probe_num_events(struct arm_pmu *arm_pmu)
{
	return smp_call_function_any(&arm_pmu->supported_cpus,
				     armv8pmu_read_num_pmnc_events,
				     &arm_pmu->num_events, 1);
}

static void armv8_pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->handle_irq		= armv8pmu_handle_irq;
	cpu_pmu->enable			= armv8pmu_enable_event;
	cpu_pmu->disable		= armv8pmu_disable_event;
	cpu_pmu->read_counter		= armv8pmu_read_counter;
	cpu_pmu->write_counter		= armv8pmu_write_counter;
	cpu_pmu->get_event_idx		= armv8pmu_get_event_idx;
	cpu_pmu->start			= armv8pmu_start;
	cpu_pmu->stop			= armv8pmu_stop;
	cpu_pmu->reset			= armv8pmu_reset;
	cpu_pmu->max_period		= (1LLU << 32) - 1;
	cpu_pmu->set_event_filter	= armv8pmu_set_event_filter;
}

static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
{
	armv8_pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv8_pmuv3";
	cpu_pmu->map_event	= armv8_pmuv3_map_event;
	return armv8pmu_probe_num_events(cpu_pmu);
}

static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv8_pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv8_cortex_a53";
	cpu_pmu->map_event	= armv8_a53_map_event;
	return armv8pmu_probe_num_events(cpu_pmu);
}

static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv8_pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv8_cortex_a57";
	cpu_pmu->map_event	= armv8_a57_map_event;
	return armv8pmu_probe_num_events(cpu_pmu);
}

static const struct of_device_id armv8_pmu_of_device_ids[] = {
	{.compatible = "arm,armv8-pmuv3",	.data = armv8_pmuv3_init},
	{.compatible = "arm,cortex-a53-pmu",	.data = armv8_a53_pmu_init},
	{.compatible = "arm,cortex-a57-pmu",	.data = armv8_a57_pmu_init},
	{},
};
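
/*
 * A matching device tree node looks like the sketch below; the interrupt
 * specifier is illustrative and board-specific, not mandated by this
 * driver:
 *
 *	pmu {
 *		compatible = "arm,cortex-a57-pmu";
 *		interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
 *	};
 */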

static int armv8_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
}

static struct platform_driver armv8_pmu_driver = {
	.driver		= {
		.name	= "armv8-pmu",
		.of_match_table = armv8_pmu_of_device_ids,
	},
	.probe		= armv8_pmu_device_probe,
};

static int __init register_armv8_pmu_driver(void)
{
	return platform_driver_register(&armv8_pmu_driver);
}
device_initcall(register_armv8_pmu_driver);