// SPDX-License-Identifier: GPL-2.0
/*
 * RISC-V performance counter support.
 *
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * This code is based on ARM perf event code which is in turn based on
 * sparc64 and x86 code.
 */

#define pr_fmt(fmt) "riscv-pmu-sbi: " fmt

#include <linux/mod_devicetable.h>
#include <linux/perf/riscv_pmu.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of.h>
#include <linux/cpu_pm.h>
#include <linux/sched/clock.h>
#include <linux/soc/andes/irq.h>

#include <asm/errata_list.h>
#include <asm/sbi.h>
#include <asm/cpufeature.h>

#define ALT_SBI_PMU_OVERFLOW(__ovl)					\
asm volatile(ALTERNATIVE_2(						\
	"csrr %0, " __stringify(CSR_SSCOUNTOVF),			\
	"csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF),		\
		THEAD_VENDOR_ID, ERRATA_THEAD_PMU,			\
		CONFIG_ERRATA_THEAD_PMU,				\
	"csrr %0, " __stringify(ANDES_CSR_SCOUNTEROF),			\
		0, RISCV_ISA_EXT_XANDESPMU,				\
		CONFIG_ANDES_CUSTOM_PMU)				\
	: "=r" (__ovl) :						\
	: "memory")

#define ALT_SBI_PMU_OVF_CLEAR_PENDING(__irq_mask)			\
	asm volatile(ALTERNATIVE(					\
		"csrc " __stringify(CSR_IP) ", %0\n\t",			\
		"csrc " __stringify(ANDES_CSR_SLIP) ", %0\n\t",		\
		0, RISCV_ISA_EXT_XANDESPMU,				\
		CONFIG_ANDES_CUSTOM_PMU)				\
		: : "r"(__irq_mask)					\
		: "memory")

#define SYSCTL_NO_USER_ACCESS	0
#define SYSCTL_USER_ACCESS	1
#define SYSCTL_LEGACY		2

#define PERF_EVENT_FLAG_NO_USER_ACCESS	BIT(SYSCTL_NO_USER_ACCESS)
#define PERF_EVENT_FLAG_USER_ACCESS	BIT(SYSCTL_USER_ACCESS)
#define PERF_EVENT_FLAG_LEGACY		BIT(SYSCTL_LEGACY)
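
/*
 * Rough meaning of the three access modes above (derived from how
 * SCOUNTEREN is programmed later in this file): NO_USER_ACCESS disables
 * direct counter reads from userspace, USER_ACCESS only exposes counters
 * that a task has mmap'd, and LEGACY unconditionally exposes the
 * cycle/instret CSRs for backward compatibility.
 */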

PMU_FORMAT_ATTR(event, "config:0-47");
PMU_FORMAT_ATTR(firmware, "config:63");

static struct attribute *riscv_arch_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_firmware.attr,
	NULL,
};

static struct attribute_group riscv_pmu_format_group = {
	.name = "format",
	.attrs = riscv_arch_formats_attr,
};

static const struct attribute_group *riscv_pmu_attr_groups[] = {
	&riscv_pmu_format_group,
	NULL,
};
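
/*
 * Illustrative (not exhaustive) userspace usage of the format attributes
 * above, assuming the PMU is registered as "cpu" as done in
 * pmu_sbi_device_probe():
 *
 *   perf stat -e cpu/event=0x23,firmware=0/ -- <workload>
 *
 * The event value 0x23 is only an example. Bits 0-47 of the config carry
 * the event value and bit 63 selects a firmware event; see
 * pmu_sbi_event_map() for the decoding.
 */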

/* Allow user mode access by default */
static int sysctl_perf_user_access __read_mostly = SYSCTL_USER_ACCESS;

/*
 * RISC-V doesn't have heterogeneous harts yet. This needs to become
 * per-CPU data once harts with different PMU counters are supported.
 */
static union sbi_pmu_ctr_info *pmu_ctr_list;
static bool riscv_pmu_use_irq;
static unsigned int riscv_pmu_irq_num;
static unsigned int riscv_pmu_irq_mask;
static unsigned int riscv_pmu_irq;

/* Cache the available counters in a bitmask */
static unsigned long cmask;

struct sbi_pmu_event_data {
	union {
		union {
			struct hw_gen_event {
				uint32_t event_code:16;
				uint32_t event_type:4;
				uint32_t reserved:12;
			} hw_gen_event;
			struct hw_cache_event {
				uint32_t result_id:1;
				uint32_t op_id:2;
				uint32_t cache_id:13;
				uint32_t event_type:4;
				uint32_t reserved:12;
			} hw_cache_event;
		};
		uint32_t event_idx;
	};
};
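
/*
 * The overlaid event_idx view yields the SBI event index directly: for both
 * layouts above, bits [15:0] hold the event code (or the packed
 * result/op/cache ids for cache events) and bits [19:16] hold the SBI event
 * type.
 */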

static const struct sbi_pmu_event_data pmu_hw_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= {.hw_gen_event = {
							SBI_PMU_HW_CPU_CYCLES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_INSTRUCTIONS]		= {.hw_gen_event = {
							SBI_PMU_HW_INSTRUCTIONS,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_CACHE_REFERENCES]	= {.hw_gen_event = {
							SBI_PMU_HW_CACHE_REFERENCES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_CACHE_MISSES]		= {.hw_gen_event = {
							SBI_PMU_HW_CACHE_MISSES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= {.hw_gen_event = {
							SBI_PMU_HW_BRANCH_INSTRUCTIONS,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BRANCH_MISSES]		= {.hw_gen_event = {
							SBI_PMU_HW_BRANCH_MISSES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BUS_CYCLES]		= {.hw_gen_event = {
							SBI_PMU_HW_BUS_CYCLES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= {.hw_gen_event = {
							SBI_PMU_HW_STALLED_CYCLES_FRONTEND,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= {.hw_gen_event = {
							SBI_PMU_HW_STALLED_CYCLES_BACKEND,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= {.hw_gen_event = {
							SBI_PMU_HW_REF_CPU_CYCLES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
};

#define C(x) PERF_COUNT_HW_CACHE_##x
static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
};

static int pmu_sbi_ctr_get_width(int idx)
{
	return pmu_ctr_list[idx].width;
}

static bool pmu_sbi_ctr_is_fw(int cidx)
{
	union sbi_pmu_ctr_info *info;

	info = &pmu_ctr_list[cidx];
	if (!info)
		return false;

	return (info->type == SBI_PMU_CTR_TYPE_FW) ? true : false;
}

/*
 * Returns the counter width of a programmable counter and number of hardware
 * counters. As we don't support heterogeneous CPUs yet, it is okay to just
 * return the counter width of the first programmable counter.
 */
int riscv_pmu_get_hpm_info(u32 *hw_ctr_width, u32 *num_hw_ctr)
{
	int i;
	union sbi_pmu_ctr_info *info;
	u32 hpm_width = 0, hpm_count = 0;

	if (!cmask)
		return -EINVAL;

	for_each_set_bit(i, &cmask, RISCV_MAX_COUNTERS) {
		info = &pmu_ctr_list[i];
		if (!info)
			continue;
		if (!hpm_width && info->csr != CSR_CYCLE && info->csr != CSR_INSTRET)
			hpm_width = info->width;
		if (info->type == SBI_PMU_CTR_TYPE_HW)
			hpm_count++;
	}

	*hw_ctr_width = hpm_width;
	*num_hw_ctr = hpm_count;

	return 0;
}
EXPORT_SYMBOL_GPL(riscv_pmu_get_hpm_info);
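
/*
 * Note: riscv_pmu_get_hpm_info() is exported for other in-kernel users of
 * the PMU counter topology (the KVM SBI PMU implementation is the expected
 * consumer, though nothing in this file depends on that).
 */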

static uint8_t pmu_sbi_csr_index(struct perf_event *event)
{
	return pmu_ctr_list[event->hw.idx].csr - CSR_CYCLE;
}

static unsigned long pmu_sbi_get_filter_flags(struct perf_event *event)
{
	unsigned long cflags = 0;
	bool guest_events = false;

	if (event->attr.config1 & RISCV_PMU_CONFIG1_GUEST_EVENTS)
		guest_events = true;
	if (event->attr.exclude_kernel)
		cflags |= guest_events ? SBI_PMU_CFG_FLAG_SET_VSINH : SBI_PMU_CFG_FLAG_SET_SINH;
	if (event->attr.exclude_user)
		cflags |= guest_events ? SBI_PMU_CFG_FLAG_SET_VUINH : SBI_PMU_CFG_FLAG_SET_UINH;
	if (guest_events && event->attr.exclude_hv)
		cflags |= SBI_PMU_CFG_FLAG_SET_SINH;
	if (event->attr.exclude_host)
		cflags |= SBI_PMU_CFG_FLAG_SET_UINH | SBI_PMU_CFG_FLAG_SET_SINH;
	if (event->attr.exclude_guest)
		cflags |= SBI_PMU_CFG_FLAG_SET_VSINH | SBI_PMU_CFG_FLAG_SET_VUINH;

	return cflags;
}

static int pmu_sbi_ctr_get_idx(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	struct sbiret ret;
	int idx;
	uint64_t cbase = 0, cmask = rvpmu->cmask;
	unsigned long cflags = 0;

	cflags = pmu_sbi_get_filter_flags(event);

	/*
	 * In legacy mode, we have to force the fixed counters for those events
	 * but not in the user access mode as we want to use the other counters
	 * that support sampling/filtering.
	 */
	if (hwc->flags & PERF_EVENT_FLAG_LEGACY) {
		if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
			cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
			cmask = 1;
		} else if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS) {
			cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
			cmask = 1UL << (CSR_INSTRET - CSR_CYCLE);
		}
	}

	/* retrieve the available counter index */
#if defined(CONFIG_32BIT)
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
			cmask, cflags, hwc->event_base, hwc->config,
			hwc->config >> 32);
#else
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
			cmask, cflags, hwc->event_base, hwc->config, 0);
#endif
	if (ret.error) {
		pr_debug("Not able to find a counter for event %lx config %llx\n",
			 hwc->event_base, hwc->config);
		return sbi_err_map_linux_errno(ret.error);
	}

	idx = ret.value;
	if (!test_bit(idx, &rvpmu->cmask) || !pmu_ctr_list[idx].value)
		return -ENOENT;

	/* Additional sanity check for the counter id */
	if (pmu_sbi_ctr_is_fw(idx)) {
		if (!test_and_set_bit(idx, cpuc->used_fw_ctrs))
			return idx;
	} else {
		if (!test_and_set_bit(idx, cpuc->used_hw_ctrs))
			return idx;
	}

	return -ENOENT;
}

static void pmu_sbi_ctr_clear_idx(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	int idx = hwc->idx;

	if (pmu_sbi_ctr_is_fw(idx))
		clear_bit(idx, cpuc->used_fw_ctrs);
	else
		clear_bit(idx, cpuc->used_hw_ctrs);
}

static int pmu_event_find_cache(u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = pmu_cache_event_map[cache_type][cache_op][cache_result].event_idx;

	return ret;
}

static bool pmu_sbi_is_fw_event(struct perf_event *event)
{
	u32 type = event->attr.type;
	u64 config = event->attr.config;

	if ((type == PERF_TYPE_RAW) && ((config >> 63) == 1))
		return true;
	else
		return false;
}

static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
{
	u32 type = event->attr.type;
	u64 config = event->attr.config;
	int bSoftware;
	u64 raw_config_val;
	int ret;

	switch (type) {
	case PERF_TYPE_HARDWARE:
		if (config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		ret = pmu_hw_event_map[event->attr.config].event_idx;
		break;
	case PERF_TYPE_HW_CACHE:
		ret = pmu_event_find_cache(config);
		break;
	case PERF_TYPE_RAW:
		/*
		 * As per the SBI specification, the upper 16 bits must be unused
		 * for a raw event. Use the MSB (bit 63) to distinguish between a
		 * hardware raw event and firmware events.
		 */
		bSoftware = config >> 63;
		raw_config_val = config & RISCV_PMU_RAW_EVENT_MASK;
		if (bSoftware) {
			ret = (raw_config_val & 0xFFFF) |
			      (SBI_PMU_EVENT_TYPE_FW << 16);
		} else {
			ret = RISCV_PMU_RAW_EVENT_IDX;
			*econfig = raw_config_val;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
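
/*
 * Worked example of the mapping above: a PERF_TYPE_RAW event with bit 63
 * set keeps only its low 16 bits and gets SBI_PMU_EVENT_TYPE_FW placed in
 * bits [19:16], while a raw hardware event returns RISCV_PMU_RAW_EVENT_IDX
 * and hands the masked config to the SBI firmware through *econfig.
 */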

static u64 pmu_sbi_ctr_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct sbiret ret;
	union sbi_pmu_ctr_info info;
	u64 val = 0;

	if (pmu_sbi_is_fw_event(event)) {
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ,
				hwc->idx, 0, 0, 0, 0, 0);
		if (!ret.error)
			val = ret.value;
	} else {
		info = pmu_ctr_list[idx];
		val = riscv_pmu_ctr_read_csr(info.csr);
		if (IS_ENABLED(CONFIG_32BIT))
			val = ((u64)riscv_pmu_ctr_read_csr(info.csr + 0x80)) << 31 | val;
	}

	return val;
}

static void pmu_sbi_set_scounteren(void *arg)
{
	struct perf_event *event = (struct perf_event *)arg;

	if (event->hw.idx != -1)
		csr_write(CSR_SCOUNTEREN,
			  csr_read(CSR_SCOUNTEREN) | BIT(pmu_sbi_csr_index(event)));
}

static void pmu_sbi_reset_scounteren(void *arg)
{
	struct perf_event *event = (struct perf_event *)arg;

	if (event->hw.idx != -1)
		csr_write(CSR_SCOUNTEREN,
			  csr_read(CSR_SCOUNTEREN) & ~BIT(pmu_sbi_csr_index(event)));
}

static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
{
	struct sbiret ret;
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;

#if defined(CONFIG_32BIT)
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
			1, flag, ival, ival >> 32, 0);
#else
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
			1, flag, ival, 0, 0);
#endif
	if (ret.error && (ret.error != SBI_ERR_ALREADY_STARTED))
		pr_err("Starting counter idx %d failed with error %d\n",
		       hwc->idx, sbi_err_map_linux_errno(ret.error));

	if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
	    (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
		pmu_sbi_set_scounteren((void *)event);
}

static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
{
	struct sbiret ret;
	struct hw_perf_event *hwc = &event->hw;

	if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
	    (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
		pmu_sbi_reset_scounteren((void *)event);

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0);
	if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) &&
	    flag != SBI_PMU_STOP_FLAG_RESET)
		pr_err("Stopping counter idx %d failed with error %d\n",
		       hwc->idx, sbi_err_map_linux_errno(ret.error));
}

static int pmu_sbi_find_num_ctrs(void)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_NUM_COUNTERS, 0, 0, 0, 0, 0, 0);
	if (!ret.error)
		return ret.value;
	else
		return sbi_err_map_linux_errno(ret.error);
}

static int pmu_sbi_get_ctrinfo(int nctr, unsigned long *mask)
{
	struct sbiret ret;
	int i, num_hw_ctr = 0, num_fw_ctr = 0;
	union sbi_pmu_ctr_info cinfo;

	pmu_ctr_list = kcalloc(nctr, sizeof(*pmu_ctr_list), GFP_KERNEL);
	if (!pmu_ctr_list)
		return -ENOMEM;

	for (i = 0; i < nctr; i++) {
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0);
		if (ret.error)
			/* The logical counter ids are not expected to be contiguous */
			continue;

		*mask |= BIT(i);

		cinfo.value = ret.value;
		if (cinfo.type == SBI_PMU_CTR_TYPE_FW)
			num_fw_ctr++;
		else
			num_hw_ctr++;
		pmu_ctr_list[i].value = cinfo.value;
	}

	pr_info("%d firmware and %d hardware counters\n", num_fw_ctr, num_hw_ctr);

	return 0;
}

static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
{
	/*
	 * No need to check the error because we are disabling all the counters
	 * which may include counters that are not enabled yet.
	 */
	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
		  0, pmu->cmask, 0, 0, 0, 0);
}

static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
{
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);

	/* No need to check the error here as we can't do anything about the error */
	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, 0,
		  cpu_hw_evt->used_hw_ctrs[0], 0, 0, 0, 0);
}

/*
 * This function starts all the used counters in a two-step approach.
 * Any counter that did not overflow can be started in a single step,
 * while the overflowed counters need to be restarted with an updated
 * initialization value.
 */
static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
					       unsigned long ctr_ovf_mask)
{
	int idx = 0;
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
	struct perf_event *event;
	unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;
	unsigned long ctr_start_mask = 0;
	uint64_t max_period;
	struct hw_perf_event *hwc;
	u64 init_val = 0;

	ctr_start_mask = cpu_hw_evt->used_hw_ctrs[0] & ~ctr_ovf_mask;

	/* Start all the counters that did not overflow in a single shot */
	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, 0, ctr_start_mask,
		  0, 0, 0, 0);

	/* Reinitialize and start all the counters that overflowed */
	while (ctr_ovf_mask) {
		if (ctr_ovf_mask & 0x01) {
			event = cpu_hw_evt->events[idx];
			hwc = &event->hw;
			max_period = riscv_pmu_ctr_get_width_mask(event);
			init_val = local64_read(&hwc->prev_count) & max_period;
#if defined(CONFIG_32BIT)
			sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
				  flag, init_val, init_val >> 32, 0);
#else
			sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
				  flag, init_val, 0, 0);
#endif
			perf_event_update_userpage(event);
		}
		ctr_ovf_mask = ctr_ovf_mask >> 1;
		idx++;
	}
}

static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
{
	struct perf_sample_data data;
	struct pt_regs *regs;
	struct hw_perf_event *hw_evt;
	union sbi_pmu_ctr_info *info;
	int lidx, hidx, fidx;
	struct riscv_pmu *pmu;
	struct perf_event *event;
	unsigned long overflow;
	unsigned long overflowed_ctrs = 0;
	struct cpu_hw_events *cpu_hw_evt = dev;
	u64 start_clock = sched_clock();

	if (WARN_ON_ONCE(!cpu_hw_evt))
		return IRQ_NONE;

	/* Firmware counters don't support overflow yet */
	fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS);
	if (fidx == RISCV_MAX_COUNTERS) {
		csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
		return IRQ_NONE;
	}

	event = cpu_hw_evt->events[fidx];
	if (!event) {
		ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);
		return IRQ_NONE;
	}

	pmu = to_riscv_pmu(event->pmu);
	pmu_sbi_stop_hw_ctrs(pmu);

	/* The overflow status register should only be read after the counters are stopped */
	ALT_SBI_PMU_OVERFLOW(overflow);

	/*
	 * The overflow interrupt pending bit should only be cleared after stopping
	 * all the counters to avoid any race condition.
	 */
	ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);

	/* No overflow bit is set */
	if (!overflow)
		return IRQ_NONE;

	regs = get_irq_regs();

	for_each_set_bit(lidx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) {
		struct perf_event *event = cpu_hw_evt->events[lidx];

		/* Skip if invalid event or user did not request a sampling */
		if (!event || !is_sampling_event(event))
			continue;

		info = &pmu_ctr_list[lidx];
		/* Do a sanity check */
		if (!info || info->type != SBI_PMU_CTR_TYPE_HW)
			continue;

		/* compute hardware counter index */
		hidx = info->csr - CSR_CYCLE;
		/* check if the corresponding bit is set in sscountovf */
		if (!(overflow & BIT(hidx)))
			continue;

		/*
		 * Keep track of overflowed counters so that they can be started
		 * with an updated initial value.
		 */
		overflowed_ctrs |= BIT(lidx);
		hw_evt = &event->hw;
		riscv_pmu_event_update(event);
		perf_sample_data_init(&data, 0, hw_evt->last_period);
		if (riscv_pmu_event_set_period(event)) {
			/*
			 * Unlike other ISAs, RISC-V doesn't have to disable interrupts
			 * to avoid throttling here. As per the specification, the
			 * interrupt remains disabled until the OF bit is set.
			 * Interrupts are enabled again only during the start.
			 * TODO: We will need to stop the guest counters once
			 * virtualization support is added.
			 */
			perf_event_overflow(event, &data, regs);
		}
	}

	pmu_sbi_start_overflow_mask(pmu, overflowed_ctrs);
	perf_sample_event_took(sched_clock() - start_clock);

	return IRQ_HANDLED;
}

static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct riscv_pmu *pmu = hlist_entry_safe(node, struct riscv_pmu, node);
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);

	/*
	 * We keep enabling userspace access to CYCLE, TIME and INSTRET via the
	 * legacy option but that will be removed in the future.
	 */
	if (sysctl_perf_user_access == SYSCTL_LEGACY)
		csr_write(CSR_SCOUNTEREN, 0x7);
	else
		csr_write(CSR_SCOUNTEREN, 0x2);

	/* Stop all the counters so that they can be enabled from perf */
	pmu_sbi_stop_all(pmu);

	if (riscv_pmu_use_irq) {
		cpu_hw_evt->irq = riscv_pmu_irq;
		ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);
		enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE);
	}

	return 0;
}

static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
{
	if (riscv_pmu_use_irq) {
		disable_percpu_irq(riscv_pmu_irq);
	}

	/* Disable all counters access for user mode now */
	csr_write(CSR_SCOUNTEREN, 0x0);

	return 0;
}

static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pdev)
{
	int ret;
	struct cpu_hw_events __percpu *hw_events = pmu->hw_events;
	struct irq_domain *domain = NULL;

	if (riscv_isa_extension_available(NULL, SSCOFPMF)) {
		riscv_pmu_irq_num = RV_IRQ_PMU;
		riscv_pmu_use_irq = true;
	} else if (IS_ENABLED(CONFIG_ERRATA_THEAD_PMU) &&
		   riscv_cached_mvendorid(0) == THEAD_VENDOR_ID &&
		   riscv_cached_marchid(0) == 0 &&
		   riscv_cached_mimpid(0) == 0) {
		riscv_pmu_irq_num = THEAD_C9XX_RV_IRQ_PMU;
		riscv_pmu_use_irq = true;
	} else if (riscv_isa_extension_available(NULL, XANDESPMU) &&
		   IS_ENABLED(CONFIG_ANDES_CUSTOM_PMU)) {
		riscv_pmu_irq_num = ANDES_SLI_CAUSE_BASE + ANDES_RV_IRQ_PMOVI;
		riscv_pmu_use_irq = true;
	}

	riscv_pmu_irq_mask = BIT(riscv_pmu_irq_num % BITS_PER_LONG);

	if (!riscv_pmu_use_irq)
		return -EOPNOTSUPP;

	domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
					  DOMAIN_BUS_ANY);
	if (!domain) {
		pr_err("Failed to find INTC IRQ root domain\n");
		return -ENODEV;
	}

	riscv_pmu_irq = irq_create_mapping(domain, riscv_pmu_irq_num);
	if (!riscv_pmu_irq) {
		pr_err("Failed to map PMU interrupt for node\n");
		return -ENODEV;
	}

	ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_handler, "riscv-pmu", hw_events);
	if (ret) {
		pr_err("registering percpu irq failed [%d]\n", ret);
		return ret;
	}

	return 0;
}

#ifdef CONFIG_CPU_PM
static int riscv_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			       void *v)
{
	struct riscv_pmu *rvpmu = container_of(b, struct riscv_pmu, riscv_pm_nb);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	int enabled = bitmap_weight(cpuc->used_hw_ctrs, RISCV_MAX_COUNTERS);
	struct perf_event *event;
	int idx;

	if (!enabled)
		return NOTIFY_OK;

	for (idx = 0; idx < RISCV_MAX_COUNTERS; idx++) {
		event = cpuc->events[idx];
		if (!event)
			continue;

		switch (cmd) {
		case CPU_PM_ENTER:
			/*
			 * Stop and update the counter
			 */
			riscv_pmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/*
			 * Restore and enable the counter.
			 */
			riscv_pmu_start(event, PERF_EF_RELOAD);
			break;
		default:
			break;
		}
	}

	return NOTIFY_OK;
}

static int riscv_pm_pmu_register(struct riscv_pmu *pmu)
{
	pmu->riscv_pm_nb.notifier_call = riscv_pm_pmu_notify;
	return cpu_pm_register_notifier(&pmu->riscv_pm_nb);
}

static void riscv_pm_pmu_unregister(struct riscv_pmu *pmu)
{
	cpu_pm_unregister_notifier(&pmu->riscv_pm_nb);
}
#else
static inline int riscv_pm_pmu_register(struct riscv_pmu *pmu) { return 0; }
static inline void riscv_pm_pmu_unregister(struct riscv_pmu *pmu) { }
#endif

static void riscv_pmu_destroy(struct riscv_pmu *pmu)
{
	riscv_pm_pmu_unregister(pmu);
	cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
}

static void pmu_sbi_event_init(struct perf_event *event)
{
	/*
	 * The permissions are set at event_init so that we do not depend
	 * on the sysctl value that can change.
	 */
	if (sysctl_perf_user_access == SYSCTL_NO_USER_ACCESS)
		event->hw.flags |= PERF_EVENT_FLAG_NO_USER_ACCESS;
	else if (sysctl_perf_user_access == SYSCTL_USER_ACCESS)
		event->hw.flags |= PERF_EVENT_FLAG_USER_ACCESS;
	else
		event->hw.flags |= PERF_EVENT_FLAG_LEGACY;
}

static void pmu_sbi_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
	if (event->hw.flags & PERF_EVENT_FLAG_NO_USER_ACCESS)
		return;

	if (event->hw.flags & PERF_EVENT_FLAG_LEGACY) {
		if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
		    event->attr.config != PERF_COUNT_HW_INSTRUCTIONS) {
			return;
		}
	}

	/*
	 * The user mmapped the event to directly access it: this is where
	 * we determine, based on sysctl_perf_user_access, if we grant userspace
	 * direct access to this event. That means that within the same task,
	 * some events may be directly accessible and some others may not be,
	 * if the user changes the value of sysctl_perf_user_access in the
	 * meantime.
	 */
	event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;

	/*
	 * We must enable userspace access *before* advertising in the user page
	 * that it is possible to do so to avoid any race.
	 * And we must notify all cpus here because threads that currently run
	 * on other cpus will try to directly access the counter too without
	 * calling pmu_sbi_ctr_start.
	 */
	if (event->hw.flags & PERF_EVENT_FLAG_USER_ACCESS)
		on_each_cpu_mask(mm_cpumask(mm),
				 pmu_sbi_set_scounteren, (void *)event, 1);
}

static void pmu_sbi_event_unmapped(struct perf_event *event, struct mm_struct *mm)
{
	if (event->hw.flags & PERF_EVENT_FLAG_NO_USER_ACCESS)
		return;

	if (event->hw.flags & PERF_EVENT_FLAG_LEGACY) {
		if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
		    event->attr.config != PERF_COUNT_HW_INSTRUCTIONS) {
			return;
		}
	}

	/*
	 * Here we can directly remove user access since the user does not have
	 * access to the user page anymore, so we avoid the racy window where the
	 * user could have read cap_user_rdpmc as true right before we disable
	 * it.
	 */
	event->hw.flags &= ~PERF_EVENT_FLAG_USER_READ_CNT;

	if (event->hw.flags & PERF_EVENT_FLAG_USER_ACCESS)
		on_each_cpu_mask(mm_cpumask(mm),
				 pmu_sbi_reset_scounteren, (void *)event, 1);
}

static void riscv_pmu_update_counter_access(void *info)
{
	if (sysctl_perf_user_access == SYSCTL_LEGACY)
		csr_write(CSR_SCOUNTEREN, 0x7);
	else
		csr_write(CSR_SCOUNTEREN, 0x2);
}

static int riscv_pmu_proc_user_access_handler(struct ctl_table *table,
					      int write, void *buffer,
					      size_t *lenp, loff_t *ppos)
{
	int prev = sysctl_perf_user_access;
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	/*
	 * Test against the previous value since we clear SCOUNTEREN when
	 * sysctl_perf_user_access is set to SYSCTL_USER_ACCESS, but we should
	 * not do that if that was already the case.
	 */
	if (ret || !write || prev == sysctl_perf_user_access)
		return ret;

	on_each_cpu(riscv_pmu_update_counter_access, NULL, 1);

	return 0;
}

static struct ctl_table sbi_pmu_sysctl_table[] = {
	{
		.procname	= "perf_user_access",
		.data		= &sysctl_perf_user_access,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= riscv_pmu_proc_user_access_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_TWO,
	},
	{ }
};
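
/*
 * Illustrative runtime usage, given the register_sysctl("kernel", ...) call
 * in pmu_sbi_device_probe():
 *
 *   echo 0 > /proc/sys/kernel/perf_user_access   # forbid direct user reads
 *   echo 2 > /proc/sys/kernel/perf_user_access   # legacy cycle/instret access
 *
 * A write is propagated to all CPUs through riscv_pmu_update_counter_access().
 */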

static int pmu_sbi_device_probe(struct platform_device *pdev)
{
	struct riscv_pmu *pmu = NULL;
	int ret = -ENODEV;
	int num_counters;

	pr_info("SBI PMU extension is available\n");
	pmu = riscv_pmu_alloc();
	if (!pmu)
		return -ENOMEM;

	num_counters = pmu_sbi_find_num_ctrs();
	if (num_counters < 0) {
		pr_err("SBI PMU extension doesn't provide any counters\n");
		goto out_free;
	}

	/* It is possible to get from SBI more than max number of counters */
	if (num_counters > RISCV_MAX_COUNTERS) {
		num_counters = RISCV_MAX_COUNTERS;
		pr_info("SBI returned more than maximum number of counters. Limiting the number of counters to %d\n", num_counters);
	}

	/* cache all the information about counters now */
	if (pmu_sbi_get_ctrinfo(num_counters, &cmask))
		goto out_free;

	ret = pmu_sbi_setup_irqs(pmu, pdev);
	if (ret < 0) {
		pr_info("Perf sampling/filtering is not supported as sscof extension is not available\n");
		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
	}

	pmu->pmu.attr_groups = riscv_pmu_attr_groups;
	pmu->cmask = cmask;
	pmu->ctr_start = pmu_sbi_ctr_start;
	pmu->ctr_stop = pmu_sbi_ctr_stop;
	pmu->event_map = pmu_sbi_event_map;
	pmu->ctr_get_idx = pmu_sbi_ctr_get_idx;
	pmu->ctr_get_width = pmu_sbi_ctr_get_width;
	pmu->ctr_clear_idx = pmu_sbi_ctr_clear_idx;
	pmu->ctr_read = pmu_sbi_ctr_read;
	pmu->event_init = pmu_sbi_event_init;
	pmu->event_mapped = pmu_sbi_event_mapped;
	pmu->event_unmapped = pmu_sbi_event_unmapped;
	pmu->csr_index = pmu_sbi_csr_index;

	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
	if (ret)
		return ret;

	ret = riscv_pm_pmu_register(pmu);
	if (ret)
		goto out_unregister;

	ret = perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
	if (ret)
		goto out_unregister;

	register_sysctl("kernel", sbi_pmu_sysctl_table);

	return 0;

out_unregister:
	riscv_pmu_destroy(pmu);

out_free:
	kfree(pmu);
	return ret;
}

static struct platform_driver pmu_sbi_driver = {
	.probe		= pmu_sbi_device_probe,
	.driver		= {
		.name	= RISCV_PMU_SBI_PDEV_NAME,
	},
};

static int __init pmu_sbi_devinit(void)
{
	int ret;
	struct platform_device *pdev;

	if (sbi_spec_version < sbi_mk_version(0, 3) ||
	    !sbi_probe_extension(SBI_EXT_PMU)) {
		return 0;
	}

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_RISCV_STARTING,
				      "perf/riscv/pmu:starting",
				      pmu_sbi_starting_cpu, pmu_sbi_dying_cpu);
	if (ret) {
		pr_err("CPU hotplug notifier could not be registered: %d\n",
		       ret);
		return ret;
	}

	ret = platform_driver_register(&pmu_sbi_driver);
	if (ret)
		return ret;

	pdev = platform_device_register_simple(RISCV_PMU_SBI_PDEV_NAME, -1, NULL, 0);
	if (IS_ERR(pdev)) {
		platform_driver_unregister(&pmu_sbi_driver);
		return PTR_ERR(pdev);
	}

	/* Notify legacy implementation that SBI pmu is available */
	riscv_pmu_legacy_skip_init();

	return ret;
}
device_initcall(pmu_sbi_devinit)