// SPDX-License-Identifier: GPL-2.0+
//
// Linux performance counter support for ARC CPUs.
// This code is inspired by the perf support of various other architectures.
//
// Copyright (C) 2013-2018 Synopsys, Inc. (www.synopsys.com)

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <asm/arcregs.h>
#include <asm/stacktrace.h>

/* HW holds 8 symbols + one for null terminator */
#define ARCPMU_EVENT_NAME_LEN	9

enum arc_pmu_attr_groups {
	ARCPMU_ATTR_GR_EVENTS,
	ARCPMU_ATTR_GR_FORMATS,
	ARCPMU_NR_ATTR_GR
};

struct arc_pmu_raw_event_entry {
	char name[ARCPMU_EVENT_NAME_LEN];
};

struct arc_pmu {
	struct pmu	pmu;
	unsigned int	irq;
	int		n_counters;
	int		n_events;
	u64		max_period;
	int		ev_hw_idx[PERF_COUNT_ARC_HW_MAX];

	struct arc_pmu_raw_event_entry	*raw_entry;
	struct attribute		**attrs;
	struct perf_pmu_events_attr	*attr;
	const struct attribute_group *attr_groups[ARCPMU_NR_ATTR_GR + 1];
};

struct arc_pmu_cpu {
	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	unsigned long	used_mask[BITS_TO_LONGS(ARC_PERF_MAX_COUNTERS)];

	/*
	 * The events that are active on the PMU for the given index.
	 */
	struct perf_event *act_counter[ARC_PERF_MAX_COUNTERS];
};

struct arc_callchain_trace {
	int depth;
	void *perf_stuff;
};

static int callchain_trace(unsigned int addr, void *data)
{
	struct arc_callchain_trace *ctrl = data;
	struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;

	perf_callchain_store(entry, addr);

	if (ctrl->depth++ < 3)
		return 0;

	return -1;
}

void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			   struct pt_regs *regs)
{
	struct arc_callchain_trace ctrl = {
		.depth = 0,
		.perf_stuff = entry,
	};

	arc_unwind_core(NULL, regs, callchain_trace, &ctrl);
}

void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
			 struct pt_regs *regs)
{
	/*
	 * User stack can't be unwound trivially with kernel dwarf unwinder
	 * So for now just record the user PC
	 */
	perf_callchain_store(entry, instruction_pointer(regs));
}

static struct arc_pmu *arc_pmu;
static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu);

/* read counter #idx; note that counter# != event# on ARC! */
static u64 arc_pmu_read_counter(int idx)
{
	u32 tmp;
	u64 result;

	/*
	 * ARC supports making 'snapshots' of the counters, so we don't
	 * need to care about counters wrapping to 0 underneath our feet
	 */
	write_aux_reg(ARC_REG_PCT_INDEX, idx);
	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN);
	result = (u64) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
	result |= read_aux_reg(ARC_REG_PCT_SNAPL);

	return result;
}
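
/*
 * Fold the counter delta since the last read into the generic perf
 * bookkeeping: event->count grows by "delta" while period_left shrinks
 * by the same amount.
 */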
static void arc_perf_event_update(struct perf_event *event,
				  struct hw_perf_event *hwc, int idx)
{
	u64 prev_raw_count = local64_read(&hwc->prev_count);
	u64 new_raw_count = arc_pmu_read_counter(idx);
	s64 delta = new_raw_count - prev_raw_count;

	/*
	 * We aren't afraid of hwc->prev_count changing beneath our feet
	 * because there's no way for us to re-enter this function anytime.
	 */
	local64_set(&hwc->prev_count, new_raw_count);

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}

static void arc_pmu_read(struct perf_event *event)
{
	arc_perf_event_update(event, &event->hw, event->hw.idx);
}

static int arc_pmu_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	int ret;

	cache_type	= (config >>  0) & 0xff;
	cache_op	= (config >>  8) & 0xff;
	cache_result	= (config >> 16) & 0xff;

	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = arc_pmu_cache_map[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	pr_debug("init cache event: type/op/result %d/%d/%d with h/w %d \'%s\'\n",
		 cache_type, cache_op, cache_result, ret,
		 arc_pmu_ev_hw_map[ret]);

	return ret;
}

/* initializes hw_perf_event structure if event is supported */
static int arc_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (!is_sampling_event(event)) {
		hwc->sample_period = arc_pmu->max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	hwc->config = 0;

	if (is_isa_arcv2()) {
		/* "exclude user" means "count only kernel" */
		if (event->attr.exclude_user)
			hwc->config |= ARC_REG_PCT_CONFIG_KERN;

		/* "exclude kernel" means "count only user" */
		if (event->attr.exclude_kernel)
			hwc->config |= ARC_REG_PCT_CONFIG_USER;
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -ENOENT;
		if (arc_pmu->ev_hw_idx[event->attr.config] < 0)
			return -ENOENT;
		hwc->config |= arc_pmu->ev_hw_idx[event->attr.config];
		pr_debug("init event %d with h/w %08x \'%s\'\n",
			 (int)event->attr.config, (int)hwc->config,
			 arc_pmu_ev_hw_map[event->attr.config]);
		return 0;

	case PERF_TYPE_HW_CACHE:
		ret = arc_pmu_cache_event(event->attr.config);
		if (ret < 0)
			return ret;
		hwc->config |= arc_pmu->ev_hw_idx[ret];
		pr_debug("init cache event with h/w %08x \'%s\'\n",
			 (int)hwc->config, arc_pmu_ev_hw_map[ret]);
		return 0;

	case PERF_TYPE_RAW:
		if (event->attr.config >= arc_pmu->n_events)
			return -ENOENT;

		hwc->config |= event->attr.config;
		pr_debug("init raw event with idx %lld \'%s\'\n",
			 event->attr.config,
			 arc_pmu->raw_entry[event->attr.config].name);

		return 0;

	default:
		return -ENOENT;
	}
}

/* starts all counters */
static void arc_pmu_enable(struct pmu *pmu)
{
	u32 tmp;

	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1);
}

/* stops all counters */
static void arc_pmu_disable(struct pmu *pmu)
{
	u32 tmp;

	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0);
}
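
/*
 * (Re)arm the counter for the next sampling period: it is preloaded
 * with "max_period - left" so that the overflow point programmed in
 * arc_pmu_add() is reached after "left" more events. Returns non-zero
 * if the previous period had already elapsed.
 */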
static int arc_pmu_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int idx = hwc->idx;
	int overflow = 0;
	u64 value;

	if (unlikely(left <= -period)) {
		/* left underflowed by more than period. */
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	} else if (unlikely(left <= 0)) {
		/* left underflowed by less than period. */
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (left > arc_pmu->max_period)
		left = arc_pmu->max_period;

	value = arc_pmu->max_period - left;
	local64_set(&hwc->prev_count, value);

	/* Select counter */
	write_aux_reg(ARC_REG_PCT_INDEX, idx);

	/* Write value */
	write_aux_reg(ARC_REG_PCT_COUNTL, lower_32_bits(value));
	write_aux_reg(ARC_REG_PCT_COUNTH, upper_32_bits(value));

	perf_event_update_userpage(event);

	return overflow;
}

/*
 * Assigns hardware counter to hardware condition.
 * Note that there is no separate start/stop mechanism;
 * stopping is achieved by assigning the 'never' condition
 */
static void arc_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (WARN_ON_ONCE(idx == -1))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	arc_pmu_event_set_period(event);

	/* Enable interrupt for this counter */
	if (is_sampling_event(event))
		write_aux_reg(ARC_REG_PCT_INT_CTRL,
			      read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));

	/* enable ARC pmu here */
	write_aux_reg(ARC_REG_PCT_INDEX, idx);		/* counter # */
	write_aux_reg(ARC_REG_PCT_CONFIG, hwc->config);	/* condition */
}

static void arc_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	/* Disable interrupt for this counter */
	if (is_sampling_event(event)) {
		/*
		 * Reset interrupt flag by writing of 1. This is required
		 * to make sure pending interrupt was not left.
		 */
		write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
		write_aux_reg(ARC_REG_PCT_INT_CTRL,
			      read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~BIT(idx));
	}

	if (!(event->hw.state & PERF_HES_STOPPED)) {
		/* stop ARC pmu here */
		write_aux_reg(ARC_REG_PCT_INDEX, idx);

		/* condition code #0 is always "never" */
		write_aux_reg(ARC_REG_PCT_CONFIG, 0);

		event->hw.state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) &&
	    !(event->hw.state & PERF_HES_UPTODATE)) {
		arc_perf_event_update(event, &event->hw, idx);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

static void arc_pmu_del(struct perf_event *event, int flags)
{
	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);

	arc_pmu_stop(event, PERF_EF_UPDATE);
	__clear_bit(event->hw.idx, pmu_cpu->used_mask);

	pmu_cpu->act_counter[event->hw.idx] = 0;

	perf_event_update_userpage(event);
}

/* allocate hardware counter and optionally start counting */
static int arc_pmu_add(struct perf_event *event, int flags)
{
	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	idx = ffz(pmu_cpu->used_mask[0]);
	if (idx == arc_pmu->n_counters)
		return -EAGAIN;

	__set_bit(idx, pmu_cpu->used_mask);
	hwc->idx = idx;

	write_aux_reg(ARC_REG_PCT_INDEX, idx);

	pmu_cpu->act_counter[idx] = event;

	if (is_sampling_event(event)) {
		/* Mimic full counter overflow as other arches do */
		write_aux_reg(ARC_REG_PCT_INT_CNTL,
			      lower_32_bits(arc_pmu->max_period));
		write_aux_reg(ARC_REG_PCT_INT_CNTH,
			      upper_32_bits(arc_pmu->max_period));
	}

	write_aux_reg(ARC_REG_PCT_CONFIG, 0);
	write_aux_reg(ARC_REG_PCT_COUNTL, 0);
	write_aux_reg(ARC_REG_PCT_COUNTH, 0);
	local64_set(&hwc->prev_count, 0);

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		arc_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);

	return 0;
}
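
/*
 * Overflow interrupt handler (ARCv2 only): counting is globally
 * stopped while pending overflows are serviced, so the counters don't
 * advance while they are being reprogrammed.
 */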
#ifdef CONFIG_ISA_ARCV2
static irqreturn_t arc_pmu_intr(int irq, void *dev)
{
	struct perf_sample_data data;
	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
	struct pt_regs *regs;
	unsigned int active_ints;
	int idx;

	arc_pmu_disable(&arc_pmu->pmu);

	active_ints = read_aux_reg(ARC_REG_PCT_INT_ACT);
	if (!active_ints)
		goto done;

	regs = get_irq_regs();

	do {
		struct perf_event *event;
		struct hw_perf_event *hwc;

		idx = __ffs(active_ints);

		/* Reset interrupt flag by writing of 1 */
		write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));

		/*
		 * On reset of "interrupt active" bit corresponding
		 * "interrupt enable" bit gets automatically reset as well.
		 * Now we need to re-enable interrupt for the counter.
		 */
		write_aux_reg(ARC_REG_PCT_INT_CTRL,
			      read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));

		event = pmu_cpu->act_counter[idx];
		hwc = &event->hw;

		WARN_ON_ONCE(hwc->idx != idx);

		arc_perf_event_update(event, &event->hw, event->hw.idx);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (arc_pmu_event_set_period(event)) {
			if (perf_event_overflow(event, &data, regs))
				arc_pmu_stop(event, 0);
		}

		active_ints &= ~BIT(idx);
	} while (active_ints);

done:
	arc_pmu_enable(&arc_pmu->pmu);

	return IRQ_HANDLED;
}
#else

static irqreturn_t arc_pmu_intr(int irq, void *dev)
{
	return IRQ_NONE;
}

#endif /* CONFIG_ISA_ARCV2 */

static void arc_cpu_pmu_irq_init(void *data)
{
	int irq = *(int *)data;

	enable_percpu_irq(irq, IRQ_TYPE_NONE);

	/* Clear all pending interrupt flags */
	write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
}
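
/*
 * sysfs plumbing for the dynamically discovered events: each countable
 * condition read from the hardware in probe() is exported under
 * /sys/bus/event_source/devices/arc_pct/events/, so tooling can resolve
 * it by name (for example "perf stat -e arc_pct/crun/" -- the actual
 * event names depend on the hardware build).
 */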

/* Event field occupies the bottom 15 bits of our config field */
PMU_FORMAT_ATTR(event, "config:0-14");
static struct attribute *arc_pmu_format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group arc_pmu_format_attr_gr = {
	.name = "format",
	.attrs = arc_pmu_format_attrs,
};

static ssize_t arc_pmu_events_sysfs_show(struct device *dev,
					 struct device_attribute *attr,
					 char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
}

/*
 * We don't add attrs here as we don't have pre-defined list of perf events.
 * We will generate and add attrs dynamically in probe() after we read HW
 * configuration.
 */
static struct attribute_group arc_pmu_events_attr_gr = {
	.name = "events",
};

static void arc_pmu_add_raw_event_attr(int j, char *str)
{
	memmove(arc_pmu->raw_entry[j].name, str, ARCPMU_EVENT_NAME_LEN - 1);
	arc_pmu->attr[j].attr.attr.name = arc_pmu->raw_entry[j].name;
	arc_pmu->attr[j].attr.attr.mode = VERIFY_OCTAL_PERMISSIONS(0444);
	arc_pmu->attr[j].attr.show = arc_pmu_events_sysfs_show;
	arc_pmu->attr[j].id = j;
	arc_pmu->attrs[j] = &(arc_pmu->attr[j].attr.attr);
}
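
/*
 * "n_events + 1" leaves a zeroed sentinel entry at the end of the attr
 * arrays: perf attribute lists must be NULL-terminated.
 */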
static int arc_pmu_raw_alloc(struct device *dev)
{
	arc_pmu->attr = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
		sizeof(*arc_pmu->attr), GFP_KERNEL | __GFP_ZERO);
	if (!arc_pmu->attr)
		return -ENOMEM;

	arc_pmu->attrs = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
		sizeof(*arc_pmu->attrs), GFP_KERNEL | __GFP_ZERO);
	if (!arc_pmu->attrs)
		return -ENOMEM;

	arc_pmu->raw_entry = devm_kmalloc_array(dev, arc_pmu->n_events,
		sizeof(*arc_pmu->raw_entry), GFP_KERNEL | __GFP_ZERO);
	if (!arc_pmu->raw_entry)
		return -ENOMEM;

	return 0;
}

static inline bool event_in_hw_event_map(int i, char *name)
{
	if (!arc_pmu_ev_hw_map[i])
		return false;

	if (!strlen(arc_pmu_ev_hw_map[i]))
		return false;

	if (strcmp(arc_pmu_ev_hw_map[i], name))
		return false;

	return true;
}

static void arc_pmu_map_hw_event(int j, char *str)
{
	int i;

	/* See if HW condition has been mapped to a perf event_id */
	for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
		if (event_in_hw_event_map(i, str)) {
			pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
				 i, str, j);
			arc_pmu->ev_hw_idx[i] = j;
		}
	}
}

static int arc_pmu_device_probe(struct platform_device *pdev)
{
	struct arc_reg_pct_build pct_bcr;
	struct arc_reg_cc_build cc_bcr;
	int i, has_interrupts, irq = -1;
	int counter_size;	/* in bits */

	union cc_name {
		struct {
			u32 word0, word1;
			char sentinel;
		} indiv;
		char str[ARCPMU_EVENT_NAME_LEN];
	} cc_name;

	READ_BCR(ARC_REG_PCT_BUILD, pct_bcr);
	if (!pct_bcr.v) {
		pr_err("This core does not have performance counters!\n");
		return -ENODEV;
	}
	BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32);
	if (WARN_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS))
		return -EINVAL;

	READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
	if (WARN(!cc_bcr.v, "Counters exist but No countable conditions?"))
		return -EINVAL;

	arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
	if (!arc_pmu)
		return -ENOMEM;

	arc_pmu->n_events = cc_bcr.c;

	if (arc_pmu_raw_alloc(&pdev->dev))
		return -ENOMEM;

	has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0;

	arc_pmu->n_counters = pct_bcr.c;
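
	/*
	 * Cap the sampling period at half the counter range so the
	 * unsigned "new - prev" delta in arc_perf_event_update() can
	 * never wrap ambiguously.
	 */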
	counter_size = 32 + (pct_bcr.s << 4);
	arc_pmu->max_period = (1ULL << counter_size) / 2 - 1ULL;

	pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n",
		arc_pmu->n_counters, counter_size, cc_bcr.c,
		has_interrupts ? ", [overflow IRQ support]" : "");

	cc_name.str[ARCPMU_EVENT_NAME_LEN - 1] = 0;
	for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++)
		arc_pmu->ev_hw_idx[i] = -1;

	/* loop thru all available h/w condition indexes */
	for (i = 0; i < cc_bcr.c; i++) {
		write_aux_reg(ARC_REG_CC_INDEX, i);
		cc_name.indiv.word0 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME0));
		cc_name.indiv.word1 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME1));

		arc_pmu_map_hw_event(i, cc_name.str);
		arc_pmu_add_raw_event_attr(i, cc_name.str);
	}

	arc_pmu_events_attr_gr.attrs = arc_pmu->attrs;
	arc_pmu->attr_groups[ARCPMU_ATTR_GR_EVENTS] = &arc_pmu_events_attr_gr;
	arc_pmu->attr_groups[ARCPMU_ATTR_GR_FORMATS] = &arc_pmu_format_attr_gr;

	arc_pmu->pmu = (struct pmu) {
		.pmu_enable	= arc_pmu_enable,
		.pmu_disable	= arc_pmu_disable,
		.event_init	= arc_pmu_event_init,
		.add		= arc_pmu_add,
		.del		= arc_pmu_del,
		.start		= arc_pmu_start,
		.stop		= arc_pmu_stop,
		.read		= arc_pmu_read,
		.attr_groups	= arc_pmu->attr_groups,
	};

	if (has_interrupts) {
		irq = platform_get_irq(pdev, 0);
		if (irq >= 0) {
			int ret;

			arc_pmu->irq = irq;

			/* intc map function ensures irq_set_percpu_devid() called */
			ret = request_percpu_irq(irq, arc_pmu_intr,
						 "ARC perf counters",
						 this_cpu_ptr(&arc_pmu_cpu));

			if (!ret)
				on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
			else
				irq = -1;
		}
	}

	if (irq == -1)
		arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	/*
	 * perf parser doesn't really like '-' symbol in events name, so let's
	 * use '_' in arc pct name as it goes to kernel PMU event prefix.
	 */
	return perf_pmu_register(&arc_pmu->pmu, "arc_pct", PERF_TYPE_RAW);
}
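
/*
 * Illustrative device tree node matched by this driver (the
 * "interrupts" property is optional; when it is absent the PMU is
 * registered without overflow/sampling support):
 *
 *	pmu {
 *		compatible = "snps,archs-pct";
 *		interrupts = <20>;
 *	};
 */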
static const struct of_device_id arc_pmu_match[] = {
	{ .compatible = "snps,arc700-pct" },
	{ .compatible = "snps,archs-pct" },
	{},
};
MODULE_DEVICE_TABLE(of, arc_pmu_match);

static struct platform_driver arc_pmu_driver = {
	.driver	= {
		.name		= "arc-pct",
		.of_match_table = of_match_ptr(arc_pmu_match),
	},
	.probe		= arc_pmu_device_probe,
};

module_platform_driver(arc_pmu_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mischa Jonker <mjonker@synopsys.com>");
MODULE_DESCRIPTION("ARC PMU driver");