/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#define pr_fmt(fmt) "CPU PMU: " fmt

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *cpu_pmu;

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
*/
const char *perf_pmu_name(void)
{
        if (!cpu_pmu)
                return NULL;

        return cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
        int max_events = 0;

        if (cpu_pmu != NULL)
                max_events = cpu_pmu->num_events;

        return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

/* Include the PMU-specific implementations. */
# include "perf_event_xscale.c"
# include "perf_event_v6.c"
# include "perf_event_v7.c"
static void cpu_pmu_enable_percpu_irq(void *data)
{
        int irq = *(int *)data;

        enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

static void cpu_pmu_disable_percpu_irq(void *data)
{
        int irq = *(int *)data;

        disable_percpu_irq(irq);
}

static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
        int i, irq, irqs;
        struct platform_device *pmu_device = cpu_pmu->plat_device;
        struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

        irqs = min(pmu_device->num_resources, num_possible_cpus());

        irq = platform_get_irq(pmu_device, 0);
        if (irq >= 0 && irq_is_percpu(irq)) {
                on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
                free_percpu_irq(irq, &hw_events->percpu_pmu);
        } else {
                for (i = 0; i < irqs; ++i) {
                        if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
                                continue;
                        irq = platform_get_irq(pmu_device, i);
                        if (irq >= 0)
                                free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, i));
                }
        }
}
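
/*
 * Request either the single per-CPU PMU interrupt (enabled on every CPU) or
 * one interrupt per CPU, pinning each interrupt's affinity to its CPU.
 */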
static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
        int i, err, irq, irqs;
        struct platform_device *pmu_device = cpu_pmu->plat_device;
        struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

        if (!pmu_device)
                return -ENODEV;

        irqs = min(pmu_device->num_resources, num_possible_cpus());
        if (irqs < 1) {
                pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
                return 0;
        }

        irq = platform_get_irq(pmu_device, 0);
        if (irq >= 0 && irq_is_percpu(irq)) {
                err = request_percpu_irq(irq, handler, "arm-pmu",
                                         &hw_events->percpu_pmu);
                if (err) {
                        pr_err("unable to request IRQ%d for ARM PMU counters\n",
                               irq);
                        return err;
                }
                on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
        } else {
                for (i = 0; i < irqs; ++i) {
                        err = 0;
                        irq = platform_get_irq(pmu_device, i);
                        if (irq < 0)
                                continue;

                        /*
                         * If we have a single PMU interrupt that we can't shift,
                         * assume that we're running on a uniprocessor machine and
                         * continue. Otherwise, continue without this interrupt.
                         */
                        if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
                                pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
                                        irq, i);
                                continue;
                        }

                        err = request_irq(irq, handler,
                                          IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
                                          per_cpu_ptr(&hw_events->percpu_pmu, i));
                        if (err) {
                                pr_err("unable to request IRQ%d for ARM PMU counters\n",
                                       irq);
                                return err;
                        }

                        cpumask_set_cpu(i, &cpu_pmu->active_irqs);
                }
        }

        return 0;
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
                          void *hcpu)
{
        struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);

        if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
                return NOTIFY_DONE;

        if (pmu->reset)
                pmu->reset(pmu);
        else
                return NOTIFY_DONE;

        return NOTIFY_OK;
}
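
/*
 * Allocate the per-CPU hw_events state, register the hotplug notifier, hook
 * up the request_irq/free_irq callbacks, then reset the PMU on each CPU so
 * the counters start from sane values.
 */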
static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
        int err;
        int cpu;
        struct pmu_hw_events __percpu *cpu_hw_events;

        cpu_hw_events = alloc_percpu(struct pmu_hw_events);
        if (!cpu_hw_events)
                return -ENOMEM;

        cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
        err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
        if (err)
                goto out_hw_events;

        for_each_possible_cpu(cpu) {
                struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
                raw_spin_lock_init(&events->pmu_lock);
                events->percpu_pmu = cpu_pmu;
        }

        cpu_pmu->hw_events = cpu_hw_events;
        cpu_pmu->request_irq = cpu_pmu_request_irq;
        cpu_pmu->free_irq = cpu_pmu_free_irq;

        /* Ensure the PMU has sane values out of reset. */
        if (cpu_pmu->reset)
                on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);

        /* If no interrupts available, set the corresponding capability flag */
        if (!platform_get_irq(cpu_pmu->plat_device, 0))
                cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

        return 0;

out_hw_events:
        free_percpu(cpu_hw_events);
        return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
        unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
        free_percpu(cpu_pmu->hw_events);
}

/*
 * PMU platform driver and devicetree bindings.
*/
static const struct of_device_id cpu_pmu_of_device_ids[] = {
        {.compatible = "arm,cortex-a17-pmu",   .data = armv7_a17_pmu_init},
        {.compatible = "arm,cortex-a15-pmu",   .data = armv7_a15_pmu_init},
        {.compatible = "arm,cortex-a12-pmu",   .data = armv7_a12_pmu_init},
        {.compatible = "arm,cortex-a9-pmu",    .data = armv7_a9_pmu_init},
        {.compatible = "arm,cortex-a8-pmu",    .data = armv7_a8_pmu_init},
        {.compatible = "arm,cortex-a7-pmu",    .data = armv7_a7_pmu_init},
        {.compatible = "arm,cortex-a5-pmu",    .data = armv7_a5_pmu_init},
        {.compatible = "arm,arm11mpcore-pmu",  .data = armv6mpcore_pmu_init},
        {.compatible = "arm,arm1176-pmu",      .data = armv6_1176_pmu_init},
        {.compatible = "arm,arm1136-pmu",      .data = armv6_1136_pmu_init},
        {.compatible = "qcom,krait-pmu",       .data = krait_pmu_init},
        {},
};

static struct platform_device_id cpu_pmu_plat_device_ids[] = {
        {.name = "arm-pmu"},
        {.name = "armv6-pmu"},
        {.name = "armv7-pmu"},
        {.name = "xscale-pmu"},
        {},
};
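
/*
 * Fallback table used when there is no devicetree match: each entry pairs a
 * (masked) CPUID value with the init function for that core's PMU.
 */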
static const struct pmu_probe_info pmu_probe_table[] = {
        ARM_PMU_PROBE(ARM_CPU_PART_ARM1136, armv6_1136_pmu_init),
        ARM_PMU_PROBE(ARM_CPU_PART_ARM1156, armv6_1156_pmu_init),
        ARM_PMU_PROBE(ARM_CPU_PART_ARM1176, armv6_1176_pmu_init),
        ARM_PMU_PROBE(ARM_CPU_PART_ARM11MPCORE, armv6mpcore_pmu_init),
        ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A8, armv7_a8_pmu_init),
        ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init),
        XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V1, xscale1pmu_init),
        XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V2, xscale2pmu_init),
        { /* sentinel value */ }
};

/*
 * CPU PMU identification and probing.
 */
static int probe_current_pmu(struct arm_pmu *pmu)
{
        int cpu = get_cpu();
        unsigned int cpuid = read_cpuid_id();
        int ret = -ENODEV;
        const struct pmu_probe_info *info;

        pr_info("probing PMU on CPU %d\n", cpu);

        for (info = pmu_probe_table; info->init != NULL; info++) {
                if ((cpuid & info->mask) != info->cpuid)
                        continue;
                ret = info->init(pmu);
                break;
        }

        put_cpu();
        return ret;
}
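
/*
 * Platform device probe: allocate the arm_pmu, initialise it from the
 * matching devicetree entry (or fall back to CPUID-based probing), then
 * register it with the perf core via armpmu_register().
 */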
static int cpu_pmu_device_probe(struct platform_device *pdev)
{
        const struct of_device_id *of_id;
        const int (*init_fn)(struct arm_pmu *);
        struct device_node *node = pdev->dev.of_node;
        struct arm_pmu *pmu;
        int ret = -ENODEV;

        if (cpu_pmu) {
                pr_info("attempt to register multiple PMU devices!\n");
                return -ENOSPC;
        }

        pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
        if (!pmu) {
                pr_info("failed to allocate PMU device!\n");
                return -ENOMEM;
        }

        cpu_pmu = pmu;
        cpu_pmu->plat_device = pdev;

        if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
                init_fn = of_id->data;
                ret = init_fn(pmu);
        } else {
                ret = probe_current_pmu(pmu);
        }

        if (ret) {
                pr_info("failed to probe PMU!\n");
                goto out_free;
        }

        ret = cpu_pmu_init(cpu_pmu);
        if (ret)
                goto out_free;

        ret = armpmu_register(cpu_pmu, -1);
        if (ret)
                goto out_destroy;

        return 0;

out_destroy:
        cpu_pmu_destroy(cpu_pmu);
out_free:
        pr_info("failed to register PMU devices!\n");
        kfree(pmu);
        return ret;
}

static struct platform_driver cpu_pmu_driver = {
        .driver = {
                .name = "arm-pmu",
                .pm = &armpmu_dev_pm_ops,
                .of_match_table = cpu_pmu_of_device_ids,
        },
        .probe = cpu_pmu_device_probe,
        .id_table = cpu_pmu_plat_device_ids,
};

static int __init register_pmu_driver(void)
{
        return platform_driver_register(&cpu_pmu_driver);
}
device_initcall(register_pmu_driver);