/* 2012-07-29 13:09:14 +01:00 */
/*
* This program is free software ; you can redistribute it and / or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation .
*
* This program is distributed in the hope that it will be useful ,
* but WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
* GNU General Public License for more details .
*
* You should have received a copy of the GNU General Public License
* along with this program ; if not , write to the Free Software
* Foundation , Inc . , 59 Temple Place - Suite 330 , Boston , MA 02111 - 1307 , USA .
*
* Copyright ( C ) 2012 ARM Limited
*
* Author : Will Deacon < will . deacon @ arm . com >
*/
# define pr_fmt(fmt) "CPU PMU: " fmt
# include <linux/bitmap.h>
# include <linux/export.h>
# include <linux/kernel.h>
# include <linux/of.h>
# include <linux/platform_device.h>
/* 2012-07-31 10:11:23 +01:00 */
# include <linux/slab.h>
/* 2012-07-29 13:09:14 +01:00 */
# include <linux/spinlock.h>
/* 2014-02-07 21:01:19 +00:00 */
# include <linux/irq.h>
# include <linux/irqdesc.h>
/* 2012-07-29 13:09:14 +01:00 */
# include <asm/cputype.h>
# include <asm/irq_regs.h>
# include <asm/pmu.h>
/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu * cpu_pmu ;
2014-02-07 21:01:19 +00:00
static DEFINE_PER_CPU ( struct arm_pmu * , percpu_pmu ) ;
2012-07-29 13:09:14 +01:00
static DEFINE_PER_CPU ( struct perf_event * [ ARMPMU_MAX_HWEVENTS ] , hw_events ) ;
static DEFINE_PER_CPU ( unsigned long [ BITS_TO_LONGS ( ARMPMU_MAX_HWEVENTS ) ] , used_mask ) ;
static DEFINE_PER_CPU ( struct pmu_hw_events , cpu_hw_events ) ;
/*
* Despite the names , these two functions are CPU - specific and are used
* by the OProfile / perf code .
*/
const char * perf_pmu_name ( void )
{
if ( ! cpu_pmu )
return NULL ;
2012-09-21 14:23:47 +01:00
return cpu_pmu - > name ;
2012-07-29 13:09:14 +01:00
}
EXPORT_SYMBOL_GPL ( perf_pmu_name ) ;
int perf_num_counters ( void )
{
int max_events = 0 ;
if ( cpu_pmu ! = NULL )
max_events = cpu_pmu - > num_events ;
return max_events ;
}
EXPORT_SYMBOL_GPL ( perf_num_counters ) ;
/* Include the PMU-specific implementations. */
# include "perf_event_xscale.c"
# include "perf_event_v6.c"
# include "perf_event_v7.c"
static struct pmu_hw_events * cpu_pmu_get_cpu_events ( void )
{
2013-10-21 13:17:08 +01:00
return this_cpu_ptr ( & cpu_hw_events ) ;
2012-07-29 13:09:14 +01:00
}
2014-02-07 21:01:19 +00:00
static void cpu_pmu_enable_percpu_irq ( void * data )
{
struct arm_pmu * cpu_pmu = data ;
struct platform_device * pmu_device = cpu_pmu - > plat_device ;
int irq = platform_get_irq ( pmu_device , 0 ) ;
enable_percpu_irq ( irq , IRQ_TYPE_NONE ) ;
cpumask_set_cpu ( smp_processor_id ( ) , & cpu_pmu - > active_irqs ) ;
}
static void cpu_pmu_disable_percpu_irq ( void * data )
{
struct arm_pmu * cpu_pmu = data ;
struct platform_device * pmu_device = cpu_pmu - > plat_device ;
int irq = platform_get_irq ( pmu_device , 0 ) ;
cpumask_clear_cpu ( smp_processor_id ( ) , & cpu_pmu - > active_irqs ) ;
disable_percpu_irq ( irq ) ;
}
2012-07-30 12:00:02 +01:00
static void cpu_pmu_free_irq ( struct arm_pmu * cpu_pmu )
2012-07-31 10:34:25 +01:00
{
int i , irq , irqs ;
struct platform_device * pmu_device = cpu_pmu - > plat_device ;
irqs = min ( pmu_device - > num_resources , num_possible_cpus ( ) ) ;
2014-02-07 21:01:19 +00:00
irq = platform_get_irq ( pmu_device , 0 ) ;
if ( irq > = 0 & & irq_is_percpu ( irq ) ) {
on_each_cpu ( cpu_pmu_disable_percpu_irq , cpu_pmu , 1 ) ;
free_percpu_irq ( irq , & percpu_pmu ) ;
} else {
for ( i = 0 ; i < irqs ; + + i ) {
if ( ! cpumask_test_and_clear_cpu ( i , & cpu_pmu - > active_irqs ) )
continue ;
irq = platform_get_irq ( pmu_device , i ) ;
if ( irq > = 0 )
free_irq ( irq , cpu_pmu ) ;
}
2012-07-31 10:34:25 +01:00
}
}
2012-07-30 12:00:02 +01:00
static int cpu_pmu_request_irq ( struct arm_pmu * cpu_pmu , irq_handler_t handler )
2012-07-31 10:34:25 +01:00
{
int i , err , irq , irqs ;
struct platform_device * pmu_device = cpu_pmu - > plat_device ;
if ( ! pmu_device )
return - ENODEV ;
irqs = min ( pmu_device - > num_resources , num_possible_cpus ( ) ) ;
if ( irqs < 1 ) {
pr_err ( " no irqs for PMUs defined \n " ) ;
return - ENODEV ;
}
2014-02-07 21:01:19 +00:00
irq = platform_get_irq ( pmu_device , 0 ) ;
if ( irq > = 0 & & irq_is_percpu ( irq ) ) {
err = request_percpu_irq ( irq , handler , " arm-pmu " , & percpu_pmu ) ;
2012-07-31 10:34:25 +01:00
if ( err ) {
pr_err ( " unable to request IRQ%d for ARM PMU counters \n " ,
irq ) ;
return err ;
}
2014-02-07 21:01:19 +00:00
on_each_cpu ( cpu_pmu_enable_percpu_irq , cpu_pmu , 1 ) ;
} else {
for ( i = 0 ; i < irqs ; + + i ) {
err = 0 ;
irq = platform_get_irq ( pmu_device , i ) ;
if ( irq < 0 )
continue ;
/*
* If we have a single PMU interrupt that we can ' t shift ,
* assume that we ' re running on a uniprocessor machine and
* continue . Otherwise , continue without this interrupt .
*/
if ( irq_set_affinity ( irq , cpumask_of ( i ) ) & & irqs > 1 ) {
pr_warning ( " unable to set irq affinity (irq=%d, cpu=%u) \n " ,
irq , i ) ;
continue ;
}
err = request_irq ( irq , handler ,
IRQF_NOBALANCING | IRQF_NO_THREAD , " arm-pmu " ,
cpu_pmu ) ;
if ( err ) {
pr_err ( " unable to request IRQ%d for ARM PMU counters \n " ,
irq ) ;
return err ;
}
cpumask_set_cpu ( i , & cpu_pmu - > active_irqs ) ;
}
2012-07-31 10:34:25 +01:00
}
return 0 ;
}
2012-12-21 14:02:24 -08:00
static void cpu_pmu_init ( struct arm_pmu * cpu_pmu )
2012-07-29 13:09:14 +01:00
{
int cpu ;
for_each_possible_cpu ( cpu ) {
struct pmu_hw_events * events = & per_cpu ( cpu_hw_events , cpu ) ;
events - > events = per_cpu ( hw_events , cpu ) ;
events - > used_mask = per_cpu ( used_mask , cpu ) ;
raw_spin_lock_init ( & events - > pmu_lock ) ;
2014-02-07 21:01:19 +00:00
per_cpu ( percpu_pmu , cpu ) = cpu_pmu ;
2012-07-29 13:09:14 +01:00
}
2012-07-31 10:34:25 +01:00
cpu_pmu - > get_hw_events = cpu_pmu_get_cpu_events ;
cpu_pmu - > request_irq = cpu_pmu_request_irq ;
cpu_pmu - > free_irq = cpu_pmu_free_irq ;
2012-07-29 13:09:14 +01:00
/* Ensure the PMU has sane values out of reset. */
2013-01-14 17:27:35 +00:00
if ( cpu_pmu - > reset )
2012-07-30 12:00:02 +01:00
on_each_cpu ( cpu_pmu - > reset , cpu_pmu , 1 ) ;
2012-07-29 13:09:14 +01:00
}
/*
* PMU hardware loses all context when a CPU goes offline .
* When a CPU is hotplugged back in , since some hardware registers are
* UNKNOWN at reset , the PMU must be explicitly reset to avoid reading
* junk values out of them .
*/
2013-06-17 15:43:14 -04:00
static int cpu_pmu_notify ( struct notifier_block * b , unsigned long action ,
void * hcpu )
2012-07-29 13:09:14 +01:00
{
if ( ( action & ~ CPU_TASKS_FROZEN ) ! = CPU_STARTING )
return NOTIFY_DONE ;
if ( cpu_pmu & & cpu_pmu - > reset )
2012-07-30 12:00:02 +01:00
cpu_pmu - > reset ( cpu_pmu ) ;
2012-09-21 14:14:17 +01:00
else
return NOTIFY_DONE ;
2012-07-29 13:09:14 +01:00
return NOTIFY_OK ;
}
2013-06-17 15:43:14 -04:00
static struct notifier_block cpu_pmu_hotplug_notifier = {
2012-07-29 13:09:14 +01:00
. notifier_call = cpu_pmu_notify ,
} ;
/*
* PMU platform driver and devicetree bindings .
*/
2012-12-21 14:02:24 -08:00
static struct of_device_id cpu_pmu_of_device_ids [ ] = {
2012-07-29 13:09:14 +01:00
{ . compatible = " arm,cortex-a15-pmu " , . data = armv7_a15_pmu_init } ,
{ . compatible = " arm,cortex-a9-pmu " , . data = armv7_a9_pmu_init } ,
{ . compatible = " arm,cortex-a8-pmu " , . data = armv7_a8_pmu_init } ,
{ . compatible = " arm,cortex-a7-pmu " , . data = armv7_a7_pmu_init } ,
{ . compatible = " arm,cortex-a5-pmu " , . data = armv7_a5_pmu_init } ,
{ . compatible = " arm,arm11mpcore-pmu " , . data = armv6mpcore_pmu_init } ,
{ . compatible = " arm,arm1176-pmu " , . data = armv6pmu_init } ,
{ . compatible = " arm,arm1136-pmu " , . data = armv6pmu_init } ,
{ } ,
} ;
2012-12-21 14:02:24 -08:00
static struct platform_device_id cpu_pmu_plat_device_ids [ ] = {
2012-07-29 13:09:14 +01:00
{ . name = " arm-pmu " } ,
{ } ,
} ;
/*
* CPU PMU identification and probing .
*/
2012-12-21 14:02:24 -08:00
static int probe_current_pmu ( struct arm_pmu * pmu )
2012-07-29 13:09:14 +01:00
{
int cpu = get_cpu ( ) ;
2012-12-18 04:06:38 +00:00
unsigned long implementor = read_cpuid_implementor ( ) ;
unsigned long part_number = read_cpuid_part_number ( ) ;
2012-07-31 10:11:23 +01:00
int ret = - ENODEV ;
2012-07-29 13:09:14 +01:00
pr_info ( " probing PMU on CPU %d \n " , cpu ) ;
/* ARM Ltd CPUs. */
2012-12-18 04:06:38 +00:00
if ( implementor = = ARM_CPU_IMP_ARM ) {
2012-07-29 13:09:14 +01:00
switch ( part_number ) {
2012-12-18 04:06:38 +00:00
case ARM_CPU_PART_ARM1136 :
case ARM_CPU_PART_ARM1156 :
case ARM_CPU_PART_ARM1176 :
2012-07-31 10:11:23 +01:00
ret = armv6pmu_init ( pmu ) ;
2012-07-29 13:09:14 +01:00
break ;
2012-12-18 04:06:38 +00:00
case ARM_CPU_PART_ARM11MPCORE :
2012-07-31 10:11:23 +01:00
ret = armv6mpcore_pmu_init ( pmu ) ;
2012-07-29 13:09:14 +01:00
break ;
2012-12-18 04:06:38 +00:00
case ARM_CPU_PART_CORTEX_A8 :
2012-07-31 10:11:23 +01:00
ret = armv7_a8_pmu_init ( pmu ) ;
2012-07-29 13:09:14 +01:00
break ;
2012-12-18 04:06:38 +00:00
case ARM_CPU_PART_CORTEX_A9 :
2012-07-31 10:11:23 +01:00
ret = armv7_a9_pmu_init ( pmu ) ;
2012-07-29 13:09:14 +01:00
break ;
2012-12-18 04:06:38 +00:00
case ARM_CPU_PART_CORTEX_A5 :
2012-07-31 10:11:23 +01:00
ret = armv7_a5_pmu_init ( pmu ) ;
2012-07-29 13:09:14 +01:00
break ;
2012-12-18 04:06:38 +00:00
case ARM_CPU_PART_CORTEX_A15 :
2012-07-31 10:11:23 +01:00
ret = armv7_a15_pmu_init ( pmu ) ;
2012-07-29 13:09:14 +01:00
break ;
2012-12-18 04:06:38 +00:00
case ARM_CPU_PART_CORTEX_A7 :
2012-07-31 10:11:23 +01:00
ret = armv7_a7_pmu_init ( pmu ) ;
2012-07-29 13:09:14 +01:00
break ;
}
/* Intel CPUs [xscale]. */
2012-12-18 04:06:38 +00:00
} else if ( implementor = = ARM_CPU_IMP_INTEL ) {
switch ( xscale_cpu_arch_version ( ) ) {
case ARM_CPU_XSCALE_ARCH_V1 :
2012-07-31 10:11:23 +01:00
ret = xscale1pmu_init ( pmu ) ;
2012-07-29 13:09:14 +01:00
break ;
2012-12-18 04:06:38 +00:00
case ARM_CPU_XSCALE_ARCH_V2 :
2012-07-31 10:11:23 +01:00
ret = xscale2pmu_init ( pmu ) ;
2012-07-29 13:09:14 +01:00
break ;
}
}
put_cpu ( ) ;
2012-07-31 10:11:23 +01:00
return ret ;
2012-07-29 13:09:14 +01:00
}
2012-12-21 14:02:24 -08:00
static int cpu_pmu_device_probe ( struct platform_device * pdev )
2012-07-29 13:09:14 +01:00
{
const struct of_device_id * of_id ;
2014-01-10 00:57:06 +01:00
const int ( * init_fn ) ( struct arm_pmu * ) ;
2012-07-29 13:09:14 +01:00
struct device_node * node = pdev - > dev . of_node ;
2012-07-31 10:11:23 +01:00
struct arm_pmu * pmu ;
int ret = - ENODEV ;
2012-07-29 13:09:14 +01:00
if ( cpu_pmu ) {
pr_info ( " attempt to register multiple PMU devices! " ) ;
return - ENOSPC ;
}
2012-07-31 10:11:23 +01:00
pmu = kzalloc ( sizeof ( struct arm_pmu ) , GFP_KERNEL ) ;
if ( ! pmu ) {
pr_info ( " failed to allocate PMU device! " ) ;
return - ENOMEM ;
}
2012-07-29 13:09:14 +01:00
if ( node & & ( of_id = of_match_node ( cpu_pmu_of_device_ids , pdev - > dev . of_node ) ) ) {
init_fn = of_id - > data ;
2012-07-31 10:11:23 +01:00
ret = init_fn ( pmu ) ;
2012-07-29 13:09:14 +01:00
} else {
2012-07-31 10:11:23 +01:00
ret = probe_current_pmu ( pmu ) ;
2012-07-29 13:09:14 +01:00
}
2012-07-31 10:11:23 +01:00
if ( ret ) {
2013-01-18 13:42:58 +00:00
pr_info ( " failed to probe PMU! " ) ;
goto out_free ;
2012-07-31 10:11:23 +01:00
}
2012-07-29 13:09:14 +01:00
2012-07-31 10:11:23 +01:00
cpu_pmu = pmu ;
2012-07-29 13:09:14 +01:00
cpu_pmu - > plat_device = pdev ;
cpu_pmu_init ( cpu_pmu ) ;
2013-01-18 13:42:58 +00:00
ret = armpmu_register ( cpu_pmu , PERF_TYPE_RAW ) ;
2012-07-29 13:09:14 +01:00
2013-01-18 13:42:58 +00:00
if ( ! ret )
return 0 ;
out_free :
pr_info ( " failed to register PMU devices! " ) ;
kfree ( pmu ) ;
return ret ;
2012-07-29 13:09:14 +01:00
}
static struct platform_driver cpu_pmu_driver = {
. driver = {
. name = " arm-pmu " ,
. pm = & armpmu_dev_pm_ops ,
. of_match_table = cpu_pmu_of_device_ids ,
} ,
. probe = cpu_pmu_device_probe ,
. id_table = cpu_pmu_plat_device_ids ,
} ;
static int __init register_pmu_driver ( void )
{
2012-09-21 11:53:41 +01:00
int err ;
err = register_cpu_notifier ( & cpu_pmu_hotplug_notifier ) ;
if ( err )
return err ;
err = platform_driver_register ( & cpu_pmu_driver ) ;
if ( err )
unregister_cpu_notifier ( & cpu_pmu_hotplug_notifier ) ;
return err ;
2012-07-29 13:09:14 +01:00
}
device_initcall ( register_pmu_driver ) ;