// SPDX-License-Identifier: GPL-2.0-only
/*
 * ACPI probing code for ARM performance counters.
 *
 * Copyright (C) 2017 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>

#include <asm/cputype.h>
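
/*
 * Per-CPU state built up while parsing the MADT: the arm_pmu instance
 * associated with each CPU, and the Linux IRQ number registered from its
 * GICC entry (0 if none was described).
 */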
static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
static DEFINE_PER_CPU(int, pmu_irqs);
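
/*
 * Register the GSI described by this CPU's MADT GICC entry and return the
 * Linux IRQ number, 0 if no interrupt is described, or a negative errno on
 * failure.
 */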
static int arm_pmu_acpi_register_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi, trigger;

	gicc = acpi_cpu_get_madt_gicc(cpu);

	gsi = gicc->performance_interrupt;

	/*
	 * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
	 * have an interrupt. QEMU advertises this by using a GSI of zero,
	 * which is not known to be valid on any hardware despite being
	 * valid per the spec. Take the pragmatic approach and reject a
	 * GSI of zero for now.
	 */
	if (!gsi)
		return 0;

	if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
		trigger = ACPI_EDGE_SENSITIVE;
	else
		trigger = ACPI_LEVEL_SENSITIVE;

	/*
	 * Helpfully, the MADT GICC doesn't have a polarity flag for the
	 * "performance interrupt". Luckily, on compliant GICs the polarity is
	 * a fixed value in HW (for both SPIs and PPIs) that we cannot change
	 * from SW.
	 *
	 * Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This
	 * may not match the real polarity, but that should not matter.
	 *
	 * Other interrupt controllers are not supported with ACPI.
	 */
	return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
}
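
/*
 * Undo arm_pmu_acpi_register_irq() for a CPU: unregister the GSI from its
 * MADT GICC entry, if one was described.
 */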
static void arm_pmu_acpi_unregister_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi;

	gicc = acpi_cpu_get_madt_gicc(cpu);

	gsi = gicc->performance_interrupt;
	if (gsi)
		acpi_unregister_gsi(gsi);
}

#if IS_ENABLED(CONFIG_ARM_SPE_PMU)
static struct resource spe_resources[] = {
	{
		/* irq */
		.flags		= IORESOURCE_IRQ,
	}
};
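
/*
 * Platform device describing the SPE PMU; its IRQ resource is filled in
 * from the MADT before registration.
 */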
static struct platform_device spe_dev = {
	.name = ARMV8_SPE_PDEV_NAME,
	.id = -1,
	.resource = spe_resources,
	.num_resources = ARRAY_SIZE(spe_resources)
};

/*
 * For lack of a better place, hook the normal PMU MADT walk
 * and create a SPE device if we detect a recent MADT with
 * a homogeneous PPI mapping.
 */
static void arm_spe_acpi_register_device(void)
{
	int cpu, hetid, irq, ret;
	bool first = true;
	u16 gsi = 0;

	/*
	 * Sanity check all the GICC tables for the same interrupt number.
	 * For now, we only support homogeneous ACPI/SPE machines.
	 */
	for_each_possible_cpu(cpu) {
		struct acpi_madt_generic_interrupt *gicc;

		gicc = acpi_cpu_get_madt_gicc(cpu);
		if (gicc->header.length < ACPI_MADT_GICC_SPE)
			return;

		if (first) {
			gsi = gicc->spe_interrupt;
			if (!gsi)
				return;
			hetid = find_acpi_cpu_topology_hetero_id(cpu);
			first = false;
		} else if ((gsi != gicc->spe_interrupt) ||
			   (hetid != find_acpi_cpu_topology_hetero_id(cpu))) {
			pr_warn("ACPI: SPE must be homogeneous\n");
			return;
		}
	}

	irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE,
				ACPI_ACTIVE_HIGH);
	if (irq < 0) {
		pr_warn("ACPI: SPE Unable to register interrupt: %d\n", gsi);
		return;
	}

	spe_resources[0].start = irq;
	ret = platform_device_register(&spe_dev);
	if (ret < 0) {
		pr_warn("ACPI: SPE: Unable to register device\n");
		acpi_unregister_gsi(gsi);
	}
}
#else
static inline void arm_spe_acpi_register_device(void)
{
}
#endif /* CONFIG_ARM_SPE_PMU */
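
/*
 * Walk all possible CPUs, register the GSI from each CPU's MADT GICC entry
 * and request the resulting IRQ so the core arm_pmu code can manage it. On
 * failure, unregister everything that was set up so far.
 */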
static int arm_pmu_acpi_parse_irqs(void)
{
	int irq, cpu, irq_cpu, err;

	for_each_possible_cpu(cpu) {
		irq = arm_pmu_acpi_register_irq(cpu);
		if (irq < 0) {
			err = irq;
			pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
				cpu, err);
			goto out_err;
		} else if (irq == 0) {
			pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
		}

		/*
		 * Log and request the IRQ so the core arm_pmu code can manage
		 * it. We'll have to sanity-check IRQs later when we associate
		 * them with their PMUs.
		 */
		per_cpu(pmu_irqs, cpu) = irq;
		armpmu_request_irq(irq, cpu);
	}

	return 0;

out_err:
	for_each_possible_cpu(cpu) {
		irq = per_cpu(pmu_irqs, cpu);
		if (!irq)
			continue;

		arm_pmu_acpi_unregister_irq(cpu);

		/*
		 * Blat all copies of the IRQ so that we only unregister the
		 * corresponding GSI once (e.g. when we have PPIs).
		 */
		for_each_possible_cpu(irq_cpu) {
			if (per_cpu(pmu_irqs, irq_cpu) == irq)
				per_cpu(pmu_irqs, irq_cpu) = 0;
		}
	}

	return err;
}
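
/*
 * Find the arm_pmu already probed for another CPU with the same MIDR as the
 * current CPU, or atomically allocate a new one if this is the first CPU of
 * its kind to come up.
 */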
static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
{
	unsigned long cpuid = read_cpuid_id();
	struct arm_pmu *pmu;
	int cpu;

	for_each_possible_cpu(cpu) {
		pmu = per_cpu(probed_pmus, cpu);
		if (!pmu || pmu->acpi_cpuid != cpuid)
			continue;

		return pmu;
	}

	pmu = armpmu_alloc_atomic();
	if (!pmu) {
		pr_warn("Unable to allocate PMU for CPU%d\n",
			smp_processor_id());
		return NULL;
	}

	pmu->acpi_cpuid = cpuid;

	return pmu;
}

/*
 * Check whether the new IRQ is compatible with those already associated with
 * the PMU (e.g. we don't have mismatched PPIs).
 */
static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	int cpu;

	if (!irq)
		return true;

	for_each_cpu(cpu, &pmu->supported_cpus) {
		int other_irq = per_cpu(hw_events->irq, cpu);
		if (!other_irq)
			continue;

		if (irq == other_irq)
			continue;
		if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq))
			continue;

		pr_warn("mismatched PPIs detected\n");
		return false;
	}

	return true;
}

/*
 * This must run before the common arm_pmu hotplug logic, so that we can
 * associate a CPU and its interrupt before the common code tries to manage the
 * affinity and so on.
 *
 * Note that hotplug events are serialized, so we cannot race with another CPU
 * coming up. The perf core won't open events while a hotplug event is in
 * progress.
 */
static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
{
	struct arm_pmu *pmu;
	struct pmu_hw_events __percpu *hw_events;
	int irq;

	/* If we've already probed this CPU, we have nothing to do */
	if (per_cpu(probed_pmus, cpu))
		return 0;

	irq = per_cpu(pmu_irqs, cpu);

	pmu = arm_pmu_acpi_find_alloc_pmu();
	if (!pmu)
		return -ENOMEM;

	per_cpu(probed_pmus, cpu) = pmu;

	if (pmu_irq_matches(pmu, irq)) {
		hw_events = pmu->hw_events;
		per_cpu(hw_events->irq, cpu) = irq;
	}

	cpumask_set_cpu(cpu, &pmu->supported_cpus);

	/*
	 * Ideally, we'd probe the PMU here when we find the first matching
	 * CPU. We can't do that for several reasons; see the comment in
	 * arm_pmu_acpi_init().
	 *
	 * So for the time being, we're done.
	 */
	return 0;
}
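
/*
 * Initialise and register an arm_pmu for each distinct CPU type discovered
 * so far, using the driver-supplied init_fn. PMUs which already have a name
 * have already been handled and are skipped.
 */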
int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
{
	int pmu_idx = 0;
	int cpu, ret;

	/*
	 * Initialise and register the set of PMUs which we know about right
	 * now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
	 * could handle late hotplug, but this may lead to deadlock since we
	 * might try to register a hotplug notifier instance from within a
	 * hotplug notifier.
	 *
	 * There's also the problem of having access to the right init_fn,
	 * without tying this too deeply into the "real" PMU driver.
	 *
	 * For the moment, as with the platform/DT case, we need at least one
	 * of a PMU's CPUs to be online at probe time.
	 */
	for_each_possible_cpu(cpu) {
		struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
		char *base_name;

		if (!pmu || pmu->name)
			continue;

		ret = init_fn(pmu);
		if (ret == -ENODEV) {
			/* PMU not handled by this driver, or not present */
			continue;
		} else if (ret) {
			pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
			return ret;
		}

		base_name = pmu->name;
		pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
		if (!pmu->name) {
			pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
			return -ENOMEM;
		}

		ret = armpmu_register(pmu);
		if (ret) {
			pr_warn("Failed to register PMU for CPU%d\n", cpu);
			kfree(pmu->name);
			return ret;
		}
	}

	return 0;
}
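
/*
 * Early initialisation: create the SPE platform device if the MADT describes
 * one, parse the per-CPU PMU interrupts, and install the hotplug callback
 * that associates each CPU with its PMU as it comes online.
 */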
static int arm_pmu_acpi_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	arm_spe_acpi_register_device();

	ret = arm_pmu_acpi_parse_irqs();
	if (ret)
		return ret;

	ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING,
				"perf/arm/pmu_acpi:starting",
				arm_pmu_acpi_cpu_starting, NULL);

	return ret;
}
subsys_initcall(arm_pmu_acpi_init)