2012-11-12 18:33:44 +04:00
/*
* linux / drivers / clocksource / arm_arch_timer . c
*
* Copyright ( C ) 2011 ARM Ltd .
* All Rights Reserved
*
* This program is free software ; you can redistribute it and / or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation .
*/
clocksource/arm_arch_timer: Force per-CPU interrupt to be level-triggered
The ARM architected timer produces level-triggered interrupts (this
is mandated by the architecture). Unfortunately, a number of
device-trees get this wrong, and expose an edge-triggered interrupt.
Until now, this wasn't too much an issue, as the programming of the
trigger would fail (the corresponding PPI cannot be reconfigured),
and the kernel would be happy with this. But we're about to change
this, and trust DT a lot if the driver doesn't provide its own
trigger information. In that context, the timer breaks badly.
While we do need to fix the DTs, there is also some userspace out
there (kvmtool) that generates the same kind of broken DT on the
fly, and that will completely break with newer kernels.
As a safety measure, and to keep buggy software alive as well as
buying us some time to fix DTs all over the place, let's check
what trigger configuration has been given us by the firmware.
If this is not a level configuration, then we know that the
DT/ACPI configuration is bust, and we pick some defaults which
won't be worse than the existing setup.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Cc: Andrew Lunn <andrew@lunn.ch>
Cc: Liu Gang <Gang.Liu@nxp.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
Cc: Wenbin Song <Wenbin.Song@freescale.com>
Cc: Mingkai Hu <Mingkai.Hu@freescale.com>
Cc: Florian Fainelli <f.fainelli@gmail.com>
Cc: Kevin Hilman <khilman@baylibre.com>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Michal Simek <michal.simek@xilinx.com>
Cc: Jon Hunter <jonathanh@nvidia.com>
Cc: arm@kernel.org
Cc: bcm-kernel-feedback-list@broadcom.com
Cc: linux-arm-kernel@lists.infradead.org
Cc: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
Cc: Jason Cooper <jason@lakedaemon.net>
Cc: Ray Jui <rjui@broadcom.com>
Cc: "Hou Zhiqiang" <B48286@freescale.com>
Cc: Tirumalesh Chalamarla <tchalamarla@cavium.com>
Cc: linux-samsung-soc@vger.kernel.org
Cc: Yuan Yao <yao.yuan@nxp.com>
Cc: Jan Glauber <jglauber@cavium.com>
Cc: Gregory Clement <gregory.clement@free-electrons.com>
Cc: linux-amlogic@lists.infradead.org
Cc: soren.brinkmann@xilinx.com
Cc: Rajesh Bhagat <rajesh.bhagat@freescale.com>
Cc: Scott Branden <sbranden@broadcom.com>
Cc: Duc Dang <dhdang@apm.com>
Cc: Kukjin Kim <kgene@kernel.org>
Cc: Carlo Caione <carlo@caione.org>
Cc: Dinh Nguyen <dinguyen@opensource.altera.com>
Link: http://lkml.kernel.org/r/1470045256-9032-2-git-send-email-marc.zyngier@arm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2016-08-01 12:54:15 +03:00
# define pr_fmt(fmt) "arm_arch_timer: " fmt
2012-11-12 18:33:44 +04:00
# include <linux/init.h>
# include <linux/kernel.h>
# include <linux/device.h>
# include <linux/smp.h>
# include <linux/cpu.h>
2013-08-23 18:53:15 +04:00
# include <linux/cpu_pm.h>
2012-11-12 18:33:44 +04:00
# include <linux/clockchips.h>
2015-01-06 16:26:13 +03:00
# include <linux/clocksource.h>
2012-11-12 18:33:44 +04:00
# include <linux/interrupt.h>
# include <linux/of_irq.h>
2013-07-19 03:59:32 +04:00
# include <linux/of_address.h>
2012-11-12 18:33:44 +04:00
# include <linux/io.h>
2013-07-19 03:59:32 +04:00
# include <linux/slab.h>
2017-02-01 18:36:40 +03:00
# include <linux/sched/clock.h>
2013-07-19 03:21:18 +04:00
# include <linux/sched_clock.h>
2015-03-24 17:02:50 +03:00
# include <linux/acpi.h>
2012-11-12 18:33:44 +04:00
# include <asm/arch_timer.h>
2013-01-10 15:13:07 +04:00
# include <asm/virt.h>
2012-11-12 18:33:44 +04:00
# include <clocksource/arm_arch_timer.h>
2013-07-19 03:59:32 +04:00
# define CNTTIDR 0x08
# define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
2016-02-01 15:00:48 +03:00
# define CNTACR(n) (0x40 + ((n) * 4))
# define CNTACR_RPCT BIT(0)
# define CNTACR_RVCT BIT(1)
# define CNTACR_RFRQ BIT(2)
# define CNTACR_RVOFF BIT(3)
# define CNTACR_RWVT BIT(4)
# define CNTACR_RWPT BIT(5)
2013-07-19 03:59:32 +04:00
# define CNTVCT_LO 0x08
# define CNTVCT_HI 0x0c
# define CNTFRQ 0x10
# define CNTP_TVAL 0x28
# define CNTP_CTL 0x2c
# define CNTV_TVAL 0x38
# define CNTV_CTL 0x3c
# define ARCH_CP15_TIMER BIT(0)
# define ARCH_MEM_TIMER BIT(1)
static unsigned arch_timers_present __initdata ;
static void __iomem * arch_counter_base ;
struct arch_timer {
void __iomem * base ;
struct clock_event_device evt ;
} ;
# define to_arch_timer(e) container_of(e, struct arch_timer, evt)
2012-11-12 18:33:44 +04:00
/* Counter frequency in Hz, discovered from DT/ACPI or CNTFRQ */
static u32 arch_timer_rate;

/* PPI slots in the order DT/ACPI describe them */
enum ppi_nr {
	PHYS_SECURE_PPI,
	PHYS_NONSECURE_PPI,
	VIRT_PPI,
	HYP_PPI,
	MAX_TIMER_PPI
};

static int arch_timer_ppi[MAX_TIMER_PPI];

static struct clock_event_device __percpu *arch_timer_evt;

/* Which PPI the per-CPU clockevent uses; virtual unless overridden */
static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
clocksource: arch_arm_timer: Fix age-old arch timer C3STOP detection issue
ARM arch timers are tightly coupled with the CPU logic and lose context
on platform implementing HW power management when cores are powered
down at run-time. Marking the arch timers as C3STOP regardless of power
management capabilities causes issues on platforms with no power management,
since in that case the arch timers cannot possibly enter states where the
timer loses context at runtime and therefore can always be used as a high
resolution clockevent device.
In order to fix the C3STOP issue in a way compliant with how real HW
works, this patch adds a boolean property to the arch timer bindings
to define if the arch timer is managed by an always-on power domain.
This power domain is present on all ARM platforms to date, and manages
HW that must not be turned off, whatever the state of other HW
components (eg power controller). On platforms with no power management
capabilities, it is the only power domain present, which encompasses
and manages power supply for all HW components in the system.
If the timer is powered by the always-on power domain, the always-on
property must be present in the bindings which means that the timer cannot
be shutdown at runtime, so it is not a C3STOP clockevent device.
If the timer binding does not contain the always-on property, the timer is
assumed to be power-gateable, hence it must be defined as a C3STOP
clockevent device.
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Magnus Damm <damm@opensource.se>
Cc: Marc Carino <marc.ceeeee@gmail.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Acked-by: Rob Herring <robh@kernel.org>
Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
2014-04-08 13:04:32 +04:00
static bool arch_timer_c3stop ;
2013-07-19 03:59:32 +04:00
static bool arch_timer_mem_use_virtual ;
2016-10-04 21:12:09 +03:00
static bool arch_counter_suspend_stop ;
2017-02-01 15:07:15 +03:00
static bool vdso_default = true ;
2012-11-12 18:33:44 +04:00
2016-06-27 19:30:13 +03:00
static bool evtstrm_enable = IS_ENABLED ( CONFIG_ARM_ARCH_TIMER_EVTSTREAM ) ;
static int __init early_evtstrm_cfg ( char * buf )
{
return strtobool ( buf , & evtstrm_enable ) ;
}
early_param ( " clocksource.arm_arch_timer.evtstrm " , early_evtstrm_cfg ) ;
2012-11-12 18:33:44 +04:00
/*
* Architected system timer support .
*/
2017-01-20 21:28:32 +03:00
/*
 * Write a timer control/timer-value register, routing to the MMIO frame
 * for memory-mapped accessors and to the CP15/sysreg path otherwise.
 */
static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
			  struct clock_event_device *clk)
{
	struct arch_timer *timer;

	switch (access) {
	case ARCH_TIMER_MEM_PHYS_ACCESS:
		timer = to_arch_timer(clk);
		if (reg == ARCH_TIMER_REG_CTRL)
			writel_relaxed(val, timer->base + CNTP_CTL);
		else if (reg == ARCH_TIMER_REG_TVAL)
			writel_relaxed(val, timer->base + CNTP_TVAL);
		break;
	case ARCH_TIMER_MEM_VIRT_ACCESS:
		timer = to_arch_timer(clk);
		if (reg == ARCH_TIMER_REG_CTRL)
			writel_relaxed(val, timer->base + CNTV_CTL);
		else if (reg == ARCH_TIMER_REG_TVAL)
			writel_relaxed(val, timer->base + CNTV_TVAL);
		break;
	default:
		/* CP15/system-register based per-CPU timer */
		arch_timer_reg_write_cp15(access, reg, val);
	}
}
/*
 * Read a timer control/timer-value register, routing to the MMIO frame
 * for memory-mapped accessors and to the CP15/sysreg path otherwise.
 *
 * Fix: the original left 'val' uninitialized when 'reg' matched neither
 * switch case (undefined behaviour); a default arm now zeroes it.
 */
static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
			struct clock_event_device *clk)
{
	u32 val;

	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTP_TVAL);
			break;
		default:
			/* unreachable for valid regs; avoid returning garbage */
			val = 0;
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTV_TVAL);
			break;
		default:
			/* unreachable for valid regs; avoid returning garbage */
			val = 0;
			break;
		}
	} else {
		val = arch_timer_reg_read_cp15(access, reg);
	}

	return val;
}
2017-02-01 14:53:46 +03:00
/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;

/* clocksource .read hook: just forward to the current counter accessor */
static u64 arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

/* cyclecounter .read hook: same accessor, different signature */
static u64 arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}
/* The architected counter registered as a high-rating clocksource */
static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

/* Same counter exposed through the cyclecounter interface */
static struct cyclecounter cyclecounter __ro_after_init = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};
2016-09-22 11:35:17 +03:00
#ifdef CONFIG_FSL_ERRATUM_A008585
/*
 * The number of retries is an arbitrary value well beyond the highest number
 * of iterations the loop has been observed to take.
 */
#define __fsl_a008585_read_reg(reg) ({			\
	u64 _old, _new;					\
	int _retries = 200;				\
							\
	do {						\
		_old = read_sysreg(reg);		\
		_new = read_sysreg(reg);		\
		_retries--;				\
	} while (unlikely(_old != _new) && _retries);	\
							\
	WARN_ON_ONCE(!_retries);			\
	_new;						\
})

static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntp_tval_el0);
}

static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntv_tval_el0);
}

static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
	return __fsl_a008585_read_reg(cntvct_el0);
}
#endif
2017-02-06 19:47:42 +03:00
#ifdef CONFIG_HISILICON_ERRATUM_161010101
/*
 * Verify whether the value of the second read is larger than the first by
 * less than 32 is the only way to confirm the value is correct, so clear the
 * lower 5 bits to check whether the difference is greater than 32 or not.
 * Theoretically the erratum should not occur more than twice in succession
 * when reading the system counter, but it is possible that some interrupts
 * may lead to more than twice read errors, triggering the warning, so setting
 * the number of retries far beyond the number of iterations the loop has been
 * observed to take.
 */
#define __hisi_161010101_read_reg(reg) ({			\
	u64 _old, _new;						\
	int _retries = 50;					\
								\
	do {							\
		_old = read_sysreg(reg);			\
		_new = read_sysreg(reg);			\
		_retries--;					\
	} while (unlikely((_new - _old) >> 5) && _retries);	\
								\
	WARN_ON_ONCE(!_retries);				\
	_new;							\
})

static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntp_tval_el0);
}

static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntv_tval_el0);
}

static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
	return __hisi_161010101_read_reg(cntvct_el0);
}
#endif
2017-02-06 19:47:41 +03:00
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
/* Active counter-read workaround for each CPU (NULL if none) */
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *,
	       timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

/* Static key flipped on once any out-of-line workaround is enabled */
DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
2017-01-27 13:27:09 +03:00
/*
 * Erratum-safe set_next_event: program an absolute compare value
 * (CVAL) instead of the downcounter (TVAL), which is what the
 * affected counters get wrong.
 */
static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
						struct clock_event_device *clk)
{
	u64 cval = evt + arch_counter_get_cntvct();
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS)
		write_sysreg(cval, cntp_cval_el0);
	else
		write_sysreg(cval, cntv_cval_el0);

	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int erratum_set_next_event_tval_virt(unsigned long evt,
					    struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int erratum_set_next_event_tval_phys(unsigned long evt,
					    struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}
2017-02-06 19:47:41 +03:00
/*
 * Table of known counter errata and their out-of-line accessors.
 *
 * Fix: the Freescale entry's human-readable description said
 * "a005858"; the erratum (and the .id / Kconfig symbol) is A008585.
 */
static const struct arch_timer_erratum_workaround ool_workarounds[] = {
#ifdef CONFIG_FSL_ERRATUM_A008585
	{
		.match_type = ate_match_dt,
		.id = "fsl,erratum-a008585",
		.desc = "Freescale erratum a008585",
		.read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
		.read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
		.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161010101
	{
		.match_type = ate_match_dt,
		.id = "hisilicon,erratum-161010101",
		.desc = "HiSilicon erratum 161010101",
		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
#endif
};
2017-01-19 20:20:59 +03:00
typedef bool ( * ate_match_fn_t ) ( const struct arch_timer_erratum_workaround * ,
const void * ) ;
static
bool arch_timer_check_dt_erratum ( const struct arch_timer_erratum_workaround * wa ,
const void * arg )
{
const struct device_node * np = arg ;
return of_property_read_bool ( np , wa - > id ) ;
}
2017-03-20 19:47:59 +03:00
static
bool arch_timer_check_local_cap_erratum ( const struct arch_timer_erratum_workaround * wa ,
const void * arg )
{
return this_cpu_has_cap ( ( uintptr_t ) wa - > id ) ;
}
2017-01-19 20:20:59 +03:00
/* Return the first table entry of the given type accepted by match_fn */
static const struct arch_timer_erratum_workaround *
arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
			  ate_match_fn_t match_fn,
			  void *arg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
		if (ool_workarounds[i].match_type != type)
			continue;

		if (match_fn(&ool_workarounds[i], arg))
			return &ool_workarounds[i];
	}

	return NULL;
}
static
2017-02-20 21:34:48 +03:00
void arch_timer_enable_workaround ( const struct arch_timer_erratum_workaround * wa ,
bool local )
2017-01-19 20:20:59 +03:00
{
2017-02-20 21:34:48 +03:00
int i ;
if ( local ) {
__this_cpu_write ( timer_unstable_counter_workaround , wa ) ;
} else {
for_each_possible_cpu ( i )
per_cpu ( timer_unstable_counter_workaround , i ) = wa ;
}
2017-01-19 20:20:59 +03:00
static_branch_enable ( & arch_timer_read_ool_enabled ) ;
2017-02-01 15:07:15 +03:00
/*
* Don ' t use the vdso fastpath if errata require using the
* out - of - line counter accessor . We may change our mind pretty
* late in the game ( with a per - CPU erratum , for example ) , so
* change both the default value and the vdso itself .
*/
if ( wa - > read_cntvct_el0 ) {
clocksource_counter . archdata . vdso_direct = false ;
vdso_default = false ;
}
2017-01-19 20:20:59 +03:00
}
static void arch_timer_check_ool_workaround ( enum arch_timer_erratum_match_type type ,
void * arg )
{
const struct arch_timer_erratum_workaround * wa ;
ate_match_fn_t match_fn = NULL ;
2017-03-20 19:47:59 +03:00
bool local = false ;
2017-01-19 20:20:59 +03:00
switch ( type ) {
case ate_match_dt :
match_fn = arch_timer_check_dt_erratum ;
break ;
2017-03-20 19:47:59 +03:00
case ate_match_local_cap_id :
match_fn = arch_timer_check_local_cap_erratum ;
local = true ;
break ;
2017-01-19 20:20:59 +03:00
default :
WARN_ON ( 1 ) ;
return ;
}
wa = arch_timer_iterate_errata ( type , match_fn , arg ) ;
if ( ! wa )
return ;
2017-03-20 19:47:59 +03:00
if ( needs_unstable_timer_counter_workaround ( ) ) {
2017-02-20 21:34:48 +03:00
const struct arch_timer_erratum_workaround * __wa ;
__wa = __this_cpu_read ( timer_unstable_counter_workaround ) ;
if ( __wa & & wa ! = __wa )
2017-03-20 19:47:59 +03:00
pr_warn ( " Can't enable workaround for %s (clashes with %s \n ) " ,
2017-02-20 21:34:48 +03:00
wa - > desc , __wa - > desc ) ;
if ( __wa )
return ;
2017-03-20 19:47:59 +03:00
}
2017-02-20 21:34:48 +03:00
arch_timer_enable_workaround ( wa , local ) ;
2017-03-20 19:47:59 +03:00
pr_info ( " Enabling %s workaround for %s \n " ,
local ? " local " : " global " , wa - > desc ) ;
2017-01-19 20:20:59 +03:00
}
2017-01-27 13:27:09 +03:00
/*
 * Invoke this CPU's workaround hook 'fn' if one is installed; evaluates
 * to true (with the result in 'r') when the hook handled the call.
 */
#define erratum_handler(fn, r, ...)					\
({									\
	bool __val;							\
	if (needs_unstable_timer_counter_workaround()) {		\
		const struct arch_timer_erratum_workaround *__wa;	\
		__wa = __this_cpu_read(timer_unstable_counter_workaround); \
		if (__wa && __wa->fn) {					\
			r = __wa->fn(__VA_ARGS__);			\
			__val = true;					\
		} else {						\
			__val = false;					\
		}							\
	} else {							\
		__val = false;						\
	}								\
	__val;								\
})
2017-02-01 15:07:15 +03:00
static bool arch_timer_this_cpu_has_cntvct_wa ( void )
{
const struct arch_timer_erratum_workaround * wa ;
wa = __this_cpu_read ( timer_unstable_counter_workaround ) ;
return wa & & wa - > read_cntvct_el0 ;
}
2017-01-19 20:20:59 +03:00
#else
/* No OOL workarounds configured: stub everything out */
#define arch_timer_check_ool_workaround(t,a)	do { } while(0)
#define erratum_set_next_event_tval_virt(...)	({BUG(); 0;})
#define erratum_set_next_event_tval_phys(...)	({BUG(); 0;})
#define erratum_handler(fn, r, ...)		({false;})
#define arch_timer_this_cpu_has_cntvct_wa()	({false;})
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
2016-09-22 11:35:17 +03:00
2013-07-19 03:59:28 +04:00
/*
 * Common interrupt path: if the timer fired (ISTATUS set), mask the
 * interrupt and run the clockevent handler; otherwise it wasn't ours.
 */
static __always_inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
{
	unsigned long ctrl =
		arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);

	if (!(ctrl & ARCH_TIMER_CTRL_IT_STAT))
		return IRQ_NONE;

	ctrl |= ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
	evt->event_handler(evt);
	return IRQ_HANDLED;
}
static irqreturn_t arch_timer_handler_virt ( int irq , void * dev_id )
{
struct clock_event_device * evt = dev_id ;
return timer_handler ( ARCH_TIMER_VIRT_ACCESS , evt ) ;
}
static irqreturn_t arch_timer_handler_phys ( int irq , void * dev_id )
{
struct clock_event_device * evt = dev_id ;
return timer_handler ( ARCH_TIMER_PHYS_ACCESS , evt ) ;
}
2013-07-19 03:59:32 +04:00
static irqreturn_t arch_timer_handler_phys_mem ( int irq , void * dev_id )
{
struct clock_event_device * evt = dev_id ;
return timer_handler ( ARCH_TIMER_MEM_PHYS_ACCESS , evt ) ;
}
static irqreturn_t arch_timer_handler_virt_mem ( int irq , void * dev_id )
{
struct clock_event_device * evt = dev_id ;
return timer_handler ( ARCH_TIMER_MEM_VIRT_ACCESS , evt ) ;
}
2015-06-12 11:00:12 +03:00
static __always_inline int timer_shutdown ( const int access ,
struct clock_event_device * clk )
2012-11-12 18:33:44 +04:00
{
unsigned long ctrl ;
2015-06-12 11:00:12 +03:00
ctrl = arch_timer_reg_read ( access , ARCH_TIMER_REG_CTRL , clk ) ;
ctrl & = ~ ARCH_TIMER_CTRL_ENABLE ;
arch_timer_reg_write ( access , ARCH_TIMER_REG_CTRL , ctrl , clk ) ;
return 0 ;
2012-11-12 18:33:44 +04:00
}
2015-06-12 11:00:12 +03:00
static int arch_timer_shutdown_virt ( struct clock_event_device * clk )
2012-11-12 18:33:44 +04:00
{
2015-06-12 11:00:12 +03:00
return timer_shutdown ( ARCH_TIMER_VIRT_ACCESS , clk ) ;
2012-11-12 18:33:44 +04:00
}
2015-06-12 11:00:12 +03:00
static int arch_timer_shutdown_phys ( struct clock_event_device * clk )
2012-11-12 18:33:44 +04:00
{
2015-06-12 11:00:12 +03:00
return timer_shutdown ( ARCH_TIMER_PHYS_ACCESS , clk ) ;
2012-11-12 18:33:44 +04:00
}
2015-06-12 11:00:12 +03:00
static int arch_timer_shutdown_virt_mem ( struct clock_event_device * clk )
2013-07-19 03:59:32 +04:00
{
2015-06-12 11:00:12 +03:00
return timer_shutdown ( ARCH_TIMER_MEM_VIRT_ACCESS , clk ) ;
2012-11-12 18:33:44 +04:00
}
2015-06-12 11:00:12 +03:00
static int arch_timer_shutdown_phys_mem ( struct clock_event_device * clk )
2013-07-19 03:59:32 +04:00
{
2015-06-12 11:00:12 +03:00
return timer_shutdown ( ARCH_TIMER_MEM_PHYS_ACCESS , clk ) ;
2013-07-19 03:59:32 +04:00
}
2013-07-19 03:59:31 +04:00
static __always_inline void set_next_event ( const int access , unsigned long evt ,
2013-08-21 16:59:23 +04:00
struct clock_event_device * clk )
2012-11-12 18:33:44 +04:00
{
unsigned long ctrl ;
2013-07-19 03:59:31 +04:00
ctrl = arch_timer_reg_read ( access , ARCH_TIMER_REG_CTRL , clk ) ;
2012-11-12 18:33:44 +04:00
ctrl | = ARCH_TIMER_CTRL_ENABLE ;
ctrl & = ~ ARCH_TIMER_CTRL_IT_MASK ;
2013-07-19 03:59:31 +04:00
arch_timer_reg_write ( access , ARCH_TIMER_REG_TVAL , evt , clk ) ;
arch_timer_reg_write ( access , ARCH_TIMER_REG_CTRL , ctrl , clk ) ;
2012-11-12 18:33:44 +04:00
}
static int arch_timer_set_next_event_virt ( unsigned long evt ,
2013-07-19 03:59:31 +04:00
struct clock_event_device * clk )
2012-11-12 18:33:44 +04:00
{
2017-01-27 13:27:09 +03:00
int ret ;
if ( erratum_handler ( set_next_event_virt , ret , evt , clk ) )
return ret ;
2017-01-27 13:27:09 +03:00
2013-07-19 03:59:31 +04:00
set_next_event ( ARCH_TIMER_VIRT_ACCESS , evt , clk ) ;
2012-11-12 18:33:44 +04:00
return 0 ;
}
static int arch_timer_set_next_event_phys ( unsigned long evt ,
2013-07-19 03:59:31 +04:00
struct clock_event_device * clk )
2012-11-12 18:33:44 +04:00
{
2017-01-27 13:27:09 +03:00
int ret ;
if ( erratum_handler ( set_next_event_phys , ret , evt , clk ) )
return ret ;
2017-01-27 13:27:09 +03:00
2013-07-19 03:59:31 +04:00
set_next_event ( ARCH_TIMER_PHYS_ACCESS , evt , clk ) ;
2012-11-12 18:33:44 +04:00
return 0 ;
}
2013-07-19 03:59:32 +04:00
static int arch_timer_set_next_event_virt_mem ( unsigned long evt ,
struct clock_event_device * clk )
2012-11-12 18:33:44 +04:00
{
2013-07-19 03:59:32 +04:00
set_next_event ( ARCH_TIMER_MEM_VIRT_ACCESS , evt , clk ) ;
return 0 ;
}
static int arch_timer_set_next_event_phys_mem ( unsigned long evt ,
struct clock_event_device * clk )
{
set_next_event ( ARCH_TIMER_MEM_PHYS_ACCESS , evt , clk ) ;
return 0 ;
}
2013-08-21 16:59:23 +04:00
static void __arch_timer_setup ( unsigned type ,
struct clock_event_device * clk )
2013-07-19 03:59:32 +04:00
{
clk - > features = CLOCK_EVT_FEAT_ONESHOT ;
if ( type = = ARCH_CP15_TIMER ) {
clocksource: arch_arm_timer: Fix age-old arch timer C3STOP detection issue
ARM arch timers are tightly coupled with the CPU logic and lose context
on platform implementing HW power management when cores are powered
down at run-time. Marking the arch timers as C3STOP regardless of power
management capabilities causes issues on platforms with no power management,
since in that case the arch timers cannot possibly enter states where the
timer loses context at runtime and therefore can always be used as a high
resolution clockevent device.
In order to fix the C3STOP issue in a way compliant with how real HW
works, this patch adds a boolean property to the arch timer bindings
to define if the arch timer is managed by an always-on power domain.
This power domain is present on all ARM platforms to date, and manages
HW that must not be turned off, whatever the state of other HW
components (eg power controller). On platforms with no power management
capabilities, it is the only power domain present, which encompasses
and manages power supply for all HW components in the system.
If the timer is powered by the always-on power domain, the always-on
property must be present in the bindings which means that the timer cannot
be shutdown at runtime, so it is not a C3STOP clockevent device.
If the timer binding does not contain the always-on property, the timer is
assumed to be power-gateable, hence it must be defined as a C3STOP
clockevent device.
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Magnus Damm <damm@opensource.se>
Cc: Marc Carino <marc.ceeeee@gmail.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Acked-by: Rob Herring <robh@kernel.org>
Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
2014-04-08 13:04:32 +04:00
if ( arch_timer_c3stop )
clk - > features | = CLOCK_EVT_FEAT_C3STOP ;
2013-07-19 03:59:32 +04:00
clk - > name = " arch_sys_timer " ;
clk - > rating = 450 ;
clk - > cpumask = cpumask_of ( smp_processor_id ( ) ) ;
2014-02-20 19:21:23 +04:00
clk - > irq = arch_timer_ppi [ arch_timer_uses_ppi ] ;
switch ( arch_timer_uses_ppi ) {
case VIRT_PPI :
2015-06-12 11:00:12 +03:00
clk - > set_state_shutdown = arch_timer_shutdown_virt ;
2015-12-23 14:29:12 +03:00
clk - > set_state_oneshot_stopped = arch_timer_shutdown_virt ;
2013-07-19 03:59:32 +04:00
clk - > set_next_event = arch_timer_set_next_event_virt ;
2014-02-20 19:21:23 +04:00
break ;
case PHYS_SECURE_PPI :
case PHYS_NONSECURE_PPI :
case HYP_PPI :
2015-06-12 11:00:12 +03:00
clk - > set_state_shutdown = arch_timer_shutdown_phys ;
2015-12-23 14:29:12 +03:00
clk - > set_state_oneshot_stopped = arch_timer_shutdown_phys ;
2013-07-19 03:59:32 +04:00
clk - > set_next_event = arch_timer_set_next_event_phys ;
2014-02-20 19:21:23 +04:00
break ;
default :
BUG ( ) ;
2013-07-19 03:59:32 +04:00
}
2016-09-22 11:35:17 +03:00
2017-03-20 19:47:59 +03:00
arch_timer_check_ool_workaround ( ate_match_local_cap_id , NULL ) ;
2012-11-12 18:33:44 +04:00
} else {
2014-01-07 02:56:17 +04:00
clk - > features | = CLOCK_EVT_FEAT_DYNIRQ ;
2013-07-19 03:59:32 +04:00
clk - > name = " arch_mem_timer " ;
clk - > rating = 400 ;
clk - > cpumask = cpu_all_mask ;
if ( arch_timer_mem_use_virtual ) {
2015-06-12 11:00:12 +03:00
clk - > set_state_shutdown = arch_timer_shutdown_virt_mem ;
2015-12-23 14:29:12 +03:00
clk - > set_state_oneshot_stopped = arch_timer_shutdown_virt_mem ;
2013-07-19 03:59:32 +04:00
clk - > set_next_event =
arch_timer_set_next_event_virt_mem ;
} else {
2015-06-12 11:00:12 +03:00
clk - > set_state_shutdown = arch_timer_shutdown_phys_mem ;
2015-12-23 14:29:12 +03:00
clk - > set_state_oneshot_stopped = arch_timer_shutdown_phys_mem ;
2013-07-19 03:59:32 +04:00
clk - > set_next_event =
arch_timer_set_next_event_phys_mem ;
}
2012-11-12 18:33:44 +04:00
}
2015-06-12 11:00:12 +03:00
clk - > set_state_shutdown ( clk ) ;
2012-11-12 18:33:44 +04:00
2013-07-19 03:59:32 +04:00
clockevents_config_and_register ( clk , arch_timer_rate , 0xf , 0x7fffffff ) ;
}
2012-11-12 18:33:44 +04:00
2014-09-29 03:50:06 +04:00
static void arch_timer_evtstrm_enable ( int divider )
{
u32 cntkctl = arch_timer_get_cntkctl ( ) ;
cntkctl & = ~ ARCH_TIMER_EVT_TRIGGER_MASK ;
/* Set the divider and enable virtual event stream */
cntkctl | = ( divider < < ARCH_TIMER_EVT_TRIGGER_SHIFT )
| ARCH_TIMER_VIRT_EVT_EN ;
arch_timer_set_cntkctl ( cntkctl ) ;
elf_hwcap | = HWCAP_EVTSTRM ;
# ifdef CONFIG_COMPAT
compat_elf_hwcap | = COMPAT_HWCAP_EVTSTRM ;
# endif
}
2013-08-23 18:32:29 +04:00
/* Pick the divider closest to ARCH_TIMER_EVT_STREAM_FREQ and enable it */
static void arch_timer_configure_evtstream(void)
{
	int evt_stream_div, pos;

	/* Find the closest power of two to the divisor */
	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
	pos = fls(evt_stream_div);
	if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
		pos--;

	/* enable event stream */
	arch_timer_evtstrm_enable(min(pos, 15));
}
2014-09-29 03:50:06 +04:00
static void arch_counter_set_user_access ( void )
{
u32 cntkctl = arch_timer_get_cntkctl ( ) ;
2017-02-01 15:07:15 +03:00
/* Disable user access to the timers and both counters */
2014-09-29 03:50:06 +04:00
/* Also disable virtual event stream */
cntkctl & = ~ ( ARCH_TIMER_USR_PT_ACCESS_EN
| ARCH_TIMER_USR_VT_ACCESS_EN
2017-02-01 15:07:15 +03:00
| ARCH_TIMER_USR_VCT_ACCESS_EN
2014-09-29 03:50:06 +04:00
| ARCH_TIMER_VIRT_EVT_EN
| ARCH_TIMER_USR_PCT_ACCESS_EN ) ;
2017-02-01 15:07:15 +03:00
/*
* Enable user access to the virtual counter if it doesn ' t
* need to be workaround . The vdso may have been already
* disabled though .
*/
if ( arch_timer_this_cpu_has_cntvct_wa ( ) )
pr_info ( " CPU%d: Trapping CNTVCT access \n " , smp_processor_id ( ) ) ;
else
cntkctl | = ARCH_TIMER_USR_VCT_ACCESS_EN ;
2014-09-29 03:50:06 +04:00
arch_timer_set_cntkctl ( cntkctl ) ;
}
2014-02-20 19:21:23 +04:00
static bool arch_timer_has_nonsecure_ppi ( void )
{
return ( arch_timer_uses_ppi = = PHYS_SECURE_PPI & &
arch_timer_ppi [ PHYS_NONSECURE_PPI ] ) ;
}
clocksource/arm_arch_timer: Force per-CPU interrupt to be level-triggered
The ARM architected timer produces level-triggered interrupts (this
is mandated by the architecture). Unfortunately, a number of
device-trees get this wrong, and expose an edge-triggered interrupt.
Until now, this wasn't too much an issue, as the programming of the
trigger would fail (the corresponding PPI cannot be reconfigured),
and the kernel would be happy with this. But we're about to change
this, and trust DT a lot if the driver doesn't provide its own
trigger information. In that context, the timer breaks badly.
While we do need to fix the DTs, there is also some userspace out
there (kvmtool) that generates the same kind of broken DT on the
fly, and that will completely break with newer kernels.
As a safety measure, and to keep buggy software alive as well as
buying us some time to fix DTs all over the place, let's check
what trigger configuration has been given us by the firmware.
If this is not a level configuration, then we know that the
DT/ACPI configuration is bust, and we pick some defaults which
won't be worse than the existing setup.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Cc: Andrew Lunn <andrew@lunn.ch>
Cc: Liu Gang <Gang.Liu@nxp.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
Cc: Wenbin Song <Wenbin.Song@freescale.com>
Cc: Mingkai Hu <Mingkai.Hu@freescale.com>
Cc: Florian Fainelli <f.fainelli@gmail.com>
Cc: Kevin Hilman <khilman@baylibre.com>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Michal Simek <michal.simek@xilinx.com>
Cc: Jon Hunter <jonathanh@nvidia.com>
Cc: arm@kernel.org
Cc: bcm-kernel-feedback-list@broadcom.com
Cc: linux-arm-kernel@lists.infradead.org
Cc: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
Cc: Jason Cooper <jason@lakedaemon.net>
Cc: Ray Jui <rjui@broadcom.com>
Cc: "Hou Zhiqiang" <B48286@freescale.com>
Cc: Tirumalesh Chalamarla <tchalamarla@cavium.com>
Cc: linux-samsung-soc@vger.kernel.org
Cc: Yuan Yao <yao.yuan@nxp.com>
Cc: Jan Glauber <jglauber@cavium.com>
Cc: Gregory Clement <gregory.clement@free-electrons.com>
Cc: linux-amlogic@lists.infradead.org
Cc: soren.brinkmann@xilinx.com
Cc: Rajesh Bhagat <rajesh.bhagat@freescale.com>
Cc: Scott Branden <sbranden@broadcom.com>
Cc: Duc Dang <dhdang@apm.com>
Cc: Kukjin Kim <kgene@kernel.org>
Cc: Carlo Caione <carlo@caione.org>
Cc: Dinh Nguyen <dinguyen@opensource.altera.com>
Link: http://lkml.kernel.org/r/1470045256-9032-2-git-send-email-marc.zyngier@arm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2016-08-01 12:54:15 +03:00
static u32 check_ppi_trigger ( int irq )
{
u32 flags = irq_get_trigger_type ( irq ) ;
if ( flags ! = IRQF_TRIGGER_HIGH & & flags ! = IRQF_TRIGGER_LOW ) {
pr_warn ( " WARNING: Invalid trigger for IRQ%d, assuming level low \n " , irq ) ;
pr_warn ( " WARNING: Please fix your firmware \n " ) ;
flags = IRQF_TRIGGER_LOW ;
}
return flags ;
}
2016-07-13 20:16:39 +03:00
static int arch_timer_starting_cpu ( unsigned int cpu )
2013-07-19 03:59:32 +04:00
{
2016-07-13 20:16:39 +03:00
struct clock_event_device * clk = this_cpu_ptr ( arch_timer_evt ) ;
clocksource/arm_arch_timer: Force per-CPU interrupt to be level-triggered
The ARM architected timer produces level-triggered interrupts (this
is mandated by the architecture). Unfortunately, a number of
device-trees get this wrong, and expose an edge-triggered interrupt.
Until now, this wasn't too much an issue, as the programming of the
trigger would fail (the corresponding PPI cannot be reconfigured),
and the kernel would be happy with this. But we're about to change
this, and trust DT a lot if the driver doesn't provide its own
trigger information. In that context, the timer breaks badly.
While we do need to fix the DTs, there is also some userspace out
there (kvmtool) that generates the same kind of broken DT on the
fly, and that will completely break with newer kernels.
As a safety measure, and to keep buggy software alive as well as
buying us some time to fix DTs all over the place, let's check
what trigger configuration has been given us by the firmware.
If this is not a level configuration, then we know that the
DT/ACPI configuration is bust, and we pick some defaults which
won't be worse than the existing setup.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Cc: Andrew Lunn <andrew@lunn.ch>
Cc: Liu Gang <Gang.Liu@nxp.com>
Cc: Mark Rutland <marc.rutland@arm.com>
Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
Cc: Wenbin Song <Wenbin.Song@freescale.com>
Cc: Mingkai Hu <Mingkai.Hu@freescale.com>
Cc: Florian Fainelli <f.fainelli@gmail.com>
Cc: Kevin Hilman <khilman@baylibre.com>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Michal Simek <michal.simek@xilinx.com>
Cc: Jon Hunter <jonathanh@nvidia.com>
Cc: arm@kernel.org
Cc: bcm-kernel-feedback-list@broadcom.com
Cc: linux-arm-kernel@lists.infradead.org
Cc: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
Cc: Jason Cooper <jason@lakedaemon.net>
Cc: Ray Jui <rjui@broadcom.com>
Cc: "Hou Zhiqiang" <B48286@freescale.com>
Cc: Tirumalesh Chalamarla <tchalamarla@cavium.com>
Cc: linux-samsung-soc@vger.kernel.org
Cc: Yuan Yao <yao.yuan@nxp.com>
Cc: Jan Glauber <jglauber@cavium.com>
Cc: Gregory Clement <gregory.clement@free-electrons.com>
Cc: linux-amlogic@lists.infradead.org
Cc: soren.brinkmann@xilinx.com
Cc: Rajesh Bhagat <rajesh.bhagat@freescale.com>
Cc: Scott Branden <sbranden@broadcom.com>
Cc: Duc Dang <dhdang@apm.com>
Cc: Kukjin Kim <kgene@kernel.org>
Cc: Carlo Caione <carlo@caione.org>
Cc: Dinh Nguyen <dinguyen@opensource.altera.com>
Link: http://lkml.kernel.org/r/1470045256-9032-2-git-send-email-marc.zyngier@arm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2016-08-01 12:54:15 +03:00
u32 flags ;
2016-07-13 20:16:39 +03:00
2013-07-19 03:59:32 +04:00
__arch_timer_setup ( ARCH_CP15_TIMER , clk ) ;
2012-11-12 18:33:44 +04:00
clocksource/arm_arch_timer: Force per-CPU interrupt to be level-triggered
The ARM architected timer produces level-triggered interrupts (this
is mandated by the architecture). Unfortunately, a number of
device-trees get this wrong, and expose an edge-triggered interrupt.
Until now, this wasn't too much an issue, as the programming of the
trigger would fail (the corresponding PPI cannot be reconfigured),
and the kernel would be happy with this. But we're about to change
this, and trust DT a lot if the driver doesn't provide its own
trigger information. In that context, the timer breaks badly.
While we do need to fix the DTs, there is also some userspace out
there (kvmtool) that generates the same kind of broken DT on the
fly, and that will completely break with newer kernels.
As a safety measure, and to keep buggy software alive as well as
buying us some time to fix DTs all over the place, let's check
what trigger configuration has been given us by the firmware.
If this is not a level configuration, then we know that the
DT/ACPI configuration is bust, and we pick some defaults which
won't be worse than the existing setup.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Cc: Andrew Lunn <andrew@lunn.ch>
Cc: Liu Gang <Gang.Liu@nxp.com>
Cc: Mark Rutland <marc.rutland@arm.com>
Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
Cc: Wenbin Song <Wenbin.Song@freescale.com>
Cc: Mingkai Hu <Mingkai.Hu@freescale.com>
Cc: Florian Fainelli <f.fainelli@gmail.com>
Cc: Kevin Hilman <khilman@baylibre.com>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Michal Simek <michal.simek@xilinx.com>
Cc: Jon Hunter <jonathanh@nvidia.com>
Cc: arm@kernel.org
Cc: bcm-kernel-feedback-list@broadcom.com
Cc: linux-arm-kernel@lists.infradead.org
Cc: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
Cc: Jason Cooper <jason@lakedaemon.net>
Cc: Ray Jui <rjui@broadcom.com>
Cc: "Hou Zhiqiang" <B48286@freescale.com>
Cc: Tirumalesh Chalamarla <tchalamarla@cavium.com>
Cc: linux-samsung-soc@vger.kernel.org
Cc: Yuan Yao <yao.yuan@nxp.com>
Cc: Jan Glauber <jglauber@cavium.com>
Cc: Gregory Clement <gregory.clement@free-electrons.com>
Cc: linux-amlogic@lists.infradead.org
Cc: soren.brinkmann@xilinx.com
Cc: Rajesh Bhagat <rajesh.bhagat@freescale.com>
Cc: Scott Branden <sbranden@broadcom.com>
Cc: Duc Dang <dhdang@apm.com>
Cc: Kukjin Kim <kgene@kernel.org>
Cc: Carlo Caione <carlo@caione.org>
Cc: Dinh Nguyen <dinguyen@opensource.altera.com>
Link: http://lkml.kernel.org/r/1470045256-9032-2-git-send-email-marc.zyngier@arm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2016-08-01 12:54:15 +03:00
flags = check_ppi_trigger ( arch_timer_ppi [ arch_timer_uses_ppi ] ) ;
enable_percpu_irq ( arch_timer_ppi [ arch_timer_uses_ppi ] , flags ) ;
2014-02-20 19:21:23 +04:00
clocksource/arm_arch_timer: Force per-CPU interrupt to be level-triggered
The ARM architected timer produces level-triggered interrupts (this
is mandated by the architecture). Unfortunately, a number of
device-trees get this wrong, and expose an edge-triggered interrupt.
Until now, this wasn't too much an issue, as the programming of the
trigger would fail (the corresponding PPI cannot be reconfigured),
and the kernel would be happy with this. But we're about to change
this, and trust DT a lot if the driver doesn't provide its own
trigger information. In that context, the timer breaks badly.
While we do need to fix the DTs, there is also some userspace out
there (kvmtool) that generates the same kind of broken DT on the
fly, and that will completely break with newer kernels.
As a safety measure, and to keep buggy software alive as well as
buying us some time to fix DTs all over the place, let's check
what trigger configuration has been given us by the firmware.
If this is not a level configuration, then we know that the
DT/ACPI configuration is bust, and we pick some defaults which
won't be worse than the existing setup.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Cc: Andrew Lunn <andrew@lunn.ch>
Cc: Liu Gang <Gang.Liu@nxp.com>
Cc: Mark Rutland <marc.rutland@arm.com>
Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
Cc: Wenbin Song <Wenbin.Song@freescale.com>
Cc: Mingkai Hu <Mingkai.Hu@freescale.com>
Cc: Florian Fainelli <f.fainelli@gmail.com>
Cc: Kevin Hilman <khilman@baylibre.com>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Michal Simek <michal.simek@xilinx.com>
Cc: Jon Hunter <jonathanh@nvidia.com>
Cc: arm@kernel.org
Cc: bcm-kernel-feedback-list@broadcom.com
Cc: linux-arm-kernel@lists.infradead.org
Cc: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
Cc: Jason Cooper <jason@lakedaemon.net>
Cc: Ray Jui <rjui@broadcom.com>
Cc: "Hou Zhiqiang" <B48286@freescale.com>
Cc: Tirumalesh Chalamarla <tchalamarla@cavium.com>
Cc: linux-samsung-soc@vger.kernel.org
Cc: Yuan Yao <yao.yuan@nxp.com>
Cc: Jan Glauber <jglauber@cavium.com>
Cc: Gregory Clement <gregory.clement@free-electrons.com>
Cc: linux-amlogic@lists.infradead.org
Cc: soren.brinkmann@xilinx.com
Cc: Rajesh Bhagat <rajesh.bhagat@freescale.com>
Cc: Scott Branden <sbranden@broadcom.com>
Cc: Duc Dang <dhdang@apm.com>
Cc: Kukjin Kim <kgene@kernel.org>
Cc: Carlo Caione <carlo@caione.org>
Cc: Dinh Nguyen <dinguyen@opensource.altera.com>
Link: http://lkml.kernel.org/r/1470045256-9032-2-git-send-email-marc.zyngier@arm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2016-08-01 12:54:15 +03:00
if ( arch_timer_has_nonsecure_ppi ( ) ) {
flags = check_ppi_trigger ( arch_timer_ppi [ PHYS_NONSECURE_PPI ] ) ;
enable_percpu_irq ( arch_timer_ppi [ PHYS_NONSECURE_PPI ] , flags ) ;
}
2012-11-12 18:33:44 +04:00
arch_counter_set_user_access ( ) ;
2016-06-27 19:30:13 +03:00
if ( evtstrm_enable )
2013-08-23 18:32:29 +04:00
arch_timer_configure_evtstream ( ) ;
2012-11-12 18:33:44 +04:00
return 0 ;
}
2013-07-19 03:59:32 +04:00
/*
 * Determine the timer frequency, preferring (in order) an already-known
 * rate, the DT "clock-frequency" property, and finally the CNTFRQ
 * register (read via MMIO when @cntbase is provided, CP15 otherwise).
 * Under ACPI the DT lookup is skipped and CNTFRQ is used directly.
 */
static void
arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np)
{
	/* Who has more than one independent system counter? */
	if (arch_timer_rate)
		return;

	/*
	 * Try to determine the frequency from the device tree or CNTFRQ,
	 * if ACPI is enabled, get the frequency from CNTFRQ ONLY.
	 */
	if (!acpi_disabled ||
	    of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
		if (cntbase)
			arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
		else
			arch_timer_rate = arch_timer_get_cntfrq();
	}

	/* Check the timer frequency. */
	if (arch_timer_rate == 0)
		pr_warn("Architected timer frequency not available\n");
}
static void arch_timer_banner ( unsigned type )
{
pr_info ( " Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s). \n " ,
type & ARCH_CP15_TIMER ? " cp15 " : " " ,
type = = ( ARCH_CP15_TIMER | ARCH_MEM_TIMER ) ? " and " : " " ,
type & ARCH_MEM_TIMER ? " mmio " : " " ,
2012-11-12 18:33:44 +04:00
( unsigned long ) arch_timer_rate / 1000000 ,
( unsigned long ) ( arch_timer_rate / 10000 ) % 100 ,
2013-07-19 03:59:32 +04:00
type & ARCH_CP15_TIMER ?
2014-02-20 19:21:23 +04:00
( arch_timer_uses_ppi = = VIRT_PPI ) ? " virt " : " phys " :
2013-07-19 03:59:32 +04:00
" " ,
type = = ( ARCH_CP15_TIMER | ARCH_MEM_TIMER ) ? " / " : " " ,
type & ARCH_MEM_TIMER ?
arch_timer_mem_use_virtual ? " virt " : " phys " :
" " ) ;
2012-11-12 18:33:44 +04:00
}
/* Return the architected timer frequency in Hz (0 if not yet probed). */
u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}
2013-07-19 03:59:32 +04:00
static u64 arch_counter_get_cntvct_mem ( void )
2012-11-12 18:33:44 +04:00
{
2013-07-19 03:59:32 +04:00
u32 vct_lo , vct_hi , tmp_hi ;
do {
vct_hi = readl_relaxed ( arch_counter_base + CNTVCT_HI ) ;
vct_lo = readl_relaxed ( arch_counter_base + CNTVCT_LO ) ;
tmp_hi = readl_relaxed ( arch_counter_base + CNTVCT_HI ) ;
} while ( vct_hi ! = tmp_hi ) ;
return ( ( u64 ) vct_hi < < 32 ) | vct_lo ;
2012-11-12 18:33:44 +04:00
}
2016-04-11 18:32:51 +03:00
/* Timer information (timecounter, virtual IRQ) exported to KVM. */
static struct arch_timer_kvm_info arch_timer_kvm_info;

struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
	return &arch_timer_kvm_info;
}
2012-11-12 18:33:44 +04:00
2013-07-19 03:59:32 +04:00
/*
 * Register the system counter as clocksource, sched_clock and KVM
 * timecounter.  Picks the CP15 virtual/physical counter when present,
 * otherwise falls back to the MMIO counter frame.
 */
static void __init arch_counter_register(unsigned type)
{
	u64 start_count;

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_CP15_TIMER) {
		if (IS_ENABLED(CONFIG_ARM64) || arch_timer_uses_ppi == VIRT_PPI)
			arch_timer_read_counter = arch_counter_get_cntvct;
		else
			arch_timer_read_counter = arch_counter_get_cntpct;

		clocksource_counter.archdata.vdso_direct = vdso_default;
	} else {
		arch_timer_read_counter = arch_counter_get_cntvct_mem;
	}

	if (!arch_counter_suspend_stop)
		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;

	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&arch_timer_kvm_info.timecounter,
			 &cyclecounter, start_count);

	/* 56 bits minimum, so we assume worst case rollover */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}
2013-06-19 19:32:08 +04:00
static void arch_timer_stop ( struct clock_event_device * clk )
2012-11-12 18:33:44 +04:00
{
pr_debug ( " arch_timer_teardown disable IRQ%d cpu #%d \n " ,
clk - > irq , smp_processor_id ( ) ) ;
2014-02-20 19:21:23 +04:00
disable_percpu_irq ( arch_timer_ppi [ arch_timer_uses_ppi ] ) ;
if ( arch_timer_has_nonsecure_ppi ( ) )
disable_percpu_irq ( arch_timer_ppi [ PHYS_NONSECURE_PPI ] ) ;
2012-11-12 18:33:44 +04:00
2015-06-12 11:00:12 +03:00
clk - > set_state_shutdown ( clk ) ;
2012-11-12 18:33:44 +04:00
}
2016-07-13 20:16:39 +03:00
static int arch_timer_dying_cpu ( unsigned int cpu )
2012-11-12 18:33:44 +04:00
{
2016-07-13 20:16:39 +03:00
struct clock_event_device * clk = this_cpu_ptr ( arch_timer_evt ) ;
2012-11-12 18:33:44 +04:00
2016-07-13 20:16:39 +03:00
arch_timer_stop ( clk ) ;
return 0 ;
2012-11-12 18:33:44 +04:00
}
2013-08-23 18:53:15 +04:00
# ifdef CONFIG_CPU_PM
2017-04-04 19:05:16 +03:00
static DEFINE_PER_CPU ( unsigned long , saved_cntkctl ) ;
2013-08-23 18:53:15 +04:00
static int arch_timer_cpu_pm_notify ( struct notifier_block * self ,
unsigned long action , void * hcpu )
{
if ( action = = CPU_PM_ENTER )
2017-04-04 19:05:16 +03:00
__this_cpu_write ( saved_cntkctl , arch_timer_get_cntkctl ( ) ) ;
2013-08-23 18:53:15 +04:00
else if ( action = = CPU_PM_ENTER_FAILED | | action = = CPU_PM_EXIT )
2017-04-04 19:05:16 +03:00
arch_timer_set_cntkctl ( __this_cpu_read ( saved_cntkctl ) ) ;
2013-08-23 18:53:15 +04:00
return NOTIFY_OK ;
}
static struct notifier_block arch_timer_cpu_pm_notifier = {
. notifier_call = arch_timer_cpu_pm_notify ,
} ;
static int __init arch_timer_cpu_pm_init ( void )
{
return cpu_pm_register_notifier ( & arch_timer_cpu_pm_notifier ) ;
}
2016-07-13 20:16:39 +03:00
static void __init arch_timer_cpu_pm_deinit ( void )
{
WARN_ON ( cpu_pm_unregister_notifier ( & arch_timer_cpu_pm_notifier ) ) ;
}
2013-08-23 18:53:15 +04:00
# else
static int __init arch_timer_cpu_pm_init ( void )
{
return 0 ;
}
2016-07-13 20:16:39 +03:00
static void __init arch_timer_cpu_pm_deinit ( void )
{
}
2013-08-23 18:53:15 +04:00
# endif
2012-11-12 18:33:44 +04:00
/*
 * Allocate the per-CPU clockevent devices, request the PPI(s) that
 * match the timer flavour in use, register the CPU PM notifier, and
 * install the CPU-hotplug callbacks (which also brings up the boot
 * CPU's timer).  Unwinds everything on failure via goto cleanup.
 */
static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	ppi = arch_timer_ppi[arch_timer_uses_ppi];
	switch (arch_timer_uses_ppi) {
	case VIRT_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
		break;
	case PHYS_SECURE_PPI:
	case PHYS_NONSECURE_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		/* The non-secure PPI is optional alongside the secure one */
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
		break;
	case HYP_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		break;
	default:
		BUG();
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;

	/* Register and immediately configure the timer on the boot CPU */
	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
				"clockevents/arm/arch_timer:starting",
				arch_timer_starting_cpu, arch_timer_dying_cpu);
	if (err)
		goto out_unreg_cpupm;
	return 0;

out_unreg_cpupm:
	arch_timer_cpu_pm_deinit();

out_unreg_notify:
	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
	if (arch_timer_has_nonsecure_ppi())
		free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
				arch_timer_evt);

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}
2013-07-19 03:59:32 +04:00
/*
 * Set up the MMIO timer frame at @base and request its interrupt.
 * NOTE(review): on request_irq() failure the clockevent registered by
 * __arch_timer_setup() is not unregistered before kfree(t) — confirm
 * this matches the intended (upstream) error handling.
 */
static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
	int ret;
	irq_handler_t func;
	struct arch_timer *t;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	t->base = base;
	t->evt.irq = irq;
	__arch_timer_setup(ARCH_MEM_TIMER, &t->evt);

	if (arch_timer_mem_use_virtual)
		func = arch_timer_handler_virt_mem;
	else
		func = arch_timer_handler_phys_mem;

	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
	if (ret) {
		pr_err("arch_timer: Failed to request mem timer irq\n");
		kfree(t);
	}

	return ret;
}
/* DT compatibles for the CP15 (per-CPU) architected timer. */
static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible   = "arm,armv7-timer",    },
	{ .compatible   = "arm,armv8-timer",    },
	{},
};

/* DT compatible for the memory-mapped timer frames. */
static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible   = "arm,armv7-timer-mem", },
	{},
};
2014-09-29 03:50:05 +04:00
static bool __init
2015-03-31 13:12:22 +03:00
arch_timer_needs_probing ( int type , const struct of_device_id * matches )
2014-09-29 03:50:05 +04:00
{
struct device_node * dn ;
2015-03-31 13:12:22 +03:00
bool needs_probing = false ;
2014-09-29 03:50:05 +04:00
dn = of_find_matching_node ( NULL , matches ) ;
2014-10-15 19:06:20 +04:00
if ( dn & & of_device_is_available ( dn ) & & ! ( arch_timers_present & type ) )
2015-03-31 13:12:22 +03:00
needs_probing = true ;
2014-09-29 03:50:05 +04:00
of_node_put ( dn ) ;
2015-03-31 13:12:22 +03:00
return needs_probing ;
2014-09-29 03:50:05 +04:00
}
2016-06-06 18:55:40 +03:00
/*
 * Final, shared registration step.  If the other timer flavour still
 * needs probing, bail out (successfully) and let its init path get
 * here later; otherwise print the banner and register the counter.
 */
static int __init arch_timer_common_init(void)
{
	unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;

	/* Wait until both nodes are probed if we have two timers */
	if ((arch_timers_present & mask) != mask) {
		if (arch_timer_needs_probing(ARCH_MEM_TIMER, arch_timer_mem_of_match))
			return 0;
		if (arch_timer_needs_probing(ARCH_CP15_TIMER, arch_timer_of_match))
			return 0;
	}

	arch_timer_banner(arch_timers_present);
	arch_counter_register(arch_timers_present);
	return arch_timer_arch_init();
}
2016-06-06 18:55:40 +03:00
/*
 * Common init for DT and ACPI paths: select which PPI/timer flavour to
 * use, then register the timer and counter infrastructure.
 */
static int __init arch_timer_init(void)
{
	int ret;
	/*
	 * If HYP mode is available, we know that the physical timer
	 * has been configured to be accessible from PL1. Use it, so
	 * that a guest can use the virtual timer instead.
	 *
	 * If no interrupt provided for virtual timer, we'll have to
	 * stick to the physical timer. It'd better be accessible...
	 *
	 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
	 * accesses to CNTP_*_EL1 registers are silently redirected to
	 * their CNTHP_*_EL2 counterparts, and use a different PPI
	 * number.
	 */
	if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
		bool has_ppi;

		if (is_kernel_in_hyp_mode()) {
			arch_timer_uses_ppi = HYP_PPI;
			has_ppi = !!arch_timer_ppi[HYP_PPI];
		} else {
			arch_timer_uses_ppi = PHYS_SECURE_PPI;
			has_ppi = (!!arch_timer_ppi[PHYS_SECURE_PPI] ||
				   !!arch_timer_ppi[PHYS_NONSECURE_PPI]);
		}

		if (!has_ppi) {
			pr_warn("arch_timer: No interrupt available, giving up\n");
			return -EINVAL;
		}
	}

	ret = arch_timer_register();
	if (ret)
		return ret;

	ret = arch_timer_common_init();
	if (ret)
		return ret;

	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI];

	return 0;
}
2015-03-24 17:02:50 +03:00
2016-06-06 18:55:40 +03:00
/* Probe the CP15 timer from its device-tree node. */
static int __init arch_timer_of_init(struct device_node *np)
{
	int i;

	if (arch_timers_present & ARCH_CP15_TIMER) {
		pr_warn("arch_timer: multiple nodes in dt, skipping\n");
		return 0;
	}

	arch_timers_present |= ARCH_CP15_TIMER;
	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	arch_timer_detect_rate(NULL, np);

	arch_timer_c3stop = !of_property_read_bool(np, "always-on");

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_dt, np);

	/*
	 * If we cannot rely on firmware initializing the timer registers then
	 * we should use the physical timers instead.
	 */
	if (IS_ENABLED(CONFIG_ARM) &&
	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
		arch_timer_uses_ppi = PHYS_SECURE_PPI;

	/* On some systems, the counter stops ticking when in suspend. */
	arch_counter_suspend_stop = of_property_read_bool(np,
							 "arm,no-tick-in-suspend");

	return arch_timer_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
2013-07-19 03:59:32 +04:00
2016-06-06 18:55:40 +03:00
static int __init arch_timer_mem_init ( struct device_node * np )
2013-07-19 03:59:32 +04:00
{
struct device_node * frame , * best_frame = NULL ;
void __iomem * cntctlbase , * base ;
2016-06-06 18:55:40 +03:00
unsigned int irq , ret = - EINVAL ;
2013-07-19 03:59:32 +04:00
u32 cnttidr ;
arch_timers_present | = ARCH_MEM_TIMER ;
cntctlbase = of_iomap ( np , 0 ) ;
if ( ! cntctlbase ) {
pr_err ( " arch_timer: Can't find CNTCTLBase \n " ) ;
2016-06-06 18:55:40 +03:00
return - ENXIO ;
2013-07-19 03:59:32 +04:00
}
cnttidr = readl_relaxed ( cntctlbase + CNTTIDR ) ;
/*
* Try to find a virtual capable frame . Otherwise fall back to a
* physical capable frame .
*/
for_each_available_child_of_node ( np , frame ) {
int n ;
2016-02-01 15:00:48 +03:00
u32 cntacr ;
2013-07-19 03:59:32 +04:00
if ( of_property_read_u32 ( frame , " frame-number " , & n ) ) {
pr_err ( " arch_timer: Missing frame-number \n " ) ;
of_node_put ( frame ) ;
2016-02-01 15:00:48 +03:00
goto out ;
2013-07-19 03:59:32 +04:00
}
2016-02-01 15:00:48 +03:00
/* Try enabling everything, and see what sticks */
cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT ;
writel_relaxed ( cntacr , cntctlbase + CNTACR ( n ) ) ;
cntacr = readl_relaxed ( cntctlbase + CNTACR ( n ) ) ;
if ( ( cnttidr & CNTTIDR_VIRT ( n ) ) & &
! ( ~ cntacr & ( CNTACR_RWVT | CNTACR_RVCT ) ) ) {
2013-07-19 03:59:32 +04:00
of_node_put ( best_frame ) ;
best_frame = frame ;
arch_timer_mem_use_virtual = true ;
break ;
}
2016-02-01 15:00:48 +03:00
if ( ~ cntacr & ( CNTACR_RWPT | CNTACR_RPCT ) )
continue ;
2013-07-19 03:59:32 +04:00
of_node_put ( best_frame ) ;
best_frame = of_node_get ( frame ) ;
}
2016-06-06 18:55:40 +03:00
ret = - ENXIO ;
2016-10-26 10:35:50 +03:00
base = arch_counter_base = of_io_request_and_map ( best_frame , 0 ,
" arch_mem_timer " ) ;
if ( IS_ERR ( base ) ) {
2013-07-19 03:59:32 +04:00
pr_err ( " arch_timer: Can't map frame's registers \n " ) ;
2016-02-01 15:00:48 +03:00
goto out ;
2013-07-19 03:59:32 +04:00
}
if ( arch_timer_mem_use_virtual )
irq = irq_of_parse_and_map ( best_frame , 1 ) ;
else
irq = irq_of_parse_and_map ( best_frame , 0 ) ;
2016-02-01 15:00:48 +03:00
2016-06-06 18:55:40 +03:00
ret = - EINVAL ;
2013-07-19 03:59:32 +04:00
if ( ! irq ) {
pr_err ( " arch_timer: Frame missing %s irq " ,
2013-08-21 16:59:23 +04:00
arch_timer_mem_use_virtual ? " virt " : " phys " ) ;
2016-02-01 15:00:48 +03:00
goto out ;
2013-07-19 03:59:32 +04:00
}
arch_timer_detect_rate ( base , np ) ;
2016-06-06 18:55:40 +03:00
ret = arch_timer_mem_register ( base , irq ) ;
if ( ret )
goto out ;
return arch_timer_common_init ( ) ;
2016-02-01 15:00:48 +03:00
out :
iounmap ( cntctlbase ) ;
of_node_put ( best_frame ) ;
2016-06-06 18:55:40 +03:00
return ret ;
2013-07-19 03:59:32 +04:00
}
2016-06-07 01:27:44 +03:00
CLOCKSOURCE_OF_DECLARE ( armv7_arch_timer_mem , " arm,armv7-timer-mem " ,
2013-07-19 03:59:32 +04:00
arch_timer_mem_init ) ;
2015-03-24 17:02:50 +03:00
#ifdef CONFIG_ACPI
/* Translate a GTDT interrupt number + flags into a Linux irq via the GSI. */
static int __init map_generic_timer_interrupt(u32 interrupt, u32 flags)
{
	int trigger, polarity;

	if (!interrupt)
		return 0;

	trigger = (flags & ACPI_GTDT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE
			: ACPI_LEVEL_SENSITIVE;

	polarity = (flags & ACPI_GTDT_INTERRUPT_POLARITY) ? ACPI_ACTIVE_LOW
			: ACPI_ACTIVE_HIGH;

	return acpi_register_gsi(NULL, interrupt, trigger, polarity);
}

/* Initialize per-processor generic timer */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
	struct acpi_table_gtdt *gtdt;

	if (arch_timers_present & ARCH_CP15_TIMER) {
		pr_warn("arch_timer: already initialized, skipping\n");
		return -EINVAL;
	}

	gtdt = container_of(table, struct acpi_table_gtdt, header);

	arch_timers_present |= ARCH_CP15_TIMER;

	arch_timer_ppi[PHYS_SECURE_PPI] =
		map_generic_timer_interrupt(gtdt->secure_el1_interrupt,
		gtdt->secure_el1_flags);

	arch_timer_ppi[PHYS_NONSECURE_PPI] =
		map_generic_timer_interrupt(gtdt->non_secure_el1_interrupt,
		gtdt->non_secure_el1_flags);

	arch_timer_ppi[VIRT_PPI] =
		map_generic_timer_interrupt(gtdt->virtual_timer_interrupt,
		gtdt->virtual_timer_flags);

	arch_timer_ppi[HYP_PPI] =
		map_generic_timer_interrupt(gtdt->non_secure_el2_interrupt,
		gtdt->non_secure_el2_flags);

	/* Get the frequency from CNTFRQ */
	arch_timer_detect_rate(NULL, NULL);

	/* Always-on capability */
	arch_timer_c3stop = !(gtdt->non_secure_el1_flags & ACPI_GTDT_ALWAYS_ON);

	/*
	 * BUGFIX: the return value of arch_timer_init() was discarded and
	 * this function unconditionally returned 0, silently hiding timer
	 * registration failures from the ACPI probe machinery.  Propagate
	 * the error instead.
	 */
	return arch_timer_init();
}
CLOCKSOURCE_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
#endif