// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016-17 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

/*
 * ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1. Each can be
 * programmed to go from @count to @limit and optionally interrupt.
 * We've designated TIMER0 for clockevents and TIMER1 for clocksource.
 *
 * ARCv2 based HS38 cores have RTC (in-core) and GFRC (inside ARConnect/MCIP),
 * which are suitable for UP and SMP clocksources respectively.
 */

#include <linux/interrupt.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>

#include <soc/arc/timers.h>
#include <soc/arc/mcip.h>

static unsigned long arc_timer_freq;

static int noinline arc_get_timer_clk(struct device_node *node)
{
	struct clk *clk;
	int ret;

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		pr_err("timer missing clk\n");
		return PTR_ERR(clk);
	}

	ret = clk_prepare_enable(clk);
	if (ret) {
		pr_err("Couldn't enable parent clk\n");
		return ret;
	}

	/* Cache the rate: every registration call below relies on it */
	arc_timer_freq = clk_get_rate(clk);

	return 0;
}

/********** Clock Source Device *********/

#ifdef CONFIG_ARC_TIMERS_64BIT

static u64 arc_read_gfrc(struct clocksource *cs)
{
	unsigned long flags;
	u32 l, h;

	/*
	 * From a programming model pov, there seems to be just one instance of
	 * MCIP_CMD/MCIP_READBACK, however micro-architecturally there's
	 * an instance PER ARC CORE (not per cluster), and there is dedicated
	 * hardware decode logic (per core) inside ARConnect to handle
	 * simultaneous read/write accesses from cores via those two registers.
	 * So several concurrent commands to ARConnect are OK if they are
	 * trying to access two different sub-components (like GFRC,
	 * inter-core interrupt, etc...). HW also supports simultaneous
	 * accesses to GFRC by multiple cores.
	 * That's why it is safe to disable hard interrupts on the local CPU
	 * before accessing GFRC, instead of taking the global MCIP spinlock
	 * defined in arch/arc/kernel/mcip.c
	 */
	local_irq_save(flags);

	__mcip_cmd(CMD_GFRC_READ_LO, 0);
	l = read_aux_reg(ARC_REG_MCIP_READBACK);

	__mcip_cmd(CMD_GFRC_READ_HI, 0);
	h = read_aux_reg(ARC_REG_MCIP_READBACK);

	local_irq_restore(flags);

	return (((u64)h) << 32) | l;
}
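
/*
 * sched_clock_register() wants a plain u64 (*)(void) callback, while a
 * clocksource ->read() takes a struct clocksource *. These trivial shims
 * (here and for the RTC/TIMER1 counters below) bridge the two signatures.
 */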
static notrace u64 arc_gfrc_clock_read(void)
{
	return arc_read_gfrc(NULL);
}

static struct clocksource arc_counter_gfrc = {
	.name	= "ARConnect GFRC",
	.rating	= 400,
	.read	= arc_read_gfrc,
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};
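
/*
 * Note on ratings: GFRC (400) > RTC (350) > TIMER1 (300), so when more than
 * one of these counters registers successfully, the clocksource core picks
 * the highest-rated one.
 */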

static int __init arc_cs_setup_gfrc(struct device_node *node)
{
	struct mcip_bcr mp;
	int ret;

	READ_BCR(ARC_REG_MCIP_BCR, mp);
	if (!mp.gfrc) {
		pr_warn("Global-64-bit-Ctr clocksource not detected\n");
		return -ENXIO;
	}

	ret = arc_get_timer_clk(node);
	if (ret)
		return ret;

	sched_clock_register(arc_gfrc_clock_read, 64, arc_timer_freq);

	return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
}
TIMER_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);

#define AUX_RTC_CTRL	0x103
#define AUX_RTC_LOW	0x104
#define AUX_RTC_HIGH	0x105
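
/*
 * ARCv2 in-core RTC aux registers: LOW/HIGH hold the 64-bit count, and
 * writing 1 to CTRL enables the counter. Per the readout loop below, CTRL
 * bit 31 is the status flag signalling that LOW/HIGH were read atomically.
 */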

static u64 arc_read_rtc(struct clocksource *cs)
{
	unsigned long status;
	u32 l, h;

	/*
	 * hardware has an internal state machine which tracks readout of
	 * low/high and invalidates the CTRL status bit if
	 *  - interrupt/exception taken between the two reads
	 *  - high increments after low has been read
	 */
	do {
		l = read_aux_reg(AUX_RTC_LOW);
		h = read_aux_reg(AUX_RTC_HIGH);
		status = read_aux_reg(AUX_RTC_CTRL);
	} while (!(status & BIT(31)));	/* retry until the readout was atomic */

	return (((u64)h) << 32) | l;
}

static notrace u64 arc_rtc_clock_read(void)
{
	return arc_read_rtc(NULL);
}

static struct clocksource arc_counter_rtc = {
	.name	= "ARCv2 RTC",
	.rating	= 350,
	.read	= arc_read_rtc,
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init arc_cs_setup_rtc(struct device_node *node)
{
	struct bcr_timer timer;
	int ret;

	READ_BCR(ARC_REG_TIMERS_BCR, timer);
	if (!timer.rtc) {
		pr_warn("Local-64-bit-Ctr clocksource not detected\n");
		return -ENXIO;
	}

	/* Local to CPU hence not usable in SMP */
	if (IS_ENABLED(CONFIG_SMP)) {
		pr_warn("Local-64-bit-Ctr not usable in SMP\n");
		return -EINVAL;
	}

	ret = arc_get_timer_clk(node);
	if (ret)
		return ret;

	write_aux_reg(AUX_RTC_CTRL, 1);	/* enable the counter */

	sched_clock_register(arc_rtc_clock_read, 64, arc_timer_freq);

	return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
}
TIMER_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);

#endif

/*
 * 32bit TIMER1: programmed to free-run from 0 to ARC_TIMERN_MAX, so it keeps
 * counting monotonically and wraps around.
 */
static u64 arc_read_timer1(struct clocksource *cs)
{
	return (u64) read_aux_reg(ARC_REG_TIMER1_CNT);
}

static notrace u64 arc_timer1_clock_read(void)
{
	return arc_read_timer1(NULL);
}

static struct clocksource arc_counter_timer1 = {
	.name	= "ARC Timer1",
	.rating	= 300,
	.read	= arc_read_timer1,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init arc_cs_setup_timer1(struct device_node *node)
{
	int ret;

	/* Local to CPU hence not usable in SMP */
	if (IS_ENABLED(CONFIG_SMP))
		return -EINVAL;

	ret = arc_get_timer_clk(node);
	if (ret)
		return ret;

	/*
	 * Free-run TIMER1 over its full range, no interrupt, counting only
	 * while the CPU is running (NH = Not Halted)
	 */
	write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMERN_MAX);
	write_aux_reg(ARC_REG_TIMER1_CNT, 0);
	write_aux_reg(ARC_REG_TIMER1_CTRL, ARC_TIMER_CTRL_NH);

	sched_clock_register(arc_timer1_clock_read, 32, arc_timer_freq);

	return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
}


/********** Clock Event Device *********/

static int arc_timer_irq;

/*
 * Arm the timer to interrupt after @cycles
 * The distinction for oneshot/periodic is done in timer_irq_handler() below
 */
static void arc_timer_event_setup(unsigned int cycles)
{
	write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
	write_aux_reg(ARC_REG_TIMER0_CNT, 0);	/* start from 0 */

	write_aux_reg(ARC_REG_TIMER0_CTRL, ARC_TIMER_CTRL_IE | ARC_TIMER_CTRL_NH);
}

static int arc_clkevent_set_next_event(unsigned long delta,
				       struct clock_event_device *dev)
{
	arc_timer_event_setup(delta);
	return 0;
}

static int arc_clkevent_set_periodic(struct clock_event_device *dev)
{
	/*
	 * At X Hz, 1 sec = 1000ms -> X cycles;
	 *		      10ms -> X / 100 cycles
	 * e.g. a 100 MHz timer clock with HZ=100 gives 1,000,000 cycles/tick
	 */
	arc_timer_event_setup(arc_timer_freq / HZ);
	return 0;
}

static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
	.name			= "ARC Timer0",
	.features		= CLOCK_EVT_FEAT_ONESHOT |
				  CLOCK_EVT_FEAT_PERIODIC,
	.rating			= 300,
	.set_next_event		= arc_clkevent_set_next_event,
	.set_state_periodic	= arc_clkevent_set_periodic,
};

static irqreturn_t timer_irq_handler(int irq, void *dev_id)
{
	/*
	 * Note that generic IRQ core could have passed @evt for @dev_id if
	 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
	 */
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
	int irq_reenable = clockevent_state_periodic(evt);

	/*
	 * 1. ACK the interrupt
	 *    - For ARC700, any write to CTRL reg ACKs it, so just rewriting
	 *      the [N]ot-[H]alted bit suffices.
	 *    - For HS3x, it is a bit subtle. On a taken count-down interrupt,
	 *      IP bit [3] is set, which needs to be cleared for ACK'ing.
	 *      The write below can only update the other two bits, hence it
	 *      explicitly clears the IP bit.
	 * 2. Re-arm the interrupt if periodic by writing to the IE bit [0]
	 */
	write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | ARC_TIMER_CTRL_NH);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}
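
/*
 * CPU hotplug callbacks: the "starting" hook runs on each CPU as it comes
 * online to register its per-cpu clockevent and unmask the per-cpu timer
 * IRQ; the "dying" hook masks the IRQ again when the CPU goes down.
 */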
static int arc_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);

	evt->cpumask = cpumask_of(smp_processor_id());

	clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMERN_MAX);
	enable_percpu_irq(arc_timer_irq, 0);
	return 0;
}

static int arc_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(arc_timer_irq);
	return 0;
}

/*
 * clockevent setup for boot CPU
 */
static int __init arc_clockevent_setup(struct device_node *node)
{
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
	int ret;

	arc_timer_irq = irq_of_parse_and_map(node, 0);
	if (arc_timer_irq <= 0) {
		pr_err("clockevent: missing irq\n");
		return -EINVAL;
	}

	ret = arc_get_timer_clk(node);
	if (ret)
		return ret;

	/* Needs apriori irq_set_percpu_devid() done in intc map function */
	ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
				 "Timer0 (per-cpu-tick)", evt);
	if (ret) {
		pr_err("clockevent: unable to request irq\n");
		return ret;
	}

	ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
				"clockevents/arc/timer:starting",
				arc_timer_starting_cpu,
				arc_timer_dying_cpu);
	if (ret) {
		pr_err("Failed to setup hotplug state\n");
		return ret;
	}
	return 0;
}

static int __init arc_of_timer_init(struct device_node *np)
{
	static int init_count = 0;
	int ret;

	if (!init_count) {
		init_count = 1;
		ret = arc_clockevent_setup(np);
	} else {
		ret = arc_cs_setup_timer1(np);
	}

	return ret;
}
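
/*
 * TIMER0 and TIMER1 device-tree nodes share the "snps,arc-timer" compatible,
 * so the first node probed becomes the clockevent (TIMER0) and a subsequent
 * one is set up as the TIMER1 clocksource.
 */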
TIMER_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);