/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: Jan 2011
 *  - sched_clock() no longer jiffies based. Uses the same clocksource
 *    as gtod
 *
 * Rajeshwarr/Vineetg: Mar 2008
 *  - Implemented CONFIG_GENERIC_TIME (rather, deleted arch specific code)
 *    for arch independent gettimeofday()
 *  - Implemented CONFIG_GENERIC_CLOCKEVENTS as base for hrtimers
 *
 * Vineetg: Mar 2008: Forked off from time.c which now is time-jiff.c
 */

/*
 * ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1.
 * Each can be programmed to go from @count to @limit and optionally
 * interrupt when that happens.
 * A write to Control Register clears the Interrupt.
 *
 * We've designated TIMER0 for events (clockevents)
 * while TIMER1 for free running (clocksource).
 *
 * Newer ARC700 cores have a 64 bit clk fetching RTSC insn, preferred
 * over TIMER1, which however is currently broken.
 */

#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <asm/irq.h>
#include <asm/arcregs.h>

#include <asm/mcip.h>

/* Timer related Aux registers */
#define ARC_REG_TIMER0_LIMIT	0x23	/* timer 0 limit */
#define ARC_REG_TIMER0_CTRL	0x22	/* timer 0 control */
#define ARC_REG_TIMER0_CNT	0x21	/* timer 0 count */
#define ARC_REG_TIMER1_LIMIT	0x102	/* timer 1 limit */
#define ARC_REG_TIMER1_CTRL	0x101	/* timer 1 control */
#define ARC_REG_TIMER1_CNT	0x100	/* timer 1 count */

#define TIMER_CTRL_IE		(1 << 0) /* Interrupt when Count reaches limit */
#define TIMER_CTRL_NH		(1 << 1) /* Count only when CPU NOT halted */

#define ARC_TIMER_MAX		0xFFFFFFFF

static unsigned long arc_timer_freq;
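
/*
 * Fetch the timer's input clk from DT, enable it, and cache its rate
 * in arc_timer_freq for the clockevent/clocksource registrations below.
 */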
static int noinline arc_get_timer_clk(struct device_node *node)
{
	struct clk *clk;
	int ret;

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		pr_err("timer missing clk");
		return PTR_ERR(clk);
	}

	ret = clk_prepare_enable(clk);
	if (ret) {
		pr_err("Couldn't enable parent clk\n");
		return ret;
	}

	arc_timer_freq = clk_get_rate(clk);

	return 0;
}

/********** Clock Source Device *********/

#ifdef CONFIG_ARC_HAS_GFRC

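/*
 * GFRC: 64-bit Global Free Running Counter in the ARConnect (MCIP)
 * block, shared by all cores, hence usable as an SMP clocksource.
 */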
static cycle_t arc_read_gfrc(struct clocksource *cs)
{
	unsigned long flags;
	u32 l, h;

	/* irqs off so the LO/HI readback pair isn't torn by an interrupt */
	local_irq_save(flags);

	__mcip_cmd(CMD_GFRC_READ_LO, 0);
	l = read_aux_reg(ARC_REG_MCIP_READBACK);

	__mcip_cmd(CMD_GFRC_READ_HI, 0);
	h = read_aux_reg(ARC_REG_MCIP_READBACK);

	local_irq_restore(flags);

	return (((cycle_t)h) << 32) | l;
}

static struct clocksource arc_counter_gfrc = {
	.name	= "ARConnect GFRC",
	.rating	= 400,
	.read	= arc_read_gfrc,
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init arc_cs_setup_gfrc(struct device_node *node)
{
	struct mcip_bcr mp;
	int ret;

	READ_BCR(ARC_REG_MCIP_BCR, mp);
	if (!mp.gfrc) {
		pr_warn("Global-64-bit-Ctr clocksource not detected");
		return -ENXIO;
	}

	ret = arc_get_timer_clk(node);
	if (ret)
		return ret;

	return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
}
CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);

#endif

#ifdef CONFIG_ARC_HAS_RTC

#define AUX_RTC_CTRL	0x103
#define AUX_RTC_LOW	0x104
#define AUX_RTC_HIGH	0x105
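
/*
 * RTC: 64-bit free running counter, but local to each core (hence the
 * SMP check in the probe below); readout is a LOW/HIGH register pair
 * with a hardware status bit flagging torn reads.
 */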
static cycle_t arc_read_rtc(struct clocksource *cs)
{
	unsigned long status;
	u32 l, h;

	/*
	 * hardware has an internal state machine which tracks readout of
	 * low/high and updates the CTRL.status if
	 *  - interrupt/exception taken between the two reads
	 *  - high increments after low has been read
	 */
	do {
		l = read_aux_reg(AUX_RTC_LOW);
		h = read_aux_reg(AUX_RTC_HIGH);
		status = read_aux_reg(AUX_RTC_CTRL);
	} while (!(status & _BITUL(31)));

	return (((cycle_t)h) << 32) | l;
}

static struct clocksource arc_counter_rtc = {
	.name	= "ARCv2 RTC",
	.rating	= 350,
	.read	= arc_read_rtc,
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init arc_cs_setup_rtc(struct device_node *node)
{
	struct bcr_timer timer;
	int ret;

	READ_BCR(ARC_REG_TIMERS_BCR, timer);
	if (!timer.rtc) {
		pr_warn("Local-64-bit-Ctr clocksource not detected");
		return -ENXIO;
	}

	/* Local to CPU hence not usable in SMP */
	if (IS_ENABLED(CONFIG_SMP)) {
		pr_warn("Local-64-bit-Ctr not usable in SMP");
		return -EINVAL;
	}

	ret = arc_get_timer_clk(node);
	if (ret)
		return ret;

	write_aux_reg(AUX_RTC_CTRL, 1);

	return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
}
CLOCKSOURCE_OF_DECLARE ( arc_rtc , " snps,archs-timer-rtc " , arc_cs_setup_rtc ) ;
2016-01-01 15:35:48 +03:00
# endif
2013-01-18 13:42:18 +04:00
2016-01-01 15:35:48 +03:00
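/*
 * Note the clocksource ratings: TIMER1 (300) < RTC (350) < GFRC (400),
 * so the kernel picks the best counter the hardware provides.
 */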

/*
 * 32-bit TIMER1, programmed to keep counting monotonically and wrap around
 */
static cycle_t arc_read_timer1(struct clocksource *cs)
{
	return (cycle_t)read_aux_reg(ARC_REG_TIMER1_CNT);
}

static struct clocksource arc_counter_timer1 = {
	.name	= "ARC Timer1",
	.rating	= 300,
	.read	= arc_read_timer1,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init arc_cs_setup_timer1(struct device_node *node)
{
	int ret;

	/* Local to CPU hence not usable in SMP */
	if (IS_ENABLED(CONFIG_SMP))
		return -EINVAL;

	ret = arc_get_timer_clk(node);
	if (ret)
		return ret;

	write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
	write_aux_reg(ARC_REG_TIMER1_CNT, 0);
	write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);

	return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
}

/********** Clock Event Device *********/

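/*
 * TIMER0 exists per core, so the clockevent device is per-cpu and fed
 * by a percpu IRQ: the boot CPU registers via the DT probe, other CPUs
 * via the cpuhp callbacks below.
 */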
static int arc_timer_irq;

/*
 * Arm the timer to interrupt after @cycles
 * The distinction for oneshot/periodic is done in timer_irq_handler() below
 */
static void arc_timer_event_setup(unsigned int cycles)
{
	write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
	write_aux_reg(ARC_REG_TIMER0_CNT, 0);	/* start from 0 */

	write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
}

static int arc_clkevent_set_next_event(unsigned long delta,
				       struct clock_event_device *dev)
{
	arc_timer_event_setup(delta);
	return 0;
}

static int arc_clkevent_set_periodic(struct clock_event_device *dev)
{
	/*
	 * At X Hz, 1 sec = 1000 ms -> X cycles;
	 * one tick (1/HZ sec, e.g. 10 ms at HZ=100) -> X / HZ cycles
	 */
	arc_timer_event_setup(arc_timer_freq / HZ);
	return 0;
}

static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
	.name			= "ARC Timer0",
	.features		= CLOCK_EVT_FEAT_ONESHOT |
				  CLOCK_EVT_FEAT_PERIODIC,
	.rating			= 300,
	.set_next_event		= arc_clkevent_set_next_event,
	.set_state_periodic	= arc_clkevent_set_periodic,
};

static irqreturn_t timer_irq_handler(int irq, void *dev_id)
{
	/*
	 * Note that generic IRQ core could have passed @evt for @dev_id if
	 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
	 */
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
	int irq_reenable = clockevent_state_periodic(evt);

	/*
	 * Any write to the CTRL reg ACKs the interrupt; we rewrite the
	 * [N]ot [H]alted bit and, if periodic, re-arm the timer via the
	 * [I]nterrupt [E]nable bit.
	 */
	write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}
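
/*
 * CPU hotplug callbacks: as each CPU comes up it registers its own
 * per-cpu TIMER0 clockevent and unmasks the shared percpu IRQ; going
 * down, it just masks that IRQ locally.
 */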
static int arc_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);

	evt->cpumask = cpumask_of(smp_processor_id());

	clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMER_MAX);
	enable_percpu_irq(arc_timer_irq, 0);
	return 0;
}

static int arc_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(arc_timer_irq);
	return 0;
}

/*
 * clockevent setup for boot CPU
 */
static int __init arc_clockevent_setup(struct device_node *node)
{
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
	int ret;

	arc_timer_irq = irq_of_parse_and_map(node, 0);
	if (arc_timer_irq <= 0) {
		pr_err("clockevent: missing irq");
		return -EINVAL;
	}

	ret = arc_get_timer_clk(node);
	if (ret) {
		pr_err("clockevent: missing clk");
		return ret;
	}

	/* Needs apriori irq_set_percpu_devid() done in intc map function */
	ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
				 "Timer0 (per-cpu-tick)", evt);
	if (ret) {
		pr_err("clockevent: unable to request irq\n");
		return ret;
	}

	ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
				"AP_ARC_TIMER_STARTING",
				arc_timer_starting_cpu,
				arc_timer_dying_cpu);
	if (ret) {
		pr_err("Failed to setup hotplug state");
		return ret;
	}
	return 0;
}
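
/*
 * DT typically carries one "snps,arc-timer" node per hardware timer:
 * the first probe sets up TIMER0 as the clockevent, a second probe
 * (if present) sets up TIMER1 as a clocksource.
 */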
static int __init arc_of_timer_init(struct device_node *np)
{
	static int init_count = 0;
	int ret;

	if (!init_count) {
		init_count = 1;
		ret = arc_clockevent_setup(np);
	} else {
		ret = arc_cs_setup_timer1(np);
	}

	return ret;
}
CLOCKSOURCE_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);
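
/*
 * Probe flow, for reference: clocksource_probe() matches each
 * CLOCKSOURCE_OF_DECLARE()d compatible above against the DT and
 * invokes the corresponding init callback.
 */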

/*
 * Called from start_kernel() - boot CPU only
 */
void __init time_init(void)
{
	of_clk_init(NULL);
	clocksource_probe();
}