/*
 * arch/xtensa/kernel/time.c
 *
 * Timer and clock support.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 */

#include <linux/clk.h>
#include <linux/of_clk.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/profile.h>
#include <linux/delay.h>
#include <linux/irqdomain.h>
#include <linux/sched_clock.h>

#include <asm/timex.h>
#include <asm/platform.h>

unsigned long ccount_freq;		/* ccount Hz */
EXPORT_SYMBOL(ccount_freq);

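/*
 * CCOUNT is a free-running 32-bit cycle counter that increments once per
 * CPU clock at ccount_freq Hz.  It backs both the ccount clocksource and
 * sched_clock below.
 */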
static u64 ccount_read(struct clocksource *cs)
{
	return (u64)get_ccount();
}

static u64 notrace ccount_sched_clock_read(void)
{
	return get_ccount();
}

static struct clocksource ccount_clocksource = {
	.name = "ccount",
	.rating = 200,
	.read = ccount_read,
	.mask = CLOCKSOURCE_MASK(32),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

struct ccount_timer {
	struct clock_event_device evt;
	int irq_enabled;
	char name[24];
};

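/*
 * Program the next event 'delta' cycles from now by writing
 * CCOUNT + delta to the Linux timer CCOMPARE register.  If CCOUNT has
 * already passed the programmed value by the time it is re-read, return
 * -ETIME so the clockevents core knows the event was missed.
 */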
static int ccount_timer_set_next_event(unsigned long delta,
		struct clock_event_device *dev)
{
	unsigned long flags, next;
	int ret = 0;

	local_irq_save(flags);
	next = get_ccount() + delta;
	set_linux_timer(next);
	if (next - get_ccount() > delta)
		ret = -ETIME;
	local_irq_restore(flags);

	return ret;
}

/*
 * There is no way to disable the timer interrupt at the device level,
 * only at the intenable register itself. Since enable_irq/disable_irq
 * calls are nested, we need to make sure that these calls are
 * balanced.
 */
static int ccount_timer_shutdown(struct clock_event_device *evt)
{
	struct ccount_timer *timer =
		container_of(evt, struct ccount_timer, evt);

	if (timer->irq_enabled) {
		disable_irq_nosync(evt->irq);
		timer->irq_enabled = 0;
	}
	return 0;
}

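/*
 * Entering oneshot mode (or resuming the tick) re-enables the timer IRQ,
 * balancing the disable_irq_nosync() done in ccount_timer_shutdown().
 */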
static int ccount_timer_set_oneshot(struct clock_event_device *evt)
{
	struct ccount_timer *timer =
		container_of(evt, struct ccount_timer, evt);

	if (!timer->irq_enabled) {
		enable_irq(evt->irq);
		timer->irq_enabled = 1;
	}
	return 0;
}

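/*
 * Each CPU has its own CCOUNT/CCOMPARE registers, so the clockevent
 * device is per-CPU.  Only the static parts are initialized here; the
 * name, cpumask and irq are filled in by local_timer_setup().
 */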
static DEFINE_PER_CPU(struct ccount_timer, ccount_timer) = {
	.evt = {
		.features = CLOCK_EVT_FEAT_ONESHOT,
		.rating = 300,
		.set_next_event = ccount_timer_set_next_event,
		.set_state_shutdown = ccount_timer_shutdown,
		.set_state_oneshot = ccount_timer_set_oneshot,
		.tick_resume = ccount_timer_set_oneshot,
	},
};

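/*
 * Timer interrupt handler.  Rewriting CCOMPARE with its current value
 * acknowledges the pending timer interrupt without changing the next
 * event; the clockevent core's event handler then takes care of
 * programming the next deadline.
 */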
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &this_cpu_ptr(&ccount_timer)->evt;

	set_linux_timer(get_linux_timer());
	evt->event_handler(evt);

	/* Allow platform to do something useful (Wdog). */
	platform_heartbeat();

	return IRQ_HANDLED;
}

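/*
 * Register this CPU's clockevent device.  Called for the boot CPU from
 * time_init(); on SMP configurations secondary CPUs call it as they are
 * brought up.
 */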
void local_timer_setup(unsigned cpu)
{
	struct ccount_timer *timer = &per_cpu(ccount_timer, cpu);
	struct clock_event_device *clockevent = &timer->evt;

	timer->irq_enabled = 1;
	snprintf(timer->name, sizeof(timer->name), "ccount_clockevent_%u", cpu);
	clockevent->name = timer->name;
	clockevent->cpumask = cpumask_of(cpu);
	clockevent->irq = irq_create_mapping(NULL, LINUX_TIMER_INT);
	if (WARN(!clockevent->irq, "error: can't map timer irq"))
		return;
	clockevents_config_and_register(clockevent, ccount_freq,
					0xf, 0xffffffff);
}

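/*
 * Determine ccount_freq.  With CONFIG_OF the CPU input clock rate is read
 * from the "cdns,xtensa-cpu" node in the device tree; if that fails (or
 * without CONFIG_OF) fall back to the platform_calibrate_ccount() hook.
 */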
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
#ifdef CONFIG_OF
static void __init calibrate_ccount(void)
{
	struct device_node *cpu;
	struct clk *clk;

	cpu = of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu");
	if (cpu) {
		clk = of_clk_get(cpu, 0);

		of_node_put(cpu);
		if (!IS_ERR(clk)) {
			ccount_freq = clk_get_rate(clk);
			return;
		} else {
			pr_warn("%s: CPU input clock not found\n",
				__func__);
		}
	} else {
		pr_warn("%s: CPU node not found in the device tree\n",
			__func__);
	}

	platform_calibrate_ccount();
}
#else
static inline void calibrate_ccount(void)
{
	platform_calibrate_ccount();
}
#endif
#endif

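/*
 * Early boot time initialization: set up clocks from the device tree,
 * determine ccount_freq, register the clocksource and sched_clock, and
 * bring up the boot CPU's clockevent and timer interrupt.
 */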
void __init time_init(void)
{
	int irq;

	of_clk_init(NULL);
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
	pr_info("Calibrating CPU frequency ");
	calibrate_ccount();
	pr_cont("%d.%02d MHz\n",
		(int)ccount_freq / 1000000,
		(int)(ccount_freq / 10000) % 100);
#else
	ccount_freq = CONFIG_XTENSA_CPU_CLOCK * 1000000UL;
#endif
	WARN(!ccount_freq,
	     "%s: CPU clock frequency is not set up correctly\n",
	     __func__);
	clocksource_register_hz(&ccount_clocksource, ccount_freq);
	local_timer_setup(0);
	irq = this_cpu_ptr(&ccount_timer)->evt.irq;
	if (request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL))
		pr_err("Failed to request irq %d (timer)\n", irq);
	sched_clock_register(ccount_sched_clock_read, 32, ccount_freq);
	timer_probe();
}

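/*
 * With a fixed, known ccount frequency there is no need to measure
 * BogoMIPS at boot; preset loops_per_jiffy directly from ccount_freq.
 */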
#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
void calibrate_delay(void)
{
	loops_per_jiffy = ccount_freq / HZ;
	pr_info("Calibrating delay loop (skipped)... %lu.%02lu BogoMIPS preset\n",
		loops_per_jiffy / (1000000 / HZ),
		(loops_per_jiffy / (10000 / HZ)) % 100);
}
#endif