// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * All RISC-V systems have a timer attached to every hart.  These timers can
 * either be read from the "time" and "timeh" CSRs, and can use the SBI to
 * setup events, or directly accessed using MMIO registers.
 */
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/sched_clock.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <asm/smp.h>
#include <asm/sbi.h>
#include <asm/timex.h>
/*
 * clock_event_device callback: program the next timer event @delta
 * cycles from now.  Always succeeds (returns 0).
 */
static int riscv_clock_next_event(unsigned long delta,
				  struct clock_event_device *ce)
{
	/* Unmask the timer interrupt, then ask the SBI for the event. */
	csr_set(CSR_IE, IE_TIE);
	sbi_set_timer(get_cycles64() + delta);
	return 0;
}
/* Linux IRQ number of the per-hart timer interrupt; set once at probe. */
static unsigned int riscv_clock_event_irq;
/* Per-CPU clockevent device; oneshot only, armed via riscv_clock_next_event(). */
static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
	.name			= "riscv_timer_clockevent",
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 100,
	.set_next_event		= riscv_clock_next_event,
};
/*
* It is guaranteed that all the timers across all the harts are synchronized
* within one tick of each other , so while this could technically go
* backwards when hopping between CPUs , practically it won ' t happen .
*/
static unsigned long long riscv_clocksource_rdtime ( struct clocksource * cs )
{
return get_cycles64 ( ) ;
}
/*
 * sched_clock read hook.  Marked notrace — presumably to avoid recursion
 * when the tracer itself timestamps events via sched_clock.
 */
static u64 notrace riscv_sched_clock(void)
{
	return get_cycles64();
}
/* Continuous 64-bit clocksource backed by riscv_clocksource_rdtime(). */
static struct clocksource riscv_clocksource = {
	.name		= "riscv_clocksource",
	.rating		= 300,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.read		= riscv_clocksource_rdtime,
};
static int riscv_timer_starting_cpu ( unsigned int cpu )
{
struct clock_event_device * ce = per_cpu_ptr ( & riscv_clock_event , cpu ) ;
ce - > cpumask = cpumask_of ( cpu ) ;
2020-06-01 12:15:41 +03:00
ce - > irq = riscv_clock_event_irq ;
2018-08-04 11:23:19 +03:00
clockevents_config_and_register ( ce , riscv_timebase , 100 , 0x7fffffff ) ;
2020-06-01 12:15:41 +03:00
enable_percpu_irq ( riscv_clock_event_irq ,
irq_get_trigger_type ( riscv_clock_event_irq ) ) ;
2018-08-04 11:23:19 +03:00
return 0 ;
}
/* CPU-hotplug "dying" callback: mask the timer IRQ on this CPU. */
static int riscv_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(riscv_clock_event_irq);
	return 0;
}
/* called directly from the low-level interrupt handler */
2020-06-01 12:15:41 +03:00
static irqreturn_t riscv_timer_interrupt ( int irq , void * dev_id )
2018-08-04 11:23:19 +03:00
{
struct clock_event_device * evdev = this_cpu_ptr ( & riscv_clock_event ) ;
2019-10-28 15:10:32 +03:00
csr_clear ( CSR_IE , IE_TIE ) ;
2018-08-04 11:23:19 +03:00
evdev - > event_handler ( evdev ) ;
2020-06-01 12:15:41 +03:00
return IRQ_HANDLED ;
2018-08-04 11:23:19 +03:00
}
/*
 * Probe entry for a RISC-V CPU timer node, invoked once per CPU node.
 *
 * Only the node that matches the current (boot) CPU performs the global
 * setup: mapping the timer interrupt from the hart's local INTC domain,
 * registering the clocksource and sched_clock, requesting the percpu
 * timer IRQ and installing the CPU-hotplug callbacks.  All other nodes
 * return 0 immediately.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int __init riscv_timer_init_dt(struct device_node *n)
{
	int cpuid, hartid, error;
	struct device_node *child;
	struct irq_domain *domain;

	hartid = riscv_of_processor_hartid(n);
	if (hartid < 0) {
		pr_warn("Not valid hartid for node [%pOF] error = [%d]\n",
			n, hartid);
		return hartid;
	}

	cpuid = riscv_hartid_to_cpuid(hartid);
	if (cpuid < 0) {
		pr_warn("Invalid cpuid for hartid [%d]\n", hartid);
		return cpuid;
	}

	/* Global registration is done from the boot CPU's node only. */
	if (cpuid != smp_processor_id())
		return 0;

	child = of_get_compatible_child(n, "riscv,cpu-intc");
	if (!child) {
		pr_err("Failed to find INTC node [%pOF]\n", n);
		return -ENODEV;
	}
	/* Original dead store "domain = NULL" removed; set only here. */
	domain = irq_find_host(child);
	of_node_put(child);
	if (!domain) {
		pr_err("Failed to find IRQ domain for node [%pOF]\n", n);
		return -ENODEV;
	}

	riscv_clock_event_irq = irq_create_mapping(domain, RV_IRQ_TIMER);
	if (!riscv_clock_event_irq) {
		pr_err("Failed to map timer interrupt for node [%pOF]\n", n);
		return -ENODEV;
	}

	pr_info("%s: Registering clocksource cpuid [%d] hartid [%d]\n",
		__func__, cpuid, hartid);
	error = clocksource_register_hz(&riscv_clocksource, riscv_timebase);
	if (error) {
		pr_err("RISCV timer register failed [%d] for cpu = [%d]\n",
		       error, cpuid);
		return error;
	}

	sched_clock_register(riscv_sched_clock, 64, riscv_timebase);

	error = request_percpu_irq(riscv_clock_event_irq,
				   riscv_timer_interrupt,
				   "riscv-timer", &riscv_clock_event);
	if (error) {
		pr_err("registering percpu irq failed [%d]\n", error);
		return error;
	}

	error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
				  "clockevents/riscv/timer:starting",
				  riscv_timer_starting_cpu,
				  riscv_timer_dying_cpu);
	if (error) {
		pr_err("cpu hp setup state failed for RISCV timer [%d]\n",
		       error);
		/* Don't leak the percpu IRQ requested above on failure. */
		free_percpu_irq(riscv_clock_event_irq, &riscv_clock_event);
	}

	return error;
}
/* Run riscv_timer_init_dt() for device-tree nodes compatible "riscv". */
TIMER_OF_DECLARE(riscv_timer, "riscv", riscv_timer_init_dt);