/* linux/arch/arm/mach-exynos4/mct.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * EXYNOS4 MCT(Multi-Core Timer) support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/clocksource.h>

#include <asm/mach/time.h>

#define EXYNOS4_MCTREG(x)		(x)
#define EXYNOS4_MCT_G_CNT_L		EXYNOS4_MCTREG(0x100)
#define EXYNOS4_MCT_G_CNT_U		EXYNOS4_MCTREG(0x104)
#define EXYNOS4_MCT_G_CNT_WSTAT		EXYNOS4_MCTREG(0x110)
#define EXYNOS4_MCT_G_COMP0_L		EXYNOS4_MCTREG(0x200)
#define EXYNOS4_MCT_G_COMP0_U		EXYNOS4_MCTREG(0x204)
#define EXYNOS4_MCT_G_COMP0_ADD_INCR	EXYNOS4_MCTREG(0x208)
#define EXYNOS4_MCT_G_TCON		EXYNOS4_MCTREG(0x240)
#define EXYNOS4_MCT_G_INT_CSTAT		EXYNOS4_MCTREG(0x244)
#define EXYNOS4_MCT_G_INT_ENB		EXYNOS4_MCTREG(0x248)
#define EXYNOS4_MCT_G_WSTAT		EXYNOS4_MCTREG(0x24C)
#define _EXYNOS4_MCT_L_BASE		EXYNOS4_MCTREG(0x300)
#define EXYNOS4_MCT_L_BASE(x)		(_EXYNOS4_MCT_L_BASE + (0x100 * x))
#define EXYNOS4_MCT_L_MASK		(0xffffff00)

#define MCT_L_TCNTB_OFFSET		(0x00)
#define MCT_L_ICNTB_OFFSET		(0x08)
#define MCT_L_TCON_OFFSET		(0x20)
#define MCT_L_INT_CSTAT_OFFSET		(0x30)
#define MCT_L_INT_ENB_OFFSET		(0x34)
#define MCT_L_WSTAT_OFFSET		(0x40)
#define MCT_G_TCON_START		(1 << 8)
#define MCT_G_TCON_COMP0_AUTO_INC	(1 << 1)
#define MCT_G_TCON_COMP0_ENABLE		(1 << 0)
#define MCT_L_TCON_INTERVAL_MODE	(1 << 2)
#define MCT_L_TCON_INT_START		(1 << 1)
#define MCT_L_TCON_TIMER_START		(1 << 0)

#define TICK_BASE_CNT	1

enum {
	MCT_INT_SPI,
	MCT_INT_PPI
};

enum {
	MCT_G0_IRQ,
	MCT_G1_IRQ,
	MCT_G2_IRQ,
	MCT_G3_IRQ,
	MCT_L0_IRQ,
	MCT_L1_IRQ,
	MCT_L2_IRQ,
	MCT_L3_IRQ,
	MCT_NR_IRQS,
};

static void __iomem *reg_base;
static unsigned long clk_rate;
static unsigned int mct_int_type;
static int mct_irqs[MCT_NR_IRQS];

struct mct_clock_event_device {
	struct clock_event_device evt;
	unsigned long base;
	char name[10];
};
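
/*
 * Write an MCT register and wait until the value is latched.
 *
 * Each MCT register has a matching write-status (WSTAT) bit that the
 * hardware sets once the written value has actually taken effect in the
 * timer block.  exynos4_mct_write() picks the status register and bit
 * for the given offset, polls it for up to roughly 1 ms and clears it,
 * panicking if the write never completes.
 */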
static void exynos4_mct_write(unsigned int value, unsigned long offset)
{
	unsigned long stat_addr;
	u32 mask;
	u32 i;

	__raw_writel(value, reg_base + offset);

	if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
		stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
		switch (offset & EXYNOS4_MCT_L_MASK) {
		case MCT_L_TCON_OFFSET:
			mask = 1 << 3;		/* L_TCON write status */
			break;
		case MCT_L_ICNTB_OFFSET:
			mask = 1 << 1;		/* L_ICNTB write status */
			break;
		case MCT_L_TCNTB_OFFSET:
			mask = 1 << 0;		/* L_TCNTB write status */
			break;
		default:
			return;
		}
	} else {
		switch (offset) {
		case EXYNOS4_MCT_G_TCON:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 16;		/* G_TCON write status */
			break;
		case EXYNOS4_MCT_G_COMP0_L:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 0;		/* G_COMP0_L write status */
			break;
		case EXYNOS4_MCT_G_COMP0_U:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 1;		/* G_COMP0_U write status */
			break;
		case EXYNOS4_MCT_G_COMP0_ADD_INCR:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 2;		/* G_COMP0_ADD_INCR w status */
			break;
		case EXYNOS4_MCT_G_CNT_L:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 0;		/* G_CNT_L write status */
			break;
		case EXYNOS4_MCT_G_CNT_U:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 1;		/* G_CNT_U write status */
			break;
		default:
			return;
		}
	}

	/* Wait maximum 1 ms until written values are applied */
	for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
		if (__raw_readl(reg_base + stat_addr) & mask) {
			__raw_writel(mask, reg_base + stat_addr);
			return;
		}

	panic("MCT hangs after writing %d (offset:0x%lx)\n", value, offset);
}

/* Clocksource handling */
static void exynos4_mct_frc_start(u32 hi, u32 lo)
{
	u32 reg;

	exynos4_mct_write(lo, EXYNOS4_MCT_G_CNT_L);
	exynos4_mct_write(hi, EXYNOS4_MCT_G_CNT_U);

	reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
	reg |= MCT_G_TCON_START;
	exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
}
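
/*
 * The 64-bit free-running counter is exposed as two 32-bit registers,
 * so exynos4_frc_read() re-reads the upper word until it is stable to
 * get a consistent 64-bit snapshot even if the low word wraps between
 * the reads.
 */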
static cycle_t exynos4_frc_read(struct clocksource *cs)
{
	unsigned int lo, hi;
	u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);

	do {
		hi = hi2;
		lo = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_L);
		hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);
	} while (hi != hi2);

	return ((cycle_t)hi << 32) | lo;
}

static void exynos4_frc_resume(struct clocksource *cs)
{
	exynos4_mct_frc_start(0, 0);
}

struct clocksource mct_frc = {
	.name		= "mct-frc",
	.rating		= 400,
	.read		= exynos4_frc_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume		= exynos4_frc_resume,
};

static void __init exynos4_clocksource_init(void)
{
	exynos4_mct_frc_start(0, 0);

	if (clocksource_register_hz(&mct_frc, clk_rate))
		panic("%s: can't register clocksource\n", mct_frc.name);
}

static void exynos4_mct_comp0_stop(void)
{
	unsigned int tcon;

	tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
	tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC);

	exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
	exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB);
}
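
/*
 * Arm global comparator 0 to fire 'cycles' ticks from the current
 * free-running counter value.  In periodic mode the ADD_INCR register
 * is also programmed so the comparator re-arms itself by the same
 * interval on every expiry.
 */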
static void exynos4_mct_comp0_start(enum clock_event_mode mode,
				    unsigned long cycles)
{
	unsigned int tcon;
	cycle_t comp_cycle;

	tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);

	if (mode == CLOCK_EVT_MODE_PERIODIC) {
		tcon |= MCT_G_TCON_COMP0_AUTO_INC;
		exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
	}

	comp_cycle = exynos4_frc_read(&mct_frc) + cycles;
	exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
	exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);

	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_ENB);

	tcon |= MCT_G_TCON_COMP0_ENABLE;
	exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
}

static int exynos4_comp_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	exynos4_mct_comp0_start(evt->mode, cycles);

	return 0;
}

static void exynos4_comp_set_mode(enum clock_event_mode mode,
				  struct clock_event_device *evt)
{
	unsigned long cycles_per_jiffy;

	exynos4_mct_comp0_stop();

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		cycles_per_jiffy =
			(((unsigned long long)NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
		exynos4_mct_comp0_start(mode, cycles_per_jiffy);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static struct clock_event_device mct_comp_device = {
	.name		= "mct-comp",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 250,
	.set_next_event	= exynos4_comp_set_next_event,
	.set_mode	= exynos4_comp_set_mode,
};

static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static struct irqaction mct_comp_event_irq = {
	.name		= "mct_comp_irq",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= exynos4_mct_comp_isr,
	.dev_id		= &mct_comp_device,
};

static void exynos4_clockevent_init(void)
{
	mct_comp_device.cpumask = cpumask_of(0);
	clockevents_config_and_register(&mct_comp_device, clk_rate,
					0xf, 0xffffffff);
	setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq);
}

static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);

/* Clock event handling */
static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
{
	unsigned long tmp;
	unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
	unsigned long offset = mevt->base + MCT_L_TCON_OFFSET;

	tmp = __raw_readl(reg_base + offset);
	if (tmp & mask) {
		tmp &= ~mask;
		exynos4_mct_write(tmp, offset);
	}
}
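
/*
 * Program the per-CPU local timer: stop it first, load the interrupt
 * count buffer with the requested number of cycles (bit 31 requests the
 * ICNTB update), enable the local tick interrupt and finally start the
 * timer in interval mode.
 */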
static void exynos4_mct_tick_start(unsigned long cycles,
				   struct mct_clock_event_device *mevt)
{
	unsigned long tmp;

	exynos4_mct_tick_stop(mevt);

	tmp = (1 << 31) | cycles;	/* MCT_L_UPDATE_ICNTB */

	/* update interrupt count buffer */
	exynos4_mct_write(tmp, mevt->base + MCT_L_ICNTB_OFFSET);

	/* enable MCT tick interrupt */
	exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);

	tmp = __raw_readl(reg_base + mevt->base + MCT_L_TCON_OFFSET);
	tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
	       MCT_L_TCON_INTERVAL_MODE;
	exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
}

static int exynos4_tick_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);

	exynos4_mct_tick_start(cycles, mevt);

	return 0;
}

static inline void exynos4_tick_set_mode(enum clock_event_mode mode,
					 struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
	unsigned long cycles_per_jiffy;

	exynos4_mct_tick_stop(mevt);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		cycles_per_jiffy =
			(((unsigned long long)NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
		exynos4_mct_tick_start(cycles_per_jiffy, mevt);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
{
	struct clock_event_device *evt = &mevt->evt;

	/*
	 * This is for supporting oneshot mode.
	 * Mct would generate interrupt periodically
	 * without explicit stopping.
	 */
	if (evt->mode != CLOCK_EVT_MODE_PERIODIC)
		exynos4_mct_tick_stop(mevt);

	/* Clear the MCT tick interrupt */
	if (__raw_readl(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) {
		exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
		return 1;
	} else {
		return 0;
	}
}

static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
	struct mct_clock_event_device *mevt = dev_id;
	struct clock_event_device *evt = &mevt->evt;

	exynos4_mct_tick_clear(mevt);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}
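
/*
 * Set up the local tick clock_event_device for the calling CPU.  Each
 * CPU gets its own MCT local timer block; with SPI interrupts a
 * dedicated per-CPU IRQ is requested here, while with PPI interrupts
 * the shared per-CPU IRQ is simply enabled on this CPU.
 */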
static int exynos4_local_timer_setup(struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt;
	unsigned int cpu = smp_processor_id();

	mevt = container_of(evt, struct mct_clock_event_device, evt);

	mevt->base = EXYNOS4_MCT_L_BASE(cpu);
	sprintf(mevt->name, "mct_tick%d", cpu);

	evt->name = mevt->name;
	evt->cpumask = cpumask_of(cpu);
	evt->set_next_event = exynos4_tick_set_next_event;
	evt->set_mode = exynos4_tick_set_mode;
	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	evt->rating = 450;
	clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
					0xf, 0x7fffffff);

	exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);

	if (mct_int_type == MCT_INT_SPI) {
		evt->irq = mct_irqs[MCT_L0_IRQ + cpu];
		if (request_irq(evt->irq, exynos4_mct_tick_isr,
				IRQF_TIMER | IRQF_NOBALANCING,
				evt->name, mevt)) {
			pr_err("exynos-mct: cannot register IRQ %d\n",
				evt->irq);
			return -EIO;
		}
	} else {
		enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
	}

	return 0;
}

static void exynos4_local_timer_stop(struct clock_event_device *evt)
{
	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
	if (mct_int_type == MCT_INT_SPI)
		free_irq(evt->irq, this_cpu_ptr(&percpu_mct_tick));
	else
		disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
}
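
/*
 * CPU hotplug notifier: bring up the local tick device on a CPU that is
 * starting, steer the per-CPU SPI to a CPU once it is online, and tear
 * the local tick device down again when the CPU is dying.
 */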
static int exynos4_mct_cpu_notify(struct notifier_block *self,
				  unsigned long action, void *hcpu)
{
	struct mct_clock_event_device *mevt;
	unsigned int cpu;

	/*
	 * Grab cpu pointer in each case to avoid spurious
	 * preemptible warnings
	 */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		mevt = this_cpu_ptr(&percpu_mct_tick);
		exynos4_local_timer_setup(&mevt->evt);
		break;
	case CPU_ONLINE:
		cpu = (unsigned long)hcpu;
		if (mct_int_type == MCT_INT_SPI)
			irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu],
					 cpumask_of(cpu));
		break;
	case CPU_DYING:
		mevt = this_cpu_ptr(&percpu_mct_tick);
		exynos4_local_timer_stop(&mevt->evt);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block exynos4_mct_cpu_nb = {
	.notifier_call = exynos4_mct_cpu_notify,
};
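
/*
 * Common resource setup: pick up the tick and MCT clocks (from DT when
 * a node is given, otherwise by name), remember the register base,
 * request the per-CPU PPI or pin the SPI to CPU0, register the hotplug
 * notifier and immediately configure the boot CPU's local timer.
 */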
static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
{
	int err;
	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
	struct clk *mct_clk, *tick_clk;

	tick_clk = np ? of_clk_get_by_name(np, "fin_pll") :
				clk_get(NULL, "fin_pll");
	if (IS_ERR(tick_clk))
		panic("%s: unable to determine tick clock rate\n", __func__);
	clk_rate = clk_get_rate(tick_clk);

	mct_clk = np ? of_clk_get_by_name(np, "mct") : clk_get(NULL, "mct");
	if (IS_ERR(mct_clk))
		panic("%s: unable to retrieve mct clock instance\n", __func__);
	clk_prepare_enable(mct_clk);

	reg_base = base;
	if (!reg_base)
		panic("%s: unable to ioremap mct address space\n", __func__);

	if (mct_int_type == MCT_INT_PPI) {

		err = request_percpu_irq(mct_irqs[MCT_L0_IRQ],
					 exynos4_mct_tick_isr, "MCT",
					 &percpu_mct_tick);
		WARN(err, "MCT: can't request IRQ %d (%d)\n",
		     mct_irqs[MCT_L0_IRQ], err);
	} else {
		irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0));
	}

	err = register_cpu_notifier(&exynos4_mct_cpu_nb);
	if (err)
		goto out_irq;

	/* Immediately configure the timer on the boot CPU */
	exynos4_local_timer_setup(&mevt->evt);
	return;

out_irq:
	free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
}

void __init mct_init(void __iomem *base, int irq_g0, int irq_l0, int irq_l1)
{
	mct_irqs[MCT_G0_IRQ] = irq_g0;
	mct_irqs[MCT_L0_IRQ] = irq_l0;
	mct_irqs[MCT_L1_IRQ] = irq_l1;
	mct_int_type = MCT_INT_SPI;

	exynos4_timer_resources(NULL, base);
	exynos4_clocksource_init();
	exynos4_clockevent_init();
}
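
/*
 * Device-tree probe path: parse the global timer interrupt and however
 * many local timer interrupts the node provides (they follow the four
 * global ones in the interrupts property), then hand over to the common
 * setup above.
 */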
static void __init mct_init_dt(struct device_node *np, unsigned int int_type)
{
	u32 nr_irqs, i;

	mct_int_type = int_type;

	/* This driver uses only one global timer interrupt */
	mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ);

	/*
	 * Find out the number of local irqs specified. The local
	 * timer irqs are specified after the four global timer
	 * irqs are specified.
	 */
#ifdef CONFIG_OF
	nr_irqs = of_irq_count(np);
#else
	nr_irqs = 0;
#endif
	for (i = MCT_L0_IRQ; i < nr_irqs; i++)
		mct_irqs[i] = irq_of_parse_and_map(np, i);

	exynos4_timer_resources(np, of_iomap(np, 0));
	exynos4_clocksource_init();
	exynos4_clockevent_init();
}

static void __init mct_init_spi(struct device_node *np)
{
	return mct_init_dt(np, MCT_INT_SPI);
}

static void __init mct_init_ppi(struct device_node *np)
{
	return mct_init_dt(np, MCT_INT_PPI);
}
CLOCKSOURCE_OF_DECLARE(exynos4210, "samsung,exynos4210-mct", mct_init_spi);
CLOCKSOURCE_OF_DECLARE(exynos4412, "samsung,exynos4412-mct", mct_init_ppi);