/* linux/arch/arm/mach-exynos4/mct.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * EXYNOS4 MCT (Multi-Core Timer) support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
# include <linux/sched.h>
# include <linux/interrupt.h>
# include <linux/irq.h>
# include <linux/err.h>
# include <linux/clk.h>
# include <linux/clockchips.h>
2013-02-15 16:40:51 -08:00
# include <linux/cpu.h>
2011-03-11 10:39:57 +09:00
# include <linux/platform_device.h>
# include <linux/delay.h>
# include <linux/percpu.h>
2012-11-15 15:48:56 +09:00
# include <linux/of.h>
2013-03-09 16:01:52 +09:00
# include <linux/of_irq.h>
# include <linux/of_address.h>
2013-03-09 16:10:03 +09:00
# include <linux/clocksource.h>
2014-05-02 22:27:01 +09:00
# include <linux/sched_clock.h>
2011-03-11 10:39:57 +09:00
2013-03-09 16:01:47 +09:00
# define EXYNOS4_MCTREG(x) (x)
# define EXYNOS4_MCT_G_CNT_L EXYNOS4_MCTREG(0x100)
# define EXYNOS4_MCT_G_CNT_U EXYNOS4_MCTREG(0x104)
# define EXYNOS4_MCT_G_CNT_WSTAT EXYNOS4_MCTREG(0x110)
# define EXYNOS4_MCT_G_COMP0_L EXYNOS4_MCTREG(0x200)
# define EXYNOS4_MCT_G_COMP0_U EXYNOS4_MCTREG(0x204)
# define EXYNOS4_MCT_G_COMP0_ADD_INCR EXYNOS4_MCTREG(0x208)
# define EXYNOS4_MCT_G_TCON EXYNOS4_MCTREG(0x240)
# define EXYNOS4_MCT_G_INT_CSTAT EXYNOS4_MCTREG(0x244)
# define EXYNOS4_MCT_G_INT_ENB EXYNOS4_MCTREG(0x248)
# define EXYNOS4_MCT_G_WSTAT EXYNOS4_MCTREG(0x24C)
# define _EXYNOS4_MCT_L_BASE EXYNOS4_MCTREG(0x300)
# define EXYNOS4_MCT_L_BASE(x) (_EXYNOS4_MCT_L_BASE + (0x100 * x))
# define EXYNOS4_MCT_L_MASK (0xffffff00)
# define MCT_L_TCNTB_OFFSET (0x00)
# define MCT_L_ICNTB_OFFSET (0x08)
# define MCT_L_TCON_OFFSET (0x20)
# define MCT_L_INT_CSTAT_OFFSET (0x30)
# define MCT_L_INT_ENB_OFFSET (0x34)
# define MCT_L_WSTAT_OFFSET (0x40)
# define MCT_G_TCON_START (1 << 8)
# define MCT_G_TCON_COMP0_AUTO_INC (1 << 1)
# define MCT_G_TCON_COMP0_ENABLE (1 << 0)
# define MCT_L_TCON_INTERVAL_MODE (1 << 2)
# define MCT_L_TCON_INT_START (1 << 1)
# define MCT_L_TCON_TIMER_START (1 << 0)
2012-03-09 15:09:21 -08:00
# define TICK_BASE_CNT 1
2011-10-04 17:02:58 +09:00
enum {
MCT_INT_SPI ,
MCT_INT_PPI
} ;
2013-03-09 16:01:50 +09:00
enum {
MCT_G0_IRQ ,
MCT_G1_IRQ ,
MCT_G2_IRQ ,
MCT_G3_IRQ ,
MCT_L0_IRQ ,
MCT_L1_IRQ ,
MCT_L2_IRQ ,
MCT_L3_IRQ ,
2013-12-02 07:48:23 +09:00
MCT_L4_IRQ ,
MCT_L5_IRQ ,
MCT_L6_IRQ ,
MCT_L7_IRQ ,
2013-03-09 16:01:50 +09:00
MCT_NR_IRQS ,
} ;
2013-03-09 16:01:47 +09:00
static void __iomem * reg_base ;
2011-03-11 10:39:57 +09:00
static unsigned long clk_rate ;
2011-10-04 17:02:58 +09:00
static unsigned int mct_int_type ;
2013-03-09 16:01:50 +09:00
static int mct_irqs [ MCT_NR_IRQS ] ;
2011-03-11 10:39:57 +09:00
struct mct_clock_event_device {
2013-02-15 16:40:51 -08:00
struct clock_event_device evt ;
2013-03-09 16:01:47 +09:00
unsigned long base ;
2011-10-04 17:09:26 +09:00
char name [ 10 ] ;
2011-03-11 10:39:57 +09:00
} ;
2013-03-09 16:01:47 +09:00
static void exynos4_mct_write ( unsigned int value , unsigned long offset )
2011-03-11 10:39:57 +09:00
{
2013-03-09 16:01:47 +09:00
unsigned long stat_addr ;
2011-03-11 10:39:57 +09:00
u32 mask ;
u32 i ;
2014-07-05 06:43:20 +09:00
writel_relaxed ( value , reg_base + offset ) ;
2011-03-11 10:39:57 +09:00
2013-03-09 16:01:47 +09:00
if ( likely ( offset > = EXYNOS4_MCT_L_BASE ( 0 ) ) ) {
2014-10-22 03:37:08 +02:00
stat_addr = ( offset & EXYNOS4_MCT_L_MASK ) + MCT_L_WSTAT_OFFSET ;
switch ( offset & ~ EXYNOS4_MCT_L_MASK ) {
2013-03-09 16:01:47 +09:00
case MCT_L_TCON_OFFSET :
2011-10-04 17:09:26 +09:00
mask = 1 < < 3 ; /* L_TCON write status */
break ;
2013-03-09 16:01:47 +09:00
case MCT_L_ICNTB_OFFSET :
2011-10-04 17:09:26 +09:00
mask = 1 < < 1 ; /* L_ICNTB write status */
break ;
2013-03-09 16:01:47 +09:00
case MCT_L_TCNTB_OFFSET :
2011-10-04 17:09:26 +09:00
mask = 1 < < 0 ; /* L_TCNTB write status */
break ;
default :
return ;
}
} else {
2013-03-09 16:01:47 +09:00
switch ( offset ) {
case EXYNOS4_MCT_G_TCON :
2011-10-04 17:09:26 +09:00
stat_addr = EXYNOS4_MCT_G_WSTAT ;
mask = 1 < < 16 ; /* G_TCON write status */
break ;
2013-03-09 16:01:47 +09:00
case EXYNOS4_MCT_G_COMP0_L :
2011-10-04 17:09:26 +09:00
stat_addr = EXYNOS4_MCT_G_WSTAT ;
mask = 1 < < 0 ; /* G_COMP0_L write status */
break ;
2013-03-09 16:01:47 +09:00
case EXYNOS4_MCT_G_COMP0_U :
2011-10-04 17:09:26 +09:00
stat_addr = EXYNOS4_MCT_G_WSTAT ;
mask = 1 < < 1 ; /* G_COMP0_U write status */
break ;
2013-03-09 16:01:47 +09:00
case EXYNOS4_MCT_G_COMP0_ADD_INCR :
2011-10-04 17:09:26 +09:00
stat_addr = EXYNOS4_MCT_G_WSTAT ;
mask = 1 < < 2 ; /* G_COMP0_ADD_INCR w status */
break ;
2013-03-09 16:01:47 +09:00
case EXYNOS4_MCT_G_CNT_L :
2011-10-04 17:09:26 +09:00
stat_addr = EXYNOS4_MCT_G_CNT_WSTAT ;
mask = 1 < < 0 ; /* G_CNT_L write status */
break ;
2013-03-09 16:01:47 +09:00
case EXYNOS4_MCT_G_CNT_U :
2011-10-04 17:09:26 +09:00
stat_addr = EXYNOS4_MCT_G_CNT_WSTAT ;
mask = 1 < < 1 ; /* G_CNT_U write status */
break ;
default :
return ;
}
2011-03-11 10:39:57 +09:00
}
/* Wait maximum 1 ms until written values are applied */
for ( i = 0 ; i < loops_per_jiffy / 1000 * HZ ; i + + )
2014-07-05 06:43:20 +09:00
if ( readl_relaxed ( reg_base + stat_addr ) & mask ) {
writel_relaxed ( mask , reg_base + stat_addr ) ;
2011-03-11 10:39:57 +09:00
return ;
}
2013-03-09 16:01:47 +09:00
panic ( " MCT hangs after writing %d (offset:0x%lx) \n " , value , offset ) ;
2011-03-11 10:39:57 +09:00
}
/* Clocksource handling */
2014-06-12 00:18:48 +09:00
static void exynos4_mct_frc_start ( void )
2011-03-11 10:39:57 +09:00
{
u32 reg ;
2014-07-05 06:43:20 +09:00
reg = readl_relaxed ( reg_base + EXYNOS4_MCT_G_TCON ) ;
2011-03-11 10:39:57 +09:00
reg | = MCT_G_TCON_START ;
exynos4_mct_write ( reg , EXYNOS4_MCT_G_TCON ) ;
}
2014-07-05 06:43:26 +09:00
/**
* exynos4_read_count_64 - Read all 64 - bits of the global counter
*
* This will read all 64 - bits of the global counter taking care to make sure
* that the upper and lower half match . Note that reading the MCT can be quite
* slow ( hundreds of nanoseconds ) so you should use the 32 - bit ( lower half
* only ) version when possible .
*
* Returns the number of cycles in the global counter .
*/
static u64 exynos4_read_count_64 ( void )
2011-03-11 10:39:57 +09:00
{
unsigned int lo , hi ;
2014-07-05 06:43:20 +09:00
u32 hi2 = readl_relaxed ( reg_base + EXYNOS4_MCT_G_CNT_U ) ;
2011-03-11 10:39:57 +09:00
do {
hi = hi2 ;
2014-07-05 06:43:20 +09:00
lo = readl_relaxed ( reg_base + EXYNOS4_MCT_G_CNT_L ) ;
hi2 = readl_relaxed ( reg_base + EXYNOS4_MCT_G_CNT_U ) ;
2011-03-11 10:39:57 +09:00
} while ( hi ! = hi2 ) ;
return ( ( cycle_t ) hi < < 32 ) | lo ;
}
2014-07-05 06:43:26 +09:00
/**
* exynos4_read_count_32 - Read the lower 32 - bits of the global counter
*
* This will read just the lower 32 - bits of the global counter . This is marked
* as notrace so it can be used by the scheduler clock .
*
* Returns the number of cycles in the global counter ( lower 32 bits ) .
*/
static u32 notrace exynos4_read_count_32 ( void )
{
return readl_relaxed ( reg_base + EXYNOS4_MCT_G_CNT_L ) ;
}
2014-07-05 06:38:55 +09:00
static cycle_t exynos4_frc_read ( struct clocksource * cs )
{
2014-07-05 06:43:26 +09:00
return exynos4_read_count_32 ( ) ;
2014-07-05 06:38:55 +09:00
}
2011-09-02 14:10:52 +09:00
static void exynos4_frc_resume ( struct clocksource * cs )
{
2014-06-12 00:18:48 +09:00
exynos4_mct_frc_start ( ) ;
2011-09-02 14:10:52 +09:00
}
2015-04-30 13:42:52 +09:00
static struct clocksource mct_frc = {
2011-03-11 10:39:57 +09:00
. name = " mct-frc " ,
. rating = 400 ,
. read = exynos4_frc_read ,
2014-07-05 06:43:26 +09:00
. mask = CLOCKSOURCE_MASK ( 32 ) ,
2011-03-11 10:39:57 +09:00
. flags = CLOCK_SOURCE_IS_CONTINUOUS ,
2011-09-02 14:10:52 +09:00
. resume = exynos4_frc_resume ,
2011-03-11 10:39:57 +09:00
} ;
2014-05-02 22:27:01 +09:00
static u64 notrace exynos4_read_sched_clock ( void )
{
2014-07-05 06:43:26 +09:00
return exynos4_read_count_32 ( ) ;
2014-05-02 22:27:01 +09:00
}
2014-07-05 06:40:23 +09:00
static struct delay_timer exynos4_delay_timer ;
static cycles_t exynos4_read_current_timer ( void )
{
2014-07-05 06:43:26 +09:00
BUILD_BUG_ON_MSG ( sizeof ( cycles_t ) ! = sizeof ( u32 ) ,
" cycles_t needs to move to 32-bit for ARM64 usage " ) ;
return exynos4_read_count_32 ( ) ;
2014-07-05 06:40:23 +09:00
}
2011-03-11 10:39:57 +09:00
static void __init exynos4_clocksource_init ( void )
{
2014-06-12 00:18:48 +09:00
exynos4_mct_frc_start ( ) ;
2011-03-11 10:39:57 +09:00
2014-07-05 06:40:23 +09:00
exynos4_delay_timer . read_current_timer = & exynos4_read_current_timer ;
exynos4_delay_timer . freq = clk_rate ;
register_current_timer_delay ( & exynos4_delay_timer ) ;
2011-03-11 10:39:57 +09:00
if ( clocksource_register_hz ( & mct_frc , clk_rate ) )
panic ( " %s: can't register clocksource \n " , mct_frc . name ) ;
2014-05-02 22:27:01 +09:00
2014-07-05 06:43:26 +09:00
sched_clock_register ( exynos4_read_sched_clock , 32 , clk_rate ) ;
2011-03-11 10:39:57 +09:00
}
static void exynos4_mct_comp0_stop ( void )
{
unsigned int tcon ;
2014-07-05 06:43:20 +09:00
tcon = readl_relaxed ( reg_base + EXYNOS4_MCT_G_TCON ) ;
2011-03-11 10:39:57 +09:00
tcon & = ~ ( MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC ) ;
exynos4_mct_write ( tcon , EXYNOS4_MCT_G_TCON ) ;
exynos4_mct_write ( 0 , EXYNOS4_MCT_G_INT_ENB ) ;
}
static void exynos4_mct_comp0_start ( enum clock_event_mode mode ,
unsigned long cycles )
{
unsigned int tcon ;
cycle_t comp_cycle ;
2014-07-05 06:43:20 +09:00
tcon = readl_relaxed ( reg_base + EXYNOS4_MCT_G_TCON ) ;
2011-03-11 10:39:57 +09:00
if ( mode = = CLOCK_EVT_MODE_PERIODIC ) {
tcon | = MCT_G_TCON_COMP0_AUTO_INC ;
exynos4_mct_write ( cycles , EXYNOS4_MCT_G_COMP0_ADD_INCR ) ;
}
2014-07-05 06:43:26 +09:00
comp_cycle = exynos4_read_count_64 ( ) + cycles ;
2011-03-11 10:39:57 +09:00
exynos4_mct_write ( ( u32 ) comp_cycle , EXYNOS4_MCT_G_COMP0_L ) ;
exynos4_mct_write ( ( u32 ) ( comp_cycle > > 32 ) , EXYNOS4_MCT_G_COMP0_U ) ;
exynos4_mct_write ( 0x1 , EXYNOS4_MCT_G_INT_ENB ) ;
tcon | = MCT_G_TCON_COMP0_ENABLE ;
exynos4_mct_write ( tcon , EXYNOS4_MCT_G_TCON ) ;
}
static int exynos4_comp_set_next_event ( unsigned long cycles ,
struct clock_event_device * evt )
{
exynos4_mct_comp0_start ( evt - > mode , cycles ) ;
return 0 ;
}
static void exynos4_comp_set_mode ( enum clock_event_mode mode ,
struct clock_event_device * evt )
{
2012-03-09 15:09:21 -08:00
unsigned long cycles_per_jiffy ;
2011-03-11 10:39:57 +09:00
exynos4_mct_comp0_stop ( ) ;
switch ( mode ) {
case CLOCK_EVT_MODE_PERIODIC :
2012-03-09 15:09:21 -08:00
cycles_per_jiffy =
( ( ( unsigned long long ) NSEC_PER_SEC / HZ * evt - > mult ) > > evt - > shift ) ;
exynos4_mct_comp0_start ( mode , cycles_per_jiffy ) ;
2011-03-11 10:39:57 +09:00
break ;
case CLOCK_EVT_MODE_ONESHOT :
case CLOCK_EVT_MODE_UNUSED :
case CLOCK_EVT_MODE_SHUTDOWN :
case CLOCK_EVT_MODE_RESUME :
break ;
}
}
static struct clock_event_device mct_comp_device = {
. name = " mct-comp " ,
. features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT ,
. rating = 250 ,
. set_next_event = exynos4_comp_set_next_event ,
. set_mode = exynos4_comp_set_mode ,
} ;
static irqreturn_t exynos4_mct_comp_isr ( int irq , void * dev_id )
{
struct clock_event_device * evt = dev_id ;
exynos4_mct_write ( 0x1 , EXYNOS4_MCT_G_INT_CSTAT ) ;
evt - > event_handler ( evt ) ;
return IRQ_HANDLED ;
}
static struct irqaction mct_comp_event_irq = {
. name = " mct_comp_irq " ,
. flags = IRQF_TIMER | IRQF_IRQPOLL ,
. handler = exynos4_mct_comp_isr ,
. dev_id = & mct_comp_device ,
} ;
static void exynos4_clockevent_init ( void )
{
mct_comp_device . cpumask = cpumask_of ( 0 ) ;
2013-01-12 11:50:05 +00:00
clockevents_config_and_register ( & mct_comp_device , clk_rate ,
0xf , 0xffffffff ) ;
2013-03-09 16:01:50 +09:00
setup_irq ( mct_irqs [ MCT_G0_IRQ ] , & mct_comp_event_irq ) ;
2011-03-11 10:39:57 +09:00
}
2011-12-08 10:04:49 +09:00
static DEFINE_PER_CPU ( struct mct_clock_event_device , percpu_mct_tick ) ;
2011-03-11 10:39:57 +09:00
/* Clock event handling */
static void exynos4_mct_tick_stop ( struct mct_clock_event_device * mevt )
{
unsigned long tmp ;
unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START ;
2013-03-09 16:01:47 +09:00
unsigned long offset = mevt - > base + MCT_L_TCON_OFFSET ;
2011-03-11 10:39:57 +09:00
2014-07-05 06:43:20 +09:00
tmp = readl_relaxed ( reg_base + offset ) ;
2011-03-11 10:39:57 +09:00
if ( tmp & mask ) {
tmp & = ~ mask ;
2013-03-09 16:01:47 +09:00
exynos4_mct_write ( tmp , offset ) ;
2011-03-11 10:39:57 +09:00
}
}
static void exynos4_mct_tick_start ( unsigned long cycles ,
struct mct_clock_event_device * mevt )
{
unsigned long tmp ;
exynos4_mct_tick_stop ( mevt ) ;
tmp = ( 1 < < 31 ) | cycles ; /* MCT_L_UPDATE_ICNTB */
/* update interrupt count buffer */
exynos4_mct_write ( tmp , mevt - > base + MCT_L_ICNTB_OFFSET ) ;
2011-03-30 22:57:33 -03:00
/* enable MCT tick interrupt */
2011-03-11 10:39:57 +09:00
exynos4_mct_write ( 0x1 , mevt - > base + MCT_L_INT_ENB_OFFSET ) ;
2014-07-05 06:43:20 +09:00
tmp = readl_relaxed ( reg_base + mevt - > base + MCT_L_TCON_OFFSET ) ;
2011-03-11 10:39:57 +09:00
tmp | = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
MCT_L_TCON_INTERVAL_MODE ;
exynos4_mct_write ( tmp , mevt - > base + MCT_L_TCON_OFFSET ) ;
}
static int exynos4_tick_set_next_event ( unsigned long cycles ,
struct clock_event_device * evt )
{
2011-11-03 11:13:12 +09:00
struct mct_clock_event_device * mevt = this_cpu_ptr ( & percpu_mct_tick ) ;
2011-03-11 10:39:57 +09:00
exynos4_mct_tick_start ( cycles , mevt ) ;
return 0 ;
}
static inline void exynos4_tick_set_mode ( enum clock_event_mode mode ,
struct clock_event_device * evt )
{
2011-11-03 11:13:12 +09:00
struct mct_clock_event_device * mevt = this_cpu_ptr ( & percpu_mct_tick ) ;
2012-03-09 15:09:21 -08:00
unsigned long cycles_per_jiffy ;
2011-03-11 10:39:57 +09:00
exynos4_mct_tick_stop ( mevt ) ;
switch ( mode ) {
case CLOCK_EVT_MODE_PERIODIC :
2012-03-09 15:09:21 -08:00
cycles_per_jiffy =
( ( ( unsigned long long ) NSEC_PER_SEC / HZ * evt - > mult ) > > evt - > shift ) ;
exynos4_mct_tick_start ( cycles_per_jiffy , mevt ) ;
2011-03-11 10:39:57 +09:00
break ;
case CLOCK_EVT_MODE_ONESHOT :
case CLOCK_EVT_MODE_UNUSED :
case CLOCK_EVT_MODE_SHUTDOWN :
case CLOCK_EVT_MODE_RESUME :
break ;
}
}
2015-04-30 13:42:51 +09:00
static void exynos4_mct_tick_clear ( struct mct_clock_event_device * mevt )
2011-03-11 10:39:57 +09:00
{
2013-02-15 16:40:51 -08:00
struct clock_event_device * evt = & mevt - > evt ;
2011-03-11 10:39:57 +09:00
/*
* This is for supporting oneshot mode .
* Mct would generate interrupt periodically
* without explicit stopping .
*/
if ( evt - > mode ! = CLOCK_EVT_MODE_PERIODIC )
exynos4_mct_tick_stop ( mevt ) ;
/* Clear the MCT tick interrupt */
2015-04-30 13:42:51 +09:00
if ( readl_relaxed ( reg_base + mevt - > base + MCT_L_INT_CSTAT_OFFSET ) & 1 )
2011-10-04 17:02:58 +09:00
exynos4_mct_write ( 0x1 , mevt - > base + MCT_L_INT_CSTAT_OFFSET ) ;
}
static irqreturn_t exynos4_mct_tick_isr ( int irq , void * dev_id )
{
struct mct_clock_event_device * mevt = dev_id ;
2013-02-15 16:40:51 -08:00
struct clock_event_device * evt = & mevt - > evt ;
2011-10-04 17:02:58 +09:00
exynos4_mct_tick_clear ( mevt ) ;
2011-03-11 10:39:57 +09:00
evt - > event_handler ( evt ) ;
return IRQ_HANDLED ;
}
2015-06-21 23:41:39 +03:00
static int exynos4_local_timer_setup ( struct mct_clock_event_device * mevt )
2011-03-11 10:39:57 +09:00
{
2015-06-21 23:41:39 +03:00
struct clock_event_device * evt = & mevt - > evt ;
2011-03-11 10:39:57 +09:00
unsigned int cpu = smp_processor_id ( ) ;
2011-11-03 11:13:12 +09:00
mevt - > base = EXYNOS4_MCT_L_BASE ( cpu ) ;
2014-03-01 16:57:14 +03:00
snprintf ( mevt - > name , sizeof ( mevt - > name ) , " mct_tick%d " , cpu ) ;
2011-03-11 10:39:57 +09:00
2011-11-03 11:13:12 +09:00
evt - > name = mevt - > name ;
2011-03-11 10:39:57 +09:00
evt - > cpumask = cpumask_of ( cpu ) ;
evt - > set_next_event = exynos4_tick_set_next_event ;
evt - > set_mode = exynos4_tick_set_mode ;
evt - > features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT ;
evt - > rating = 450 ;
2012-03-09 15:09:21 -08:00
exynos4_mct_write ( TICK_BASE_CNT , mevt - > base + MCT_L_TCNTB_OFFSET ) ;
2011-03-11 10:39:57 +09:00
2011-10-04 17:02:58 +09:00
if ( mct_int_type = = MCT_INT_SPI ) {
2015-06-26 15:23:04 +02:00
if ( evt - > irq = = - 1 )
2013-06-19 00:29:35 +09:00
return - EIO ;
2015-06-26 15:23:04 +02:00
irq_force_affinity ( evt - > irq , cpumask_of ( cpu ) ) ;
enable_irq ( evt - > irq ) ;
2011-03-11 10:39:57 +09:00
} else {
2013-03-09 16:01:50 +09:00
enable_percpu_irq ( mct_irqs [ MCT_L0_IRQ ] , 0 ) ;
2011-03-11 10:39:57 +09:00
}
2014-04-16 14:36:45 +00:00
clockevents_config_and_register ( evt , clk_rate / ( TICK_BASE_CNT + 1 ) ,
0xf , 0x7fffffff ) ;
2011-08-24 16:07:39 +09:00
return 0 ;
2011-03-11 10:39:57 +09:00
}
2015-06-21 23:41:39 +03:00
static void exynos4_local_timer_stop ( struct mct_clock_event_device * mevt )
2011-03-11 10:39:57 +09:00
{
2015-06-21 23:41:39 +03:00
struct clock_event_device * evt = & mevt - > evt ;
2011-07-22 12:52:37 +01:00
evt - > set_mode ( CLOCK_EVT_MODE_UNUSED , evt ) ;
2015-06-26 15:23:04 +02:00
if ( mct_int_type = = MCT_INT_SPI ) {
if ( evt - > irq ! = - 1 )
disable_irq_nosync ( evt - > irq ) ;
} else {
2013-03-09 16:01:50 +09:00
disable_percpu_irq ( mct_irqs [ MCT_L0_IRQ ] ) ;
2015-06-26 15:23:04 +02:00
}
2011-03-11 10:39:57 +09:00
}
2012-01-10 19:44:19 +00:00
2013-07-23 14:51:34 -07:00
static int exynos4_mct_cpu_notify ( struct notifier_block * self ,
2013-02-15 16:40:51 -08:00
unsigned long action , void * hcpu )
{
struct mct_clock_event_device * mevt ;
/*
* Grab cpu pointer in each case to avoid spurious
* preemptible warnings
*/
switch ( action & ~ CPU_TASKS_FROZEN ) {
case CPU_STARTING :
mevt = this_cpu_ptr ( & percpu_mct_tick ) ;
2015-06-21 23:41:39 +03:00
exynos4_local_timer_setup ( mevt ) ;
2013-02-15 16:40:51 -08:00
break ;
case CPU_DYING :
mevt = this_cpu_ptr ( & percpu_mct_tick ) ;
2015-06-21 23:41:39 +03:00
exynos4_local_timer_stop ( mevt ) ;
2013-02-15 16:40:51 -08:00
break ;
}
return NOTIFY_OK ;
}
2013-07-23 14:51:34 -07:00
static struct notifier_block exynos4_mct_cpu_nb = {
2013-02-15 16:40:51 -08:00
. notifier_call = exynos4_mct_cpu_notify ,
2012-01-10 19:44:19 +00:00
} ;
2011-03-11 10:39:57 +09:00
2013-04-09 22:24:06 +02:00
static void __init exynos4_timer_resources ( struct device_node * np , void __iomem * base )
2011-03-11 10:39:57 +09:00
{
2015-06-26 15:23:04 +02:00
int err , cpu ;
2013-02-15 16:40:51 -08:00
struct mct_clock_event_device * mevt = this_cpu_ptr ( & percpu_mct_tick ) ;
2013-03-09 17:10:37 +09:00
struct clk * mct_clk , * tick_clk ;
2011-03-11 10:39:57 +09:00
2013-03-09 17:10:31 +09:00
tick_clk = np ? of_clk_get_by_name ( np , " fin_pll " ) :
clk_get ( NULL , " fin_pll " ) ;
if ( IS_ERR ( tick_clk ) )
panic ( " %s: unable to determine tick clock rate \n " , __func__ ) ;
clk_rate = clk_get_rate ( tick_clk ) ;
2011-11-03 11:13:12 +09:00
2013-03-09 17:10:37 +09:00
mct_clk = np ? of_clk_get_by_name ( np , " mct " ) : clk_get ( NULL , " mct " ) ;
if ( IS_ERR ( mct_clk ) )
panic ( " %s: unable to retrieve mct clock instance \n " , __func__ ) ;
clk_prepare_enable ( mct_clk ) ;
2011-11-03 11:13:12 +09:00
2013-04-09 22:07:37 +02:00
reg_base = base ;
2013-03-09 16:01:52 +09:00
if ( ! reg_base )
panic ( " %s: unable to ioremap mct address space \n " , __func__ ) ;
2013-03-09 16:01:47 +09:00
2011-11-03 11:13:12 +09:00
if ( mct_int_type = = MCT_INT_PPI ) {
2013-03-09 16:01:50 +09:00
err = request_percpu_irq ( mct_irqs [ MCT_L0_IRQ ] ,
2011-11-03 11:13:12 +09:00
exynos4_mct_tick_isr , " MCT " ,
& percpu_mct_tick ) ;
WARN ( err , " MCT: can't request IRQ %d (%d) \n " ,
2013-03-09 16:01:50 +09:00
mct_irqs [ MCT_L0_IRQ ] , err ) ;
2013-09-25 12:00:59 +02:00
} else {
2015-06-26 15:23:04 +02:00
for_each_possible_cpu ( cpu ) {
int mct_irq = mct_irqs [ MCT_L0_IRQ + cpu ] ;
struct mct_clock_event_device * pcpu_mevt =
per_cpu_ptr ( & percpu_mct_tick , cpu ) ;
pcpu_mevt - > evt . irq = - 1 ;
irq_set_status_flags ( mct_irq , IRQ_NOAUTOEN ) ;
if ( request_irq ( mct_irq ,
exynos4_mct_tick_isr ,
IRQF_TIMER | IRQF_NOBALANCING ,
pcpu_mevt - > name , pcpu_mevt ) ) {
pr_err ( " exynos-mct: cannot register IRQ (cpu%d) \n " ,
cpu ) ;
continue ;
}
pcpu_mevt - > evt . irq = mct_irq ;
}
2011-11-03 11:13:12 +09:00
}
2012-01-10 19:44:19 +00:00
2013-02-15 16:40:51 -08:00
err = register_cpu_notifier ( & exynos4_mct_cpu_nb ) ;
if ( err )
goto out_irq ;
/* Immediately configure the timer on the boot CPU */
2015-06-21 23:41:39 +03:00
exynos4_local_timer_setup ( mevt ) ;
2013-02-15 16:40:51 -08:00
return ;
out_irq :
free_percpu_irq ( mct_irqs [ MCT_L0_IRQ ] , & percpu_mct_tick ) ;
2011-03-11 10:39:57 +09:00
}
2013-04-09 22:07:37 +02:00
static void __init mct_init_dt ( struct device_node * np , unsigned int int_type )
{
u32 nr_irqs , i ;
mct_int_type = int_type ;
/* This driver uses only one global timer interrupt */
mct_irqs [ MCT_G0_IRQ ] = irq_of_parse_and_map ( np , MCT_G0_IRQ ) ;
/*
* Find out the number of local irqs specified . The local
* timer irqs are specified after the four global timer
* irqs are specified .
*/
2013-04-19 22:00:04 +02:00
# ifdef CONFIG_OF
2013-04-09 22:07:37 +02:00
nr_irqs = of_irq_count ( np ) ;
2013-04-19 22:00:04 +02:00
# else
nr_irqs = 0 ;
# endif
2013-04-09 22:07:37 +02:00
for ( i = MCT_L0_IRQ ; i < nr_irqs ; i + + )
mct_irqs [ i ] = irq_of_parse_and_map ( np , i ) ;
2013-04-09 22:24:06 +02:00
exynos4_timer_resources ( np , of_iomap ( np , 0 ) ) ;
2011-03-11 10:39:57 +09:00
exynos4_clocksource_init ( ) ;
exynos4_clockevent_init ( ) ;
}
2013-04-09 22:07:37 +02:00
static void __init mct_init_spi ( struct device_node * np )
{
return mct_init_dt ( np , MCT_INT_SPI ) ;
}
static void __init mct_init_ppi ( struct device_node * np )
{
return mct_init_dt ( np , MCT_INT_PPI ) ;
}
CLOCKSOURCE_OF_DECLARE ( exynos4210 , " samsung,exynos4210-mct " , mct_init_spi ) ;
CLOCKSOURCE_OF_DECLARE ( exynos4412 , " samsung,exynos4412-mct " , mct_init_ppi ) ;