/*
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
2011-11-08 22:34:04 +04:00
# include <linux/clocksource.h>
# include <linux/clockchips.h>
2013-02-16 05:31:31 +04:00
# include <linux/cpu.h>
2007-11-26 15:11:58 +03:00
# include <linux/init.h>
# include <linux/interrupt.h>
# include <linux/irq.h>
2008-09-06 15:10:45 +04:00
# include <linux/io.h>
2012-09-05 23:28:53 +04:00
# include <linux/of.h>
# include <linux/of_address.h>
# include <linux/of_irq.h>
2013-06-02 10:39:40 +04:00
# include <linux/sched_clock.h>
2007-11-26 15:11:58 +03:00
# include <asm/mach/time.h>
2011-06-01 03:10:00 +04:00
2012-09-05 23:28:52 +04:00
# include "common.h"
2007-11-26 15:11:58 +03:00
2013-03-15 07:31:39 +04:00
/* Register offsets, relative to event_base / source_base */
#define TIMER_MATCH_VAL			0x0000
#define TIMER_COUNT_VAL			0x0004
#define TIMER_ENABLE			0x0008
#define TIMER_ENABLE_CLR_ON_MATCH_EN	BIT(1)
#define TIMER_ENABLE_EN			BIT(0)
#define TIMER_CLEAR			0x000C
#define DGT_CLK_CTL			0x10
#define DGT_CLK_CTL_DIV_4		0x3
#define TIMER_STS_GPT0_CLR_PEND		BIT(10)

/* GPT (general purpose timer) tick rate used for the clockevent */
#define GPT_HZ 32768

/*
 * The DGT count is shifted down by this amount because the lower bits
 * are unreliable on some targets (see msm_read_timer_count_shift()).
 */
#define MSM_DGT_SHIFT 5
2007-11-26 15:11:58 +03:00
2011-11-08 22:34:07 +04:00
/* MMIO base of the clockevent (GPT) register block */
static void __iomem *event_base;

/* MMIO status register used to wait out a pending counter clear; may be NULL */
static void __iomem *sts_base;
2011-11-08 22:34:06 +04:00
2007-11-26 15:11:58 +03:00
static irqreturn_t msm_timer_interrupt ( int irq , void * dev_id )
{
2013-02-16 05:31:31 +04:00
struct clock_event_device * evt = dev_id ;
2011-11-08 22:34:06 +04:00
/* Stop the timer tick */
if ( evt - > mode = = CLOCK_EVT_MODE_ONESHOT ) {
2011-11-08 22:34:07 +04:00
u32 ctrl = readl_relaxed ( event_base + TIMER_ENABLE ) ;
2011-11-08 22:34:06 +04:00
ctrl & = ~ TIMER_ENABLE_EN ;
2011-11-08 22:34:07 +04:00
writel_relaxed ( ctrl , event_base + TIMER_ENABLE ) ;
2011-11-08 22:34:06 +04:00
}
2007-11-26 15:11:58 +03:00
evt - > event_handler ( evt ) ;
return IRQ_HANDLED ;
}
static int msm_timer_set_next_event ( unsigned long cycles ,
struct clock_event_device * evt )
{
2011-11-08 22:34:07 +04:00
u32 ctrl = readl_relaxed ( event_base + TIMER_ENABLE ) ;
2007-11-26 15:11:58 +03:00
2013-03-15 07:31:37 +04:00
ctrl & = ~ TIMER_ENABLE_EN ;
writel_relaxed ( ctrl , event_base + TIMER_ENABLE ) ;
writel_relaxed ( ctrl , event_base + TIMER_CLEAR ) ;
2011-11-08 22:34:07 +04:00
writel_relaxed ( cycles , event_base + TIMER_MATCH_VAL ) ;
2013-03-15 07:31:39 +04:00
if ( sts_base )
while ( readl_relaxed ( sts_base ) & TIMER_STS_GPT0_CLR_PEND )
cpu_relax ( ) ;
2011-11-08 22:34:07 +04:00
writel_relaxed ( ctrl | TIMER_ENABLE_EN , event_base + TIMER_ENABLE ) ;
2007-11-26 15:11:58 +03:00
return 0 ;
}
static void msm_timer_set_mode ( enum clock_event_mode mode ,
struct clock_event_device * evt )
{
2011-11-08 22:34:06 +04:00
u32 ctrl ;
2011-11-08 22:34:07 +04:00
ctrl = readl_relaxed ( event_base + TIMER_ENABLE ) ;
2011-11-08 22:34:06 +04:00
ctrl & = ~ ( TIMER_ENABLE_EN | TIMER_ENABLE_CLR_ON_MATCH_EN ) ;
2010-12-02 23:05:12 +03:00
2007-11-26 15:11:58 +03:00
switch ( mode ) {
case CLOCK_EVT_MODE_RESUME :
case CLOCK_EVT_MODE_PERIODIC :
break ;
case CLOCK_EVT_MODE_ONESHOT :
2011-11-08 22:34:06 +04:00
/* Timer is enabled in set_next_event */
2007-11-26 15:11:58 +03:00
break ;
case CLOCK_EVT_MODE_UNUSED :
case CLOCK_EVT_MODE_SHUTDOWN :
break ;
}
2011-11-08 22:34:07 +04:00
writel_relaxed ( ctrl , event_base + TIMER_ENABLE ) ;
2007-11-26 15:11:58 +03:00
}
2013-02-16 05:31:31 +04:00
/* Per-cpu clockevent devices, allocated in msm_timer_init() */
static struct clock_event_device __percpu *msm_evt;

/* MMIO base of the DGT (debug timer) used as the clocksource */
static void __iomem *source_base;
2012-02-22 05:39:37 +04:00
/* Clocksource read callback: return the raw free-running DGT count. */
static notrace cycle_t msm_read_timer_count(struct clocksource *cs)
{
	return readl_relaxed(source_base + TIMER_COUNT_VAL);
}
2012-02-22 05:39:37 +04:00
/*
 * Clocksource read callback for targets whose lower DGT bits are
 * unreliable: discard them by shifting the raw count down.
 */
static notrace cycle_t msm_read_timer_count_shift(struct clocksource *cs)
{
	cycle_t raw = msm_read_timer_count(cs);

	return raw >> MSM_DGT_SHIFT;
}
static struct clocksource msm_clocksource = {
. name = " dg_timer " ,
. rating = 300 ,
. read = msm_read_timer_count ,
2011-11-08 22:34:08 +04:00
. mask = CLOCKSOURCE_MASK ( 32 ) ,
2011-11-08 22:34:07 +04:00
. flags = CLOCK_SOURCE_IS_CONTINUOUS ,
2007-11-26 15:11:58 +03:00
} ;
2013-02-16 05:31:31 +04:00
static int msm_timer_irq ;
static int msm_timer_has_ppi ;
2013-06-17 23:43:14 +04:00
static int msm_local_timer_setup ( struct clock_event_device * evt )
2012-01-10 23:44:19 +04:00
{
2013-02-16 05:31:31 +04:00
int cpu = smp_processor_id ( ) ;
int err ;
evt - > irq = msm_timer_irq ;
evt - > name = " msm_timer " ;
evt - > features = CLOCK_EVT_FEAT_ONESHOT ;
evt - > rating = 200 ;
2012-01-10 23:44:19 +04:00
evt - > set_mode = msm_timer_set_mode ;
evt - > set_next_event = msm_timer_set_next_event ;
2013-02-16 05:31:31 +04:00
evt - > cpumask = cpumask_of ( cpu ) ;
clockevents_config_and_register ( evt , GPT_HZ , 4 , 0xffffffff ) ;
if ( msm_timer_has_ppi ) {
enable_percpu_irq ( evt - > irq , IRQ_TYPE_EDGE_RISING ) ;
} else {
err = request_irq ( evt - > irq , msm_timer_interrupt ,
IRQF_TIMER | IRQF_NOBALANCING |
IRQF_TRIGGER_RISING , " gp_timer " , evt ) ;
if ( err )
pr_err ( " request_irq failed \n " ) ;
}
2012-01-10 23:44:19 +04:00
return 0 ;
}
static void msm_local_timer_stop ( struct clock_event_device * evt )
{
evt - > set_mode ( CLOCK_EVT_MODE_UNUSED , evt ) ;
disable_percpu_irq ( evt - > irq ) ;
}
2013-07-24 01:51:34 +04:00
static int msm_timer_cpu_notify ( struct notifier_block * self ,
2013-02-16 05:31:31 +04:00
unsigned long action , void * hcpu )
{
/*
* Grab cpu pointer in each case to avoid spurious
* preemptible warnings
*/
switch ( action & ~ CPU_TASKS_FROZEN ) {
case CPU_STARTING :
msm_local_timer_setup ( this_cpu_ptr ( msm_evt ) ) ;
break ;
case CPU_DYING :
msm_local_timer_stop ( this_cpu_ptr ( msm_evt ) ) ;
break ;
}
return NOTIFY_OK ;
}
2013-07-24 01:51:34 +04:00
/* Hotplug notifier registered in msm_timer_init() */
static struct notifier_block msm_timer_cpu_nb = {
	.notifier_call = msm_timer_cpu_notify,
};
2013-11-16 03:26:16 +04:00
/* sched_clock backend: reuse whichever read callback the clocksource has. */
static u64 notrace msm_sched_clock_read(void)
{
	struct clocksource *cs = &msm_clocksource;

	return cs->read(cs);
}
2012-09-05 23:28:52 +04:00
/*
 * Common timer bring-up.
 *
 * @dgt_hz:     clocksource (DGT) tick rate in Hz
 * @sched_bits: number of valid counter bits for sched_clock
 * @irq:        clockevent interrupt number
 * @percpu:     true when @irq is a per-cpu (PPI) interrupt
 *
 * Sets up the per-cpu clockevents (best effort), then unconditionally
 * enables the DGT and registers it as clocksource and sched_clock.
 */
static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
				  bool percpu)
{
	struct clocksource *cs = &msm_clocksource;
	int res = 0;

	msm_timer_irq = irq;
	msm_timer_has_ppi = percpu;

	msm_evt = alloc_percpu(struct clock_event_device);
	if (!msm_evt) {
		pr_err("memory allocation failed for clockevents\n");
		goto err;
	}

	if (percpu)
		res = request_percpu_irq(irq, msm_timer_interrupt,
					 "gp_timer", msm_evt);

	if (res) {
		pr_err("request_percpu_irq failed\n");
	} else {
		res = register_cpu_notifier(&msm_timer_cpu_nb);
		if (res) {
			/*
			 * Only undo the percpu IRQ request when one was
			 * actually made; in the !percpu case the IRQ is
			 * requested later by msm_local_timer_setup().
			 */
			if (percpu)
				free_percpu_irq(irq, msm_evt);
			goto err;
		}

		/* Immediately configure the timer on the boot CPU */
		msm_local_timer_setup(__this_cpu_ptr(msm_evt));
	}

err:
	/* The DGT clocksource is registered even if clockevents failed */
	writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE);
	res = clocksource_register_hz(cs, dgt_hz);
	if (res)
		pr_err("clocksource_register failed\n");
	sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz);
}
2012-09-05 23:28:53 +04:00
# ifdef CONFIG_OF
2013-07-25 00:54:30 +04:00
/*
 * Device-tree probe: map the GPT/DGT register blocks, parse the
 * clockevent IRQ and DGT frequency, then hand off to msm_timer_init().
 *
 * Fix: earlier error paths returned without unmapping what had already
 * been mapped; use goto-based cleanup so no ioremap is leaked.
 */
static void __init msm_dt_timer_init(struct device_node *np)
{
	u32 freq;
	int irq;
	struct resource res;
	u32 percpu_offset;
	void __iomem *base;
	void __iomem *cpu0_base;

	base = of_iomap(np, 0);
	if (!base) {
		pr_err("Failed to map event base\n");
		return;
	}

	/* We use GPT0 for the clockevent */
	irq = irq_of_parse_and_map(np, 1);
	if (irq <= 0) {
		pr_err("Can't get irq\n");
		goto err_unmap_base;
	}

	/* We use CPU0's DGT for the clocksource */
	if (of_property_read_u32(np, "cpu-offset", &percpu_offset))
		percpu_offset = 0;

	if (of_address_to_resource(np, 0, &res)) {
		pr_err("Failed to parse DGT resource\n");
		goto err_unmap_base;
	}

	cpu0_base = ioremap(res.start + percpu_offset, resource_size(&res));
	if (!cpu0_base) {
		pr_err("Failed to map source base\n");
		goto err_unmap_base;
	}

	if (of_property_read_u32(np, "clock-frequency", &freq)) {
		pr_err("Unknown frequency\n");
		goto err_unmap_cpu0;
	}

	event_base = base + 0x4;
	sts_base = base + 0x88;
	source_base = cpu0_base + 0x24;
	freq /= 4;
	writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL);

	msm_timer_init(freq, 32, irq, !!percpu_offset);
	return;

err_unmap_cpu0:
	iounmap(cpu0_base);
err_unmap_base:
	iounmap(base);
}
2013-07-25 00:54:30 +04:00
CLOCKSOURCE_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init);
CLOCKSOURCE_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init);
2012-09-05 23:28:53 +04:00
# endif
2013-03-15 07:31:39 +04:00
/*
 * Map a legacy (non-DT) timer register block at @addr and derive the
 * event/source/status bases from the given offsets.  A zero @sts means
 * the target has no GPT0 clear-pending status register.
 *
 * Returns 0 on success, -ENOMEM when the mapping fails.
 */
static int __init msm_timer_map(phys_addr_t addr, u32 event, u32 source,
				u32 sts)
{
	void __iomem *regs = ioremap(addr, SZ_256);

	if (!regs) {
		pr_err("Failed to map timer base\n");
		return -ENOMEM;
	}

	event_base = regs + event;
	source_base = regs + source;
	if (sts)
		sts_base = regs + sts;

	return 0;
}
2012-11-08 23:40:59 +04:00
/*
 * MSM7x01 board setup: the DGT's low bits are unreliable here, so the
 * shifted read callback and a narrowed mask are installed before init.
 */
void __init msm7x01_timer_init(void)
{
	struct clocksource *cs = &msm_clocksource;

	if (msm_timer_map(0xc0100000, 0x0, 0x10, 0x0))
		return;

	cs->read = msm_read_timer_count_shift;
	cs->mask = CLOCKSOURCE_MASK(32 - MSM_DGT_SHIFT);
	/* 600 KHz */
	msm_timer_init(19200000 >> MSM_DGT_SHIFT, 32 - MSM_DGT_SHIFT, 7,
		       false);
}
2012-11-08 23:40:59 +04:00
/* MSM7x30 board setup: 24.576 MHz DGT divided by 4, IRQ 1. */
void __init msm7x30_timer_init(void)
{
	if (msm_timer_map(0xc0100000, 0x4, 0x24, 0x80))
		return;

	msm_timer_init(24576000 / 4, 32, 1, false);
}
2012-11-08 23:40:59 +04:00
/* QSD8x50 board setup: 19.2 MHz DGT divided by 4, IRQ 7. */
void __init qsd8x50_timer_init(void)
{
	if (msm_timer_map(0xAC100000, 0x0, 0x10, 0x34))
		return;

	msm_timer_init(19200000 / 4, 32, 7, false);
}