/*
 * Based on arm clockevents implementation and old bfin time tick.
 *
 * Copyright 2008-2009 Analog Devices Inc.
 *                2008 GeoTechnologies
 *                     Vitja Makarov
 *
 * Licensed under the GPL-2
 */

#include <linux/module.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/irq.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpufreq.h>

#include <asm/blackfin.h>
#include <asm/time.h>
#include <asm/gptimers.h>
#include <asm/nmi.h>

#if defined(CONFIG_CYCLES_CLOCKSOURCE)
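/*
 * Read the free-running core CYCLES counter.  When cpufreq scaling is
 * enabled, the raw value is shifted and offset so the returned count
 * stays monotonic across frequency transitions.
 */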
static notrace cycle_t bfin_read_cycles(struct clocksource *cs)
{
#ifdef CONFIG_CPU_FREQ
	return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
#else
	return get_cycles();
#endif
}

static struct clocksource bfin_cs_cycles = {
	.name		= "bfin_cs_cycles",
	.rating		= 400,
	.read		= bfin_read_cycles,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
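
/*
 * clocksource_cyc2ns() converts a cycle count to nanoseconds as
 * (cycles * mult) >> shift, using the mult/shift pair computed when
 * the clocksource was registered against the core clock rate.
 */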
static inline unsigned long long bfin_cs_cycles_sched_clock(void)
{
	return clocksource_cyc2ns(bfin_read_cycles(&bfin_cs_cycles),
		bfin_cs_cycles.mult, bfin_cs_cycles.shift);
}

static int __init bfin_cs_cycles_init(void)
{
	if (clocksource_register_hz(&bfin_cs_cycles, get_cclk()))
		panic("failed to register clocksource");

	return 0;
}
#else
# define bfin_cs_cycles_init()
#endif

#ifdef CONFIG_GPTMR0_CLOCKSOURCE
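/*
 * Configure GPTIMER0 as a free-running counter: continuous PWM count
 * mode with the output pin disabled.  Truncated to the 32-bit timer
 * registers, the -1/-2 writes below are the all-ones (maximum) period
 * and a pulse width of (period - 1).
 */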
void __init setup_gptimer0(void)
{
	disable_gptimers(TIMER0bit);

#ifdef CONFIG_BF60x
	bfin_write16(TIMER_DATA_IMSK, 0);
	set_gptimer_config(TIMER0_id, TIMER_OUT_DIS
		| TIMER_MODE_PWM_CONT | TIMER_PULSE_HI | TIMER_IRQ_PER);
#else
	set_gptimer_config(TIMER0_id,
		TIMER_OUT_DIS | TIMER_PERIOD_CNT | TIMER_MODE_PWM);
#endif

	set_gptimer_period(TIMER0_id, -1);
	set_gptimer_pwidth(TIMER0_id, -2);
	SSYNC();
	enable_gptimers(TIMER0bit);
}

static cycle_t bfin_read_gptimer0(struct clocksource *cs)
{
	return bfin_read_TIMER0_COUNTER();
}

static struct clocksource bfin_cs_gptimer0 = {
	.name		= "bfin_cs_gptimer0",
	.rating		= 350,
	.read		= bfin_read_gptimer0,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static inline unsigned long long bfin_cs_gptimer0_sched_clock(void)
{
	return clocksource_cyc2ns(bfin_read_TIMER0_COUNTER(),
		bfin_cs_gptimer0.mult, bfin_cs_gptimer0.shift);
}

static int __init bfin_cs_gptimer0_init(void)
{
	setup_gptimer0();

	if (clocksource_register_hz(&bfin_cs_gptimer0, get_sclk()))
		panic("failed to register clocksource");

	return 0;
}
#else
# define bfin_cs_gptimer0_init()
#endif

#if defined(CONFIG_GPTMR0_CLOCKSOURCE) || defined(CONFIG_CYCLES_CLOCKSOURCE)
/* prefer to use cycles since it has higher rating */
notrace unsigned long long sched_clock(void)
{
#if defined(CONFIG_CYCLES_CLOCKSOURCE)
	return bfin_cs_cycles_sched_clock();
#else
	return bfin_cs_gptimer0_sched_clock();
#endif
}
#endif

#if defined(CONFIG_TICKSOURCE_GPTMR0)
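/*
 * One-shot reprogramming: the requested delta is loaded into the pulse
 * width register, and the timer interrupt fires once that width elapses.
 */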
static int bfin_gptmr0_set_next_event(unsigned long cycles,
				     struct clock_event_device *evt)
{
	disable_gptimers(TIMER0bit);

	/* it starts counting three SCLK cycles after the TIMENx bit is set */
	set_gptimer_pwidth(TIMER0_id, cycles - 3);
	enable_gptimers(TIMER0bit);
	return 0;
}
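
/*
 * Periodic mode: reload every SCLK/HZ cycles so the timer fires once per
 * kernel tick.  For example, a 100 MHz SCLK with HZ=100 gives a period
 * of 1,000,000 timer cycles, i.e. a 10 ms tick.
 */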
static int bfin_gptmr0_set_periodic(struct clock_event_device *evt)
{
#ifndef CONFIG_BF60x
	set_gptimer_config(TIMER0_id,
			   TIMER_OUT_DIS | TIMER_IRQ_ENA |
			   TIMER_PERIOD_CNT | TIMER_MODE_PWM);
#else
	set_gptimer_config(TIMER0_id,
			   TIMER_OUT_DIS | TIMER_MODE_PWM_CONT |
			   TIMER_PULSE_HI | TIMER_IRQ_PER);
#endif

	set_gptimer_period(TIMER0_id, get_sclk() / HZ);
	set_gptimer_pwidth(TIMER0_id, get_sclk() / HZ - 1);
	enable_gptimers(TIMER0bit);
	return 0;
}

static int bfin_gptmr0_set_oneshot(struct clock_event_device *evt)
{
	disable_gptimers(TIMER0bit);
#ifndef CONFIG_BF60x
	set_gptimer_config(TIMER0_id,
			   TIMER_OUT_DIS | TIMER_IRQ_ENA | TIMER_MODE_PWM);
#else
	set_gptimer_config(TIMER0_id,
			   TIMER_OUT_DIS | TIMER_MODE_PWM | TIMER_PULSE_HI |
			   TIMER_IRQ_WID_DLY);
#endif

	set_gptimer_period(TIMER0_id, 0);
	return 0;
}

static int bfin_gptmr0_shutdown(struct clock_event_device *evt)
{
	disable_gptimers(TIMER0bit);
	return 0;
}

static void bfin_gptmr0_ack(void)
{
	clear_gptimer_intr(TIMER0_id);
}

static void __init bfin_gptmr0_init(void)
{
	disable_gptimers(TIMER0bit);
}

#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;
	smp_mb();
	/*
	 * We want to ACK before we handle so that we can handle smaller timer
	 * intervals.  This way if the timer expires again while we're handling
	 * things, we're more likely to see that 2nd int rather than swallowing
	 * it by ACKing the int at the end of this handler.
	 */
	bfin_gptmr0_ack();
	evt->event_handler(evt);
	return IRQ_HANDLED;
}

static struct irqaction gptmr0_irq = {
	.name		= "Blackfin GPTimer0",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL | IRQF_PERCPU,
	.handler	= bfin_gptmr0_interrupt,
};

static struct clock_event_device clockevent_gptmr0 = {
	.name			= "bfin_gptimer0",
	.rating			= 300,
	.irq			= IRQ_TIMER0,
	.shift			= 32,
	.features		= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event		= bfin_gptmr0_set_next_event,
	.set_state_shutdown	= bfin_gptmr0_shutdown,
	.set_state_periodic	= bfin_gptmr0_set_periodic,
	.set_state_oneshot	= bfin_gptmr0_set_oneshot,
};
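
/*
 * Register the GPTIMER0 clock event device.  clockevent_delta2ns(-1, evt)
 * converts the maximum 32-bit cycle delta into the longest programmable
 * interval in nanoseconds; 100 cycles is the minimum delta the driver
 * allows for reprogramming.
 */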
static void __init bfin_gptmr0_clockevent_init(struct clock_event_device *evt)
{
	unsigned long clock_tick;

	clock_tick = get_sclk();
	evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);
	evt->max_delta_ns = clockevent_delta2ns(-1, evt);
	evt->min_delta_ns = clockevent_delta2ns(100, evt);

	evt->cpumask = cpumask_of(0);

	clockevents_register_device(evt);
}
#endif /* CONFIG_TICKSOURCE_GPTMR0 */

#if defined(CONFIG_TICKSOURCE_CORETMR)
/* per-cpu local core timer */
DEFINE_PER_CPU(struct clock_event_device, coretmr_events);
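
/*
 * One-shot reprogramming of the core timer: load TCOUNT with the delta
 * and enable the timer.  TCOUNT counts down to zero and then raises the
 * core timer interrupt.
 */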
static int bfin_coretmr_set_next_event(unsigned long cycles,
				struct clock_event_device *evt)
{
	bfin_write_TCNTL(TMPWR);
	CSYNC();
	bfin_write_TCOUNT(cycles);
	CSYNC();
	bfin_write_TCNTL(TMPWR | TMREN);
	return 0;
}
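
/*
 * Periodic mode: CCLK is prescaled by TIME_SCALE via TSCALE, and TAUTORLD
 * reloads TCOUNT from TPERIOD each time it reaches zero, giving an
 * interrupt every CCLK / (HZ * TIME_SCALE) scaled cycles, i.e. HZ ticks
 * per second.
 */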
static int bfin_coretmr_set_periodic(struct clock_event_device *evt)
{
	unsigned long tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);

	bfin_write_TCNTL(TMPWR);
	CSYNC();
	bfin_write_TSCALE(TIME_SCALE - 1);
	bfin_write_TPERIOD(tcount);
	bfin_write_TCOUNT(tcount);
	CSYNC();
	bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD);
	return 0;
}

static int bfin_coretmr_set_oneshot(struct clock_event_device *evt)
{
	bfin_write_TCNTL(TMPWR);
	CSYNC();
	bfin_write_TSCALE(TIME_SCALE - 1);
	bfin_write_TPERIOD(0);
	bfin_write_TCOUNT(0);
	return 0;
}

static int bfin_coretmr_shutdown(struct clock_event_device *evt)
{
	bfin_write_TCNTL(0);
	CSYNC();
	return 0;
}

void bfin_coretmr_init(void)
{
	/* power up the timer, but don't enable it just yet */
	bfin_write_TCNTL(TMPWR);
	CSYNC();

	/* the TSCALE prescaler counter */
	bfin_write_TSCALE(TIME_SCALE - 1);

	bfin_write_TPERIOD(0);
	bfin_write_TCOUNT(0);
	CSYNC();
}

#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
irqreturn_t bfin_coretmr_interrupt(int irq, void *dev_id)
{
	int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);

	smp_mb();
	evt->event_handler(evt);

	touch_nmi_watchdog();

	return IRQ_HANDLED;
}

static struct irqaction coretmr_irq = {
	.name		= "Blackfin CoreTimer",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL | IRQF_PERCPU,
	.handler	= bfin_coretmr_interrupt,
};
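
/*
 * Register the calling CPU's core timer clock event device.  mult/shift
 * are derived from the prescaled core clock (CCLK / TIME_SCALE), and the
 * device is bound to this CPU only, since each core has a private timer.
 */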
void bfin_coretmr_clockevent_init(void)
{
	unsigned long clock_tick;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);

#ifdef CONFIG_SMP
	evt->broadcast = smp_timer_broadcast;
#endif

	evt->name = "bfin_core_timer";
	evt->rating = 350;
	evt->irq = -1;
	evt->shift = 32;
	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	evt->set_next_event = bfin_coretmr_set_next_event;
	evt->set_state_shutdown = bfin_coretmr_shutdown;
	evt->set_state_periodic = bfin_coretmr_set_periodic;
	evt->set_state_oneshot = bfin_coretmr_set_oneshot;

	clock_tick = get_cclk() / TIME_SCALE;
	evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);
	evt->max_delta_ns = clockevent_delta2ns(-1, evt);
	evt->min_delta_ns = clockevent_delta2ns(100, evt);

	evt->cpumask = cpumask_of(cpu);

	clockevents_register_device(evt);
}
#endif /* CONFIG_TICKSOURCE_CORETMR */

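/*
 * No battery-backed RTC is consulted here; boot time is pinned to
 * 1 Jan 2007: 37 years since 1970 plus 9 leap days (1972 through 2004),
 * hence (365 * 37 + 9) days converted to seconds.
 */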
void read_persistent_clock(struct timespec *ts)
{
	time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */
	ts->tv_sec = secs_since_1970;
	ts->tv_nsec = 0;
}
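
/*
 * Late boot time setup: filter bogus RTC state, register whichever
 * clocksources are configured, then bring up the configured tick source
 * (core timer and/or GPTIMER0) as the clock event device.
 */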
void __init time_init(void)
{
#ifdef CONFIG_RTC_DRV_BFIN
	/* [#2663] hack to filter junk RTC values that would cause
	 * userspace to have to deal with time values greater than
	 * 2^31 seconds (which uClibc cannot cope with yet)
	 */
	if ((bfin_read_RTC_STAT() & 0xC0000000) == 0xC0000000) {
		printk(KERN_NOTICE "bfin-rtc: invalid date; resetting\n");
		bfin_write_RTC_STAT(0);
	}
#endif

	bfin_cs_cycles_init();
	bfin_cs_gptimer0_init();

#if defined(CONFIG_TICKSOURCE_CORETMR)
	bfin_coretmr_init();
	setup_irq(IRQ_CORETMR, &coretmr_irq);
	bfin_coretmr_clockevent_init();
#endif

#if defined(CONFIG_TICKSOURCE_GPTMR0)
	bfin_gptmr0_init();
	setup_irq(IRQ_TIMER0, &gptmr0_irq);
	gptmr0_irq.dev_id = &clockevent_gptmr0;
	bfin_gptmr0_clockevent_init(&clockevent_gptmr0);
#endif

#if !defined(CONFIG_TICKSOURCE_CORETMR) && !defined(CONFIG_TICKSOURCE_GPTMR0)
# error at least one clock event device is required
#endif
}