/*
 * linux/arch/kernel/time-ts.c
 *
 * Based on arm clockevents implementation and old bfin time tick.
 *
 * Copyright (C) 2008, GeoTechnologies, Vitja Makarov
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */

#include <linux/module.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/irq.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpufreq.h>

#include <asm/blackfin.h>
#include <asm/time.h>
#include <asm/gptimers.h>

#if defined(CONFIG_CYCLES_CLOCKSOURCE)

/* Accelerators for sched_clock()
 * convert from cycles (64 bits) => nanoseconds (64 bits)
 * basic equation:
 *		ns = cycles / (freq / ns_per_sec)
 *		ns = cycles * (ns_per_sec / freq)
 *		ns = cycles * (10^9 / (cpu_khz * 10^3))
 *		ns = cycles * (10^6 / cpu_khz)
 *
 *	Then we use scaling math (suggested by george@mvista.com) to get:
 *		ns = cycles * (10^6 * SC / cpu_khz) / SC
 *		ns = cycles * cyc2ns_scale / SC
 *
 *	And since SC is a constant power of two, we can convert the div
 *	into a shift.
 *
 *	We can use khz divisor instead of mhz to keep a better precision, since
 *	cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *	(mathieu.desnoyers@polymtl.ca)
 *
 *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
 */

static unsigned long cyc2ns_scale;
#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
	cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

static inline unsigned long long cycles_2_ns(cycle_t cyc)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}
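
/*
 * Worked example with hypothetical numbers (illustration only): for a
 * 500 MHz core clock, cpu_khz = 500000, so
 *	cyc2ns_scale = (1000000 << 10) / 500000 = 2048
 * and cycles_2_ns(c) = (c * 2048) >> 10 = c * 2, i.e. 2 ns per cycle,
 * matching 1 / 500 MHz.
 */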

static cycle_t bfin_read_cycles(struct clocksource *cs)
{
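	/*
	 * __bfin_cycles_off and __bfin_cycles_mod appear to compensate for
	 * core-clock frequency changes (cpufreq), so the returned count
	 * stays monotonic across frequency transitions.
	 */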
	return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
}

static struct clocksource bfin_cs_cycles = {
	.name		= "bfin_cs_cycles",
	.rating		= 350,
	.read		= bfin_read_cycles,
	.mask		= CLOCKSOURCE_MASK(64),
	.shift		= 22,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

unsigned long long sched_clock(void)
{
	return cycles_2_ns(bfin_read_cycles(&bfin_cs_cycles));
}

static int __init bfin_cs_cycles_init(void)
{
	set_cyc2ns_scale(get_cclk() / 1000);
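
	/*
	 * The clocksource converts cycles to nanoseconds as
	 * ns = (cycles * mult) >> shift; clocksource_hz2mult() derives
	 * mult from the core clock rate for the shift chosen above (22).
	 */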
	bfin_cs_cycles.mult =
		clocksource_hz2mult(get_cclk(), bfin_cs_cycles.shift);

	if (clocksource_register(&bfin_cs_cycles))
		panic("failed to register clocksource");

	return 0;
}
#else
# define bfin_cs_cycles_init()
#endif

#ifdef CONFIG_GPTMR0_CLOCKSOURCE

void __init setup_gptimer0(void)
{
	disable_gptimers(TIMER0bit);

	set_gptimer_config(TIMER0_id,
		TIMER_OUT_DIS | TIMER_PERIOD_CNT | TIMER_MODE_PWM);
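	/*
	 * Presumably -1/-2 program the 32-bit period and pulse-width
	 * registers to (close to) their maximum values, so TIMER0
	 * free-runs over the full 32-bit range as a clocksource.
	 */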
	set_gptimer_period(TIMER0_id, -1);
	set_gptimer_pwidth(TIMER0_id, -2);
	SSYNC();
	enable_gptimers(TIMER0bit);
}

static cycle_t bfin_read_gptimer0(void)
{
	return bfin_read_TIMER0_COUNTER();
}

static struct clocksource bfin_cs_gptimer0 = {
	.name		= "bfin_cs_gptimer0",
	.rating		= 400,
	.read		= bfin_read_gptimer0,
	.mask		= CLOCKSOURCE_MASK(32),
	.shift		= 22,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init bfin_cs_gptimer0_init(void)
{
	setup_gptimer0();

	bfin_cs_gptimer0.mult =
		clocksource_hz2mult(get_sclk(), bfin_cs_gptimer0.shift);

	if (clocksource_register(&bfin_cs_gptimer0))
		panic("failed to register clocksource");

	return 0;
}
#else
# define bfin_cs_gptimer0_init()
#endif

#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
irqreturn_t timer_interrupt(int irq, void *dev_id);

static int bfin_timer_set_next_event(unsigned long,
		struct clock_event_device *);

static void bfin_timer_set_mode(enum clock_event_mode,
		struct clock_event_device *);

static struct clock_event_device clockevent_bfin = {
#if defined(CONFIG_TICKSOURCE_GPTMR0)
	.name		= "bfin_gptimer0",
	.rating		= 300,
	.irq		= IRQ_TIMER0,
#else
	.name		= "bfin_core_timer",
	.rating		= 350,
	.irq		= IRQ_CORETMR,
#endif
	.shift		= 32,
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event = bfin_timer_set_next_event,
	.set_mode	= bfin_timer_set_mode,
};

static struct irqaction bfin_timer_irq = {
#if defined(CONFIG_TICKSOURCE_GPTMR0)
	.name		= "Blackfin GPTimer0",
#else
	.name		= "Blackfin CoreTimer",
#endif
	.flags		= IRQF_DISABLED | IRQF_TIMER |
			  IRQF_IRQPOLL | IRQF_PERCPU,
	.handler	= timer_interrupt,
	.dev_id		= &clockevent_bfin,
};

#if defined(CONFIG_TICKSOURCE_GPTMR0)
static int bfin_timer_set_next_event(unsigned long cycles,
				     struct clock_event_device *evt)
{
	disable_gptimers(TIMER0bit);

	/* it starts counting three SCLK cycles after the TIMENx bit is set */
	set_gptimer_pwidth(TIMER0_id, cycles - 3);
	enable_gptimers(TIMER0bit);
	return 0;
}

static void bfin_timer_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC: {
		set_gptimer_config(TIMER0_id,
			TIMER_OUT_DIS | TIMER_IRQ_ENA |
			TIMER_PERIOD_CNT | TIMER_MODE_PWM);
		set_gptimer_period(TIMER0_id, get_sclk() / HZ);
		set_gptimer_pwidth(TIMER0_id, get_sclk() / HZ - 1);
		enable_gptimers(TIMER0bit);
		break;
	}
	case CLOCK_EVT_MODE_ONESHOT:
		disable_gptimers(TIMER0bit);
		set_gptimer_config(TIMER0_id,
			TIMER_OUT_DIS | TIMER_IRQ_ENA | TIMER_MODE_PWM);
		set_gptimer_period(TIMER0_id, 0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		disable_gptimers(TIMER0bit);
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}
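
/*
 * Acknowledge the tick: the gptimer status bits are write-1-to-clear,
 * so writing TIMIL0 here clears the latched TIMER0 interrupt.
 */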
static void bfin_timer_ack(void)
{
	set_gptimer_status(TIMER_GROUP1, TIMER_STATUS_TIMIL0);
}

static void __init bfin_timer_init(void)
{
	disable_gptimers(TIMER0bit);
}

static unsigned long __init bfin_clockevent_check(void)
{
	setup_irq(IRQ_TIMER0, &bfin_timer_irq);
	return get_sclk();
}

#else /* CONFIG_TICKSOURCE_CORETMR */

static int bfin_timer_set_next_event(unsigned long cycles,
				     struct clock_event_device *evt)
{
	bfin_write_TCNTL(TMPWR);
	CSYNC();
	bfin_write_TCOUNT(cycles);
	CSYNC();
	bfin_write_TCNTL(TMPWR | TMREN);
	return 0;
}

static void bfin_timer_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC: {
		unsigned long tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);
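
		/*
		 * Worked example with hypothetical numbers: for CCLK =
		 * 500 MHz, HZ = 100 and TIME_SCALE = 1, tcount =
		 * 500000000 / 100 - 1 = 4999999, so the core timer
		 * underflows (and interrupts) every 10 ms.
		 */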
		bfin_write_TCNTL(TMPWR);
		CSYNC();
		bfin_write_TSCALE(TIME_SCALE - 1);
		bfin_write_TPERIOD(tcount);
		bfin_write_TCOUNT(tcount);
		CSYNC();
		bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD);
		break;
	}
	case CLOCK_EVT_MODE_ONESHOT:
		bfin_write_TCNTL(TMPWR);
		CSYNC();
		bfin_write_TSCALE(TIME_SCALE - 1);
		bfin_write_TPERIOD(0);
		bfin_write_TCOUNT(0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		bfin_write_TCNTL(0);
		CSYNC();
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static void bfin_timer_ack(void)
{
}

static void __init bfin_timer_init(void)
{
	/* power up the timer, but don't enable it just yet */
	bfin_write_TCNTL(TMPWR);
	CSYNC();

	/* the TSCALE prescaler counter */
	bfin_write_TSCALE(TIME_SCALE - 1);

	bfin_write_TPERIOD(0);
	bfin_write_TCOUNT(0);
	CSYNC();
}

static unsigned long __init bfin_clockevent_check(void)
{
	setup_irq(IRQ_CORETMR, &bfin_timer_irq);
	return get_cclk() / TIME_SCALE;
}

void __init setup_core_timer(void)
{
	bfin_timer_init();
	bfin_timer_set_mode(CLOCK_EVT_MODE_PERIODIC, NULL);
}
#endif /* CONFIG_TICKSOURCE_GPTMR0 */

/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick
 */
irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;
	smp_mb();
	evt->event_handler(evt);
	bfin_timer_ack();
	return IRQ_HANDLED;
}

static int __init bfin_clockevent_init(void)
{
	unsigned long timer_clk;

	timer_clk = bfin_clockevent_check();

	bfin_timer_init();
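
	/*
	 * For clock events, mult/shift convert a nanosecond delta into
	 * timer cycles: cycles = (ns * mult) >> shift. div_sc() computes
	 * mult = (timer_clk << shift) / NSEC_PER_SEC for this timer.
	 */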
	clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift);
	clockevent_bfin.max_delta_ns = clockevent_delta2ns(-1, &clockevent_bfin);
	clockevent_bfin.min_delta_ns = clockevent_delta2ns(100, &clockevent_bfin);
	clockevent_bfin.cpumask = cpumask_of(0);

	clockevents_register_device(&clockevent_bfin);

	return 0;
}

void __init time_init(void)
{
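	/*
	 * (365 * 37 + 9) days: 37 years from 1970 to 2007, plus the 9 leap
	 * days in between (1972, 1976, ..., 2004).
	 */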
	time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */

#ifdef CONFIG_RTC_DRV_BFIN
	/* [#2663] hack to filter junk RTC values that would cause
	 * userspace to have to deal with time values greater than
	 * 2^31 seconds (which uClibc cannot cope with yet)
	 */
	if ((bfin_read_RTC_STAT() & 0xC0000000) == 0xC0000000) {
		printk(KERN_NOTICE "bfin-rtc: invalid date; resetting\n");
		bfin_write_RTC_STAT(0);
	}
#endif

	/* Initialize xtime. From now on, xtime is updated with timer interrupts. */
	xtime.tv_sec = secs_since_1970;
	xtime.tv_nsec = 0;
	set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);

	bfin_cs_cycles_init();
	bfin_cs_gptimer0_init();

	bfin_clockevent_init();
}