/*
 *  linux/arch/parisc/kernel/time.c
 *
 *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
 *  Modifications for ARM (C) 1994, 1995, 1996, 1997 Russell King
 *  Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, prumpf@tux.org)
 *
 * 1994-07-02  Alan Modra
 *             fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
 * 1998-12-20  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 */
# include <linux/errno.h>
# include <linux/module.h>
# include <linux/sched.h>
# include <linux/kernel.h>
# include <linux/param.h>
# include <linux/string.h>
# include <linux/mm.h>
# include <linux/interrupt.h>
# include <linux/time.h>
# include <linux/init.h>
# include <linux/smp.h>
# include <linux/profile.h>
2007-01-03 01:54:16 +03:00
# include <linux/clocksource.h>
2008-09-10 18:24:07 +04:00
# include <linux/platform_device.h>
2005-04-17 02:20:36 +04:00
# include <asm/uaccess.h>
# include <asm/io.h>
# include <asm/irq.h>
# include <asm/param.h>
# include <asm/pdc.h>
# include <asm/led.h>
# include <linux/timex.h>
2006-09-09 10:29:22 +04:00
static unsigned long clocktick __read_mostly;	/* timer cycles per tick */
2005-04-17 02:20:36 +04:00
2006-10-05 01:12:52 +04:00
/*
 * We keep time on PA-RISC Linux by using the Interval Timer which is
 * a pair of registers; one is read-only and one is write-only; both
 * accessed through CR16.  The read-only register is 32 or 64 bits wide,
 * and increments by 1 every CPU clock tick.  The architecture only
 * guarantees us a rate between 0.5 and 2, but all implementations use a
 * rate of 1.  The write-only register is 32-bits wide.  When the lowest
 * 32 bits of the read-only register compare equal to the write-only
 * register, it raises a maskable external interrupt.  Each processor has
 * an Interval Timer of its own and they are not synchronised.
 *
 * We want to generate an interrupt every 1/HZ seconds.  So we program
 * CR16 to interrupt every @clocktick cycles.  The it_value in cpu_data
 * is programmed with the intended time of the next tick.  We can be
 * held off for an arbitrarily long period of time by interrupts being
 * disabled, so we may miss one or more ticks.
 */
2006-10-07 16:01:11 +04:00
/* Per-CPU Interval Timer interrupt handler: account elapsed ticks,
 * re-arm CR16 for the next tick, and run periodic housekeeping.
 */
irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	unsigned long now;
	unsigned long next_tick;
	unsigned long cycles_elapsed, ticks_elapsed;
	unsigned long cycles_remainder;
	unsigned int cpu = smp_processor_id();
	struct cpuinfo_parisc *cpuinfo = &cpu_data[cpu];

	/* gcc can optimize for "read-only" case with a local clocktick */
	unsigned long cpt = clocktick;

	profile_tick(CPU_PROFILING);

	/* Initialize next_tick to the expected tick time. */
	next_tick = cpuinfo->it_value;

	/* Get current interval timer.
	 * CR16 reads as 64 bits in CPU wide mode.
	 * CR16 reads as 32 bits in CPU narrow mode.
	 */
	now = mfctl(16);

	cycles_elapsed = now - next_tick;
	if ((cycles_elapsed >> 5) < cpt) {
		/* use "cheap" math (add/subtract) instead
		 * of the more expensive div/mul method
		 */
		cycles_remainder = cycles_elapsed;
		ticks_elapsed = 1;
		while (cycles_remainder > cpt) {
			cycles_remainder -= cpt;
			ticks_elapsed++;
		}
	} else {
		cycles_remainder = cycles_elapsed % cpt;
		ticks_elapsed = 1 + cycles_elapsed / cpt;
	}

	/* Can we differentiate between "early CR16" (aka Scenario 1) and
	 * "long delay" (aka Scenario 3)?  I don't think so.
	 *
	 * We expected timer_interrupt to be delivered at least a few hundred
	 * cycles after the IT fires.  But it's arbitrary how much time passes
	 * before we call it "late".  I've picked one second.
	 */
	if (unlikely(ticks_elapsed > HZ)) {
		/* Scenario 3: very long delay?  bad in any case */
		printk(KERN_CRIT "timer_interrupt(CPU %d): delayed!"
			" cycles %lX rem %lX"
			" next/now %lX/%lX\n",
			cpu,
			cycles_elapsed, cycles_remainder,
			next_tick, now);
	}

	/* convert from "division remainder" to "remainder of clock tick" */
	cycles_remainder = cpt - cycles_remainder;

	/* Determine when (in CR16 cycles) next IT interrupt will fire.
	 * We want IT to fire modulo clocktick even if we miss/skip some.
	 * But those interrupts don't in fact get delivered that regularly.
	 */
	next_tick = now + cycles_remainder;

	cpuinfo->it_value = next_tick;

	/* Skip one clocktick on purpose if we are likely to miss next_tick.
	 * We want to avoid the new next_tick being less than CR16.
	 * If that happened, itimer wouldn't fire until CR16 wrapped.
	 * We'll catch the tick we missed on the tick after that.
	 */
	if (!(cycles_remainder >> 13))
		next_tick += cpt;

	/* Program the IT when to deliver the next interrupt. */
	/* Only bottom 32-bits of next_tick are written to cr16.  */
	mtctl(next_tick, 16);

	/* Done mucking with unreliable delivery of interrupts.
	 * Go do system housekeeping.
	 */
	if (!--cpuinfo->prof_counter) {
		cpuinfo->prof_counter = cpuinfo->prof_multiplier;
		update_process_times(user_mode(get_irq_regs()));
	}

	/* Only the boot CPU advances jiffies; xtime_lock serialises
	 * against readers of the time-keeping state.
	 */
	if (cpu == 0) {
		write_seqlock(&xtime_lock);
		do_timer(ticks_elapsed);
		write_sequnlock(&xtime_lock);
	}

	return IRQ_HANDLED;
}
2005-10-22 06:42:18 +04:00
unsigned long profile_pc ( struct pt_regs * regs )
{
unsigned long pc = instruction_pointer ( regs ) ;
if ( regs - > gr [ 0 ] & PSW_N )
pc - = 4 ;
# ifdef CONFIG_SMP
if ( in_lock_functions ( pc ) )
pc = regs - > gr [ 2 ] ;
# endif
return pc ;
}
EXPORT_SYMBOL ( profile_pc ) ;
2007-01-03 01:54:16 +03:00
/* clock source code */
2005-04-17 02:20:36 +04:00
2007-01-03 01:54:16 +03:00
/* Clocksource read hook: sample the free-running CR16 cycle counter. */
static cycle_t read_cr16(void)
{
	cycle_t cycles = get_cycles();

	return cycles;
}
2006-09-09 10:29:22 +04:00
2007-01-03 01:54:16 +03:00
/* Clocksource backed by CR16.  .mult is filled in at boot by time_init()
 * from the firmware-reported CPU frequency (see PAGE0->mem_10msec).
 */
static struct clocksource clocksource_cr16 = {
	.name			= "cr16",
	.rating			= 300,
	.read			= read_cr16,
	.mask			= CLOCKSOURCE_MASK(BITS_PER_LONG),
	.mult			= 0, /* to be set */
	.shift			= 22,
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS,
};
2006-09-09 10:29:22 +04:00
2007-02-27 05:24:56 +03:00
#ifdef CONFIG_SMP
/* Demote the cr16 clocksource once a second CPU comes online.
 * Returns 1 if the rating was changed, 0 otherwise.
 */
int update_cr16_clocksource(void)
{
	int changed = 0;

	/* since the cr16 cycle counters are not synchronized across CPUs,
	   we'll check if we should switch to a safe clocksource: */
	if (num_online_cpus() > 1 && clocksource_cr16.rating != 0) {
		clocksource_change_rating(&clocksource_cr16, 0);
		changed = 1;
	}

	return changed;
}
#else
int update_cr16_clocksource(void)
{
	return 0; /* no change */
}
#endif /*CONFIG_SMP*/
2005-04-17 02:20:36 +04:00
2006-09-03 11:02:16 +04:00
/* Arm the calling CPU's Interval Timer for its first tick and record
 * the expected tick time in its per-CPU data.
 */
void __init start_cpu_itimer(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned long next_tick = mfctl(16) + clocktick;

	mtctl(next_tick, 16);		/* kick off Interval Timer (CR16) */

	cpu_data[cpu].it_value = next_tick;
}
2008-09-10 18:24:07 +04:00
/* Platform device bound by the rtc-parisc driver. */
struct platform_device rtc_parisc_dev = {
	.name = "rtc-parisc",
	.id = -1,
};

/* Register the PA-RISC RTC platform device.  Registration failure is
 * only logged; booting without an RTC is acceptable.
 */
static int __init rtc_init(void)
{
	if (platform_device_register(&rtc_parisc_dev) < 0)
		printk(KERN_ERR "unable to register rtc device...\n");

	/* not necessarily an error */
	return 0;
}
module_init(rtc_init);
2005-04-17 02:20:36 +04:00
/* Boot-time clock setup: derive @clocktick from firmware data, start
 * CPU 0's Interval Timer, register the cr16 clocksource, and seed
 * xtime from the PDC time-of-day clock.
 */
void __init time_init(void)
{
	static struct pdc_tod tod_data;
	unsigned long current_cr16_khz;

	/* PAGE0->mem_10msec is CR16 cycles per 10ms, so this yields
	 * CR16 cycles per 1/HZ-second tick.
	 */
	clocktick = (100 * PAGE0->mem_10msec) / HZ;

	start_cpu_itimer();	/* get CPU 0 started */

	/* register at clocksource framework */
	current_cr16_khz = PAGE0->mem_10msec/10;  /* kHz */
	clocksource_cr16.mult = clocksource_khz2mult(current_cr16_khz,
						clocksource_cr16.shift);
	clocksource_register(&clocksource_cr16);

	if (pdc_tod_read(&tod_data) == 0) {
		unsigned long flags;

		/* Publish the firmware wall-clock time under xtime_lock
		 * so concurrent readers see a consistent sec/nsec pair.
		 */
		write_seqlock_irqsave(&xtime_lock, flags);
		xtime.tv_sec = tod_data.tod_sec;
		xtime.tv_nsec = tod_data.tod_usec * 1000;
		set_normalized_timespec(&wall_to_monotonic,
		                        -xtime.tv_sec, -xtime.tv_nsec);
		write_sequnlock_irqrestore(&xtime_lock, flags);
	} else {
		printk(KERN_ERR "Error reading tod clock\n");
		xtime.tv_sec = 0;
		xtime.tv_nsec = 0;
	}
}