/*
 *  linux/arch/parisc/kernel/time.c
 *
 *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
 *  Modifications for ARM (C) 1994, 1995, 1996, 1997 Russell King
 *  Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, prumpf@tux.org)
 *
 * 1994-07-02  Alan Modra
 *             fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
 * 1998-12-20  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched_clock.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/profile.h>
#include <linux/clocksource.h>
#include <linux/platform_device.h>
#include <linux/ftrace.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/param.h>
#include <asm/pdc.h>
#include <asm/led.h>

#include <linux/timex.h>

static unsigned long clocktick __read_mostly;	/* timer cycles per tick */

/*
 * We keep time on PA-RISC Linux by using the Interval Timer which is
 * a pair of registers; one is read-only and one is write-only; both
 * accessed through CR16.  The read-only register is 32 or 64 bits wide,
 * and increments by 1 every CPU clock tick.  The architecture only
 * guarantees us a rate between 0.5 and 2, but all implementations use a
 * rate of 1.  The write-only register is 32 bits wide.  When the lowest
 * 32 bits of the read-only register compare equal to the write-only
 * register, it raises a maskable external interrupt.  Each processor has
 * an Interval Timer of its own and they are not synchronised.
 *
 * We want to generate an interrupt every 1/HZ seconds.  So we program
 * CR16 to interrupt every @clocktick cycles.  The it_value in cpu_data
 * is programmed with the intended time of the next tick.  We can be
 * held off for an arbitrarily long period of time by interrupts being
 * disabled, so we may miss one or more ticks.
 */
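
/*
 * Worked example with hypothetical numbers: on a 250 MHz CPU (so
 * PAGE0->mem_10msec == 2,500,000 CR16 cycles per 10 ms) with HZ == 250,
 * clocktick == (100 * 2,500,000) / 250 == 1,000,000 cycles, i.e. the
 * comparison register is advanced by one million cycles (4 ms) per tick.
 */
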
irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
{
	unsigned long now;
	unsigned long next_tick;
	unsigned long ticks_elapsed = 0;
	unsigned int cpu = smp_processor_id();
	struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);

	/* gcc can optimize for "read-only" case with a local clocktick */
	unsigned long cpt = clocktick;

	profile_tick(CPU_PROFILING);

	/* Initialize next_tick to the old expected tick time. */
	next_tick = cpuinfo->it_value;

	/* Calculate how many ticks have elapsed. */
	do {
		++ticks_elapsed;
		next_tick += cpt;
		now = mfctl(16);
	} while (next_tick - now > cpt);

	/* Store (in CR16 cycles) up to when we are accounting right now. */
	cpuinfo->it_value = next_tick;

	/* Go do system housekeeping. */
	if (cpu == 0)
		xtime_update(ticks_elapsed);

	update_process_times(user_mode(get_irq_regs()));

	/* Skip clockticks on purpose if we know we would miss those.
	 * The new CR16 must be "later" than current CR16 otherwise
	 * itimer would not fire until CR16 wrapped - e.g. 4 seconds
	 * later on a 1 GHz processor. We'll account for the missed
	 * ticks on the next timer interrupt.
	 * We want IT to fire modulo clocktick even if we miss/skip some.
	 * But those interrupts don't in fact get delivered that regularly.
	 *
	 * "next_tick - now" will always give the difference regardless
	 * if one or the other wrapped. If "now" is "bigger" we'll end up
	 * with a very large unsigned number.
	 */
	while (next_tick - mfctl(16) > cpt)
		next_tick += cpt;

	/* Program the IT when to deliver the next interrupt.
	 * Only bottom 32 bits of next_tick are writable in CR16!
	 * Timer interrupt will be delivered at least a few hundred cycles
	 * after the IT fires, so if we are too close (<= 500 cycles) to the
	 * next cycle, simply skip it.
	 */
	if (next_tick - mfctl(16) <= 500)
		next_tick += cpt;
	mtctl(next_tick, 16);

	return IRQ_HANDLED;
}
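
/*
 * Return the program counter to credit with a profiling sample.
 * If the PSW nullify bit is set, the instruction at the interrupted
 * PC will not execute, so credit the preceding word instead.  Under
 * SMP, samples landing in lock functions are credited to the caller,
 * whose address is in gr2 (the PA-RISC return pointer).
 */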
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (regs->gr[0] & PSW_N)
		pc -= 4;

#ifdef CONFIG_SMP
	if (in_lock_functions(pc))
		pc = regs->gr[2];
#endif

	return pc;
}
EXPORT_SYMBOL(profile_pc);

/* clock source code */
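
/*
 * Clocksource read callback: get_cycles() returns the free-running
 * CR16 cycle counter, which is 32 or 64 bits wide depending on the
 * word size (hence the BITS_PER_LONG mask below).
 */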
static u64 notrace read_cr16(struct clocksource *cs)
{
	return get_cycles();
}

static struct clocksource clocksource_cr16 = {
	.name			= "cr16",
	.rating			= 300,
	.read			= read_cr16,
	.mask			= CLOCKSOURCE_MASK(BITS_PER_LONG),
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS,
};
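
/*
 * Arm this CPU's Interval Timer: program CR16's comparison register
 * for one clocktick from now and remember the target in per-CPU data
 * so timer_interrupt() can pick up where we left off.
 */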
void __init start_cpu_itimer(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned long next_tick = mfctl(16) + clocktick;

	mtctl(next_tick, 16);		/* kick off Interval Timer (CR16) */

	per_cpu(cpu_data, cpu).it_value = next_tick;
}

#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
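
/*
 * Expose the firmware (PDC) time-of-day clock through the generic
 * RTC framework; reads and writes go via pdc_tod_read()/pdc_tod_set().
 */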
static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
{
	struct pdc_tod tod_data;

	memset(tm, 0, sizeof(*tm));
	if (pdc_tod_read(&tod_data) < 0)
		return -EOPNOTSUPP;

	/* we treat tod_sec as unsigned, so this can work until year 2106 */
	rtc_time64_to_tm(tod_data.tod_sec, tm);
	return rtc_valid_tm(tm);
}

static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
{
	time64_t secs = rtc_tm_to_time64(tm);

	if (pdc_tod_set(secs, 0) < 0)
		return -EOPNOTSUPP;

	return 0;
}

static const struct rtc_class_ops rtc_generic_ops = {
	.read_time = rtc_generic_get_time,
	.set_time = rtc_generic_set_time,
};

static int __init rtc_init(void)
{
	struct platform_device *pdev;

	pdev = platform_device_register_data(NULL, "rtc-generic", -1,
					     &rtc_generic_ops,
					     sizeof(rtc_generic_ops));

	return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(rtc_init);
#endif
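
/*
 * Seed the wall clock at boot from the PDC time-of-day clock; the
 * timekeeping core calls this long before the RTC driver is bound.
 */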
2008-09-10 14:24:07 +00:00
2009-12-23 04:14:03 +00:00
void read_persistent_clock(struct timespec *ts)
{
	static struct pdc_tod tod_data;

	if (pdc_tod_read(&tod_data) == 0) {
		ts->tv_sec = tod_data.tod_sec;
		ts->tv_nsec = tod_data.tod_usec * 1000;
	} else {
		printk(KERN_ERR "Error reading tod clock\n");
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
	}
}
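
/*
 * sched_clock() read callback: the scheduler clock counts raw CR16
 * cycles; the sched_clock framework scales them to nanoseconds using
 * the frequency passed to sched_clock_register() in time_init().
 */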
static u64 notrace read_cr16_sched_clock(void)
{
	return get_cycles();
}

/*
 * timer interrupt and sched_clock() initialization
 */
void __init time_init(void)
{
	unsigned long cr16_hz;

	clocktick = (100 * PAGE0->mem_10msec) / HZ;
	start_cpu_itimer();	/* get CPU 0 started */

	cr16_hz = 100 * PAGE0->mem_10msec;	/* Hz */

	/* register as sched_clock source */
	sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz);
}
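
/*
 * Register CR16 with the clocksource framework, demoting it first if
 * the per-CPU interval timers cannot be trusted to stay in sync.
 */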
static int __init init_cr16_clocksource(void)
{
	/*
	 * The cr16 interval timers are not synchronized across CPUs on
	 * different sockets, so mark them unstable and lower their rating
	 * on multi-socket SMP systems.
	 */
	if (num_online_cpus() > 1) {
		int cpu;
		unsigned long cpu0_loc;
		cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;

		for_each_online_cpu(cpu) {
			if (cpu == 0)
				continue;
			if ((cpu0_loc != 0) &&
			    (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
				continue;

			clocksource_cr16.name = "cr16_unstable";
			clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
			clocksource_cr16.rating = 0;
			break;
		}
	}

	/* XXX: We may want to mark sched_clock stable here if cr16 clocks are
	 *	in sync:
	 *	(clocksource_cr16.flags == CLOCK_SOURCE_IS_CONTINUOUS) */

	/* register at clocksource framework */
	clocksource_register_hz(&clocksource_cr16,
		100 * PAGE0->mem_10msec);

	return 0;
}

device_initcall(init_cr16_clocksource);