/*
 * linux/kernel/time/clocksource.c
 *
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * TODO WishList:
 *   o Allow clocksource drivers to be unregistered
 */
#include <linux/device.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>

void timecounter_init(struct timecounter *tc,
		      const struct cyclecounter *cc,
		      u64 start_tstamp)
{
	tc->cc = cc;
	tc->cycle_last = cc->read(cc);
	tc->nsec = start_tstamp;
}
EXPORT_SYMBOL_GPL(timecounter_init);
/**
 * timecounter_read_delta - get nanoseconds since last call of this function
 * @tc:		Pointer to time counter
 *
 * When the underlying cycle counter runs over, this will be handled
 * correctly as long as it does not run over more than once between
 * calls.
 *
 * The first call to this function for a new time counter initializes
 * the time tracking and returns an undefined result.
 */
static u64 timecounter_read_delta(struct timecounter *tc)
{
	cycle_t cycle_now, cycle_delta;
	u64 ns_offset;

	/* read cycle counter: */
	cycle_now = tc->cc->read(tc->cc);

	/* calculate the delta since the last timecounter_read_delta(): */
	cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;

	/* convert to nanoseconds: */
	ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta);

	/* update time stamp of timecounter_read_delta() call: */
	tc->cycle_last = cycle_now;

	return ns_offset;
}

u64 timecounter_read(struct timecounter *tc)
{
	u64 nsec;

	/* increment time by nanoseconds since last call */
	nsec = timecounter_read_delta(tc);
	nsec += tc->nsec;
	tc->nsec = nsec;

	return nsec;
}
EXPORT_SYMBOL_GPL(timecounter_read);
u64 timecounter_cyc2time(struct timecounter *tc,
			 cycle_t cycle_tstamp)
{
	u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
	u64 nsec;

	/*
	 * Instead of always treating cycle_tstamp as more recent
	 * than tc->cycle_last, detect when it is too far in the
	 * future and treat it as an old time stamp instead.
	 */
	if (cycle_delta > tc->cc->mask / 2) {
		cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
		nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta);
	} else {
		nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec;
	}

	return nsec;
}
EXPORT_SYMBOL_GPL(timecounter_cyc2time);
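
/*
 * Illustrative sketch (not used anywhere in this file): roughly how a driver
 * could feed a free-running counter into the timecounter helpers above. The
 * example_* names, the software-maintained counter and the pretended 1 GHz
 * rate are assumptions made purely for this example.
 */
static u64 example_counter;	/* stand-in for a free-running hardware counter */

static cycle_t example_cc_read(const struct cyclecounter *cc)
{
	return (cycle_t)example_counter;
}

static struct cyclecounter example_cc = {
	.read	= example_cc_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.mult	= 1,	/* pretend a 1 GHz counter: one cycle == one nanosecond */
	.shift	= 0,
};

static struct timecounter example_tc;

static void __maybe_unused example_timecounter_setup(void)
{
	/* Start time tracking at an arbitrary 0 ns time stamp. */
	timecounter_init(&example_tc, &example_cc, 0);
}

static u64 __maybe_unused example_timecounter_ns(void)
{
	/* Nanoseconds accumulated since example_timecounter_setup(). */
	return timecounter_read(&example_tc);
}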

/**
 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
 * @mult:	pointer to mult variable
 * @shift:	pointer to shift variable
 * @from:	frequency to convert from
 * @to:		frequency to convert to
 * @maxsec:	guaranteed runtime conversion range in seconds
 *
 * The function evaluates the shift/mult pair for the scaled math
 * operations of clocksources and clockevents.
 *
 * @to and @from are frequency values in HZ. For clock sources @to is
 * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
 * event @to is the counter frequency and @from is NSEC_PER_SEC.
 *
 * The @maxsec conversion range argument controls the time frame in
 * seconds which must be covered by the runtime conversion with the
 * calculated mult and shift factors. This guarantees that no 64bit
 * overflow happens when the input value of the conversion is
 * multiplied with the calculated mult factor. Larger ranges may
 * reduce the conversion accuracy by choosing smaller mult and shift
 * factors.
 */
void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
{
	u64 tmp;
	u32 sft, sftacc = 32;

	/*
	 * Calculate the shift factor which is limiting the conversion
	 * range:
	 */
	tmp = ((u64)maxsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/*
	 * Find the conversion shift/mult pair which has the best
	 * accuracy and fits the maxsec conversion range:
	 */
	for (sft = 32; sft > 0; sft--) {
		tmp = (u64) to << sft;
		tmp += from / 2;
		do_div(tmp, from);
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}
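
/*
 * Worked example (illustrative only, nothing in this file relies on it):
 * for a hypothetical 1 MHz counter converted to nanoseconds with a
 * guaranteed 600 second range, the loop above settles on mult = 4194304000
 * and shift = 22, so one counter cycle maps to (1 * 4194304000) >> 22 =
 * 1000 ns, as expected for a microsecond tick.
 */
static void __maybe_unused example_calc_mult_shift(void)
{
	u32 mult, shift;
	s64 ns;

	clocks_calc_mult_shift(&mult, &shift, 1000000, NSEC_PER_SEC, 600);
	/* Same scaled math the timekeeping code uses: ns = (cycles * mult) >> shift */
	ns = clocksource_cyc2ns(5, mult, shift);	/* 5 cycles == 5 us == 5000 ns */
	(void)ns;
}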

/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[32];
static int finished_booting;
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;
static atomic_t watchdog_reset_pending;

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)

static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * If kthread_run fails the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}

static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;
	if (finished_booting)
		schedule_work(&watchdog_work);
}

static void clocksource_unstable(struct clocksource *cs, int64_t delta)
{
	printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
	       cs->name, delta);
	__clocksource_unstable(cs);
}

/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs:		clocksource to be marked unstable
 *
 * This function is called instead of clocksource_change_rating from
 * cpu hotplug code to avoid a deadlock between the clocksource mutex
 * and the cpu hotplug mutex. It defers the update of the clocksource
 * to the watchdog thread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_watchdog(unsigned long data)
{
	struct clocksource *cs;
	cycle_t csnow, wdnow;
	int64_t wd_nsec, cs_nsec;
	int next_cpu, reset_pending;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	reset_pending = atomic_read(&watchdog_reset_pending);

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		local_irq_disable();
		csnow = cs->read(cs);
		wdnow = watchdog->read(watchdog);
		local_irq_enable();

		/* Clocksource initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
		    atomic_read(&watchdog_reset_pending)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = wdnow;
			cs->cs_last = csnow;
			continue;
		}

		wd_nsec = clocksource_cyc2ns((wdnow - cs->wd_last) & watchdog->mask,
					     watchdog->mult, watchdog->shift);

		cs_nsec = clocksource_cyc2ns((csnow - cs->cs_last) &
					     cs->mask, cs->mult, cs->shift);
		cs->cs_last = csnow;
		cs->wd_last = wdnow;

		if (atomic_read(&watchdog_reset_pending))
			continue;

		/* Check the deviation from the watchdog clocksource. */
		if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) {
			clocksource_unstable(cs, cs_nsec - wd_nsec);
			continue;
		}

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
			/*
			 * We just marked the clocksource as highres-capable,
			 * notify the rest of the system as well so that we
			 * transition into high-res mode:
			 */
			tick_clock_notify();
		}
	}

	/*
	 * We only clear watchdog_reset_pending when we did a full cycle
	 * through all clocksources.
	 */
	if (reset_pending)
		atomic_dec(&watchdog_reset_pending);

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);
	watchdog_timer.expires += WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, next_cpu);
out:
	spin_unlock(&watchdog_lock);
}

static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	init_timer(&watchdog_timer);
	watchdog_timer.function = clocksource_watchdog;
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}

static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}

static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}

static void clocksource_resume_watchdog(void)
{
	atomic_inc(&watchdog_reset_pending);
}

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating) {
			watchdog = cs;
			/* Reset watchdog cycles */
			clocksource_reset_watchdog();
		}
	}
	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	struct clocksource *tmp;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a watched clocksource. */
		list_del_init(&cs->wd_list);
	} else if (cs == watchdog) {
		/* Reset watchdog cycles */
		clocksource_reset_watchdog();
		/* Current watchdog is removed. Find an alternative. */
		watchdog = NULL;
		list_for_each_entry(tmp, &clocksource_list, list) {
			if (tmp == cs || tmp->flags & CLOCK_SOURCE_MUST_VERIFY)
				continue;
			if (!watchdog || tmp->rating > watchdog->rating)
				watchdog = tmp;
		}
	}
	cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static int clocksource_watchdog_kthread(void *data)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	LIST_HEAD(unstable);

	mutex_lock(&clocksource_mutex);
	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list)
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			list_add(&cs->wd_list, &unstable);
		}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	/* Needs to be done outside of watchdog lock */
	list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
		list_del_init(&cs->wd_list);
		__clocksource_change_rating(cs, 0);
	}
	mutex_unlock(&clocksource_mutex);
	return 0;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int clocksource_watchdog_kthread(void *data) { return 0; }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */

/**
 * clocksource_suspend - suspend the clocksource(s)
 */
void clocksource_suspend(void)
{
	struct clocksource *cs;

	list_for_each_entry_reverse(cs, &clocksource_list, list)
		if (cs->suspend)
			cs->suspend(cs);
}

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume(cs);

	clocksource_resume_watchdog();
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog. This might fail when the kernel
 * was stopped in code which holds watchdog_lock.
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}

/**
 * clocksource_max_adjustment - Returns max adjustment amount
 * @cs:		Pointer to clocksource
 *
 */
static u32 clocksource_max_adjustment(struct clocksource *cs)
{
	u64 ret;
	/*
	 * We won't try to correct for more than 11% adjustments (110,000 ppm),
	 */
	ret = (u64)cs->mult * 11;
	do_div(ret, 100);
	return (u32)ret;
}
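
/*
 * Worked example (illustrative): continuing the hypothetical 1 MHz counter
 * above with mult = 4194304000, the largest correction this permits is
 * 4194304000 * 11 / 100 = 461373440, i.e. roughly +/- 11% of mult.
 */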

/**
 * clocksource_max_deferment - Returns max time the clocksource can be deferred
 * @cs:		Pointer to clocksource
 *
 */
static u64 clocksource_max_deferment(struct clocksource *cs)
{
	u64 max_nsecs, max_cycles;

	/*
	 * Calculate the maximum number of cycles that we can pass to the
	 * cyc2ns function without overflowing a 64-bit signed result. The
	 * maximum number of cycles is equal to ULLONG_MAX/(cs->mult+cs->maxadj)
	 * which is equivalent to the below.
	 * max_cycles < (2^63)/(cs->mult + cs->maxadj)
	 * max_cycles < 2^(log2((2^63)/(cs->mult + cs->maxadj)))
	 * max_cycles < 2^(log2(2^63) - log2(cs->mult + cs->maxadj))
	 * max_cycles < 2^(63 - log2(cs->mult + cs->maxadj))
	 * max_cycles < 1 << (63 - log2(cs->mult + cs->maxadj))
	 * Please note that we add 1 to the result of the log2 to account for
	 * any rounding errors, ensure the above inequality is satisfied and
	 * no overflow will occur.
	 */
	max_cycles = 1ULL << (63 - (ilog2(cs->mult + cs->maxadj) + 1));

	/*
	 * The actual maximum number of cycles we can defer the clocksource is
	 * determined by the minimum of max_cycles and cs->mask.
	 * Note: Here we subtract the maxadj to make sure we don't sleep for
	 * too long if there's a large negative adjustment.
	 */
	max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
	max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult - cs->maxadj,
				       cs->shift);

	/*
	 * To ensure that the clocksource does not wrap whilst we are idle,
	 * limit the time the clocksource can be deferred by 12.5%. Please
	 * note a margin of 12.5% is used because this can be computed with
	 * a shift, versus say 10% which would require division.
	 */
	return max_nsecs - (max_nsecs >> 3);
}

#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET

/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource
 * which is selected by userspace override.
 */
static void clocksource_select(void)
{
	struct clocksource *best, *cs;

	if (!finished_booting || list_empty(&clocksource_list))
		return;
	/* First clocksource on the list has the best rating. */
	best = list_first_entry(&clocksource_list, struct clocksource, list);
	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz)
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    tick_oneshot_mode_active()) {
			/* Override clocksource cannot be used. */
			printk(KERN_WARNING "Override clocksource %s is not "
			       "HRT compatible. Cannot switch while in "
			       "HRT/NOHZ mode\n", cs->name);
			override_name[0] = 0;
		} else
			/* Override clocksource can be used. */
			best = cs;
		break;
	}
	if (curr_clocksource != best) {
		printk(KERN_INFO "Switching to clocksource %s\n", best->name);
		curr_clocksource = best;
		timekeeping_notify(curr_clocksource);
	}
}

#else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */

static inline void clocksource_select(void) { }

#endif

/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	mutex_lock(&clocksource_mutex);
	curr_clocksource = clocksource_default_clock();
	mutex_unlock(&clocksource_mutex);

	finished_booting = 1;

	/*
	 * Run the watchdog first to eliminate unstable clock sources
	 */
	clocksource_watchdog_kthread(NULL);

	mutex_lock(&clocksource_mutex);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
fs_initcall(clocksource_done_booting);

/*
 * Enqueue the clocksource sorted by rating
 */
static void clocksource_enqueue(struct clocksource *cs)
{
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;

	list_for_each_entry(tmp, &clocksource_list, list)
		/* Keep track of the place where to insert */
		if (tmp->rating >= cs->rating)
			entry = &tmp->list;
	list_add(&cs->list, entry);
}

/**
 * __clocksource_updatefreq_scale - Used to update the clocksource with a new freq
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * This should only be called from the clocksource->enable() method.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_updatefreq_hz() or clocksource_updatefreq_khz() helper functions.
 */
void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	u64 sec;

	/*
	 * Calc the maximum number of seconds which we can run before
	 * wrapping around. For clocksources which have a mask > 32bit
	 * we need to limit the max sleep time to have a good
	 * conversion precision. 10 minutes is still a reasonable
	 * amount. That results in a shift value of 24 for a
	 * clocksource with mask >= 40bit and f >= 4GHz. That maps to
	 * ~ 0.06ppm granularity for NTP. We apply the same 12.5%
	 * margin as we do in clocksource_max_deferment()
	 */
	sec = (cs->mask - (cs->mask >> 3));
	do_div(sec, freq);
	do_div(sec, scale);
	if (!sec)
		sec = 1;
	else if (sec > 600 && cs->mask > UINT_MAX)
		sec = 600;

	clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
			       NSEC_PER_SEC / scale, sec * scale);

	/*
	 * For clocksources that have very large mults, reduce mult/shift
	 * to avoid overflow. Since mult may be adjusted by ntp, add an
	 * extra safety margin.
	 */
	cs->maxadj = clocksource_max_adjustment(cs);
	while ((cs->mult + cs->maxadj < cs->mult)
		|| (cs->mult - cs->maxadj > cs->mult)) {
		cs->mult >>= 1;
		cs->shift--;
		cs->maxadj = clocksource_max_adjustment(cs);
	}

	cs->max_idle_ns = clocksource_max_deferment(cs);
}
EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);

/**
 * __clocksource_register_scale - Used to install new clocksources
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_register_hz() or clocksource_register_khz() helper functions.
 */
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	/* Initialize mult/shift and max_idle_ns */
	__clocksource_updatefreq_scale(cs, scale, freq);

	/* Add clocksource to the clocksource list */
	mutex_lock(&clocksource_mutex);
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(__clocksource_register_scale);
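
/*
 * Illustrative driver-side sketch (not part of this file's code paths):
 * roughly how a clocksource driver reaches __clocksource_register_scale()
 * via the clocksource_register_hz() helper. The example_* names, the 24 MHz
 * rate, the rating and the software-maintained counter are assumptions made
 * only for this example.
 */
static u64 example_hw_counter;	/* stand-in for a free-running hardware counter */

static cycle_t example_cs_read(struct clocksource *cs)
{
	return (cycle_t)example_hw_counter;
}

static struct clocksource example_cs = {
	.name	= "example",
	.rating	= 200,
	.read	= example_cs_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __maybe_unused example_cs_init(void)
{
	/* Derives mult/shift and max_idle_ns, then enqueues and reselects. */
	return clocksource_register_hz(&example_cs, 24000000);
}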

/**
 * clocksource_register - Used to install new clocksources
 * @cs:		clocksource to be registered
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 */
int clocksource_register(struct clocksource *cs)
{
	/* calculate max adjustment for given mult/shift */
	cs->maxadj = clocksource_max_adjustment(cs);
	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
		"Clocksource %s might overflow on 11%% adjustment\n",
		cs->name);

	/* calculate max idle time permitted for this clocksource */
	cs->max_idle_ns = clocksource_max_deferment(cs);

	mutex_lock(&clocksource_mutex);
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL(clocksource_register);

static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
	clocksource_select();
}

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 * @cs:		clocksource to be changed
 * @rating:	new rating
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	mutex_lock(&clocksource_mutex);
	__clocksource_change_rating(cs, rating);
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);

/**
 * clocksource_unregister - remove a registered clocksource
 * @cs:	clocksource to be unregistered
 */
void clocksource_unregister(struct clocksource *cs)
{
	mutex_lock(&clocksource_mutex);
	clocksource_dequeue_watchdog(cs);
	list_del(&cs->list);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_unregister);

#ifdef CONFIG_SYSFS
/**
 * sysfs_show_current_clocksources - sysfs interface for current clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing current clocksource.
 */
static ssize_t
sysfs_show_current_clocksources(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
	mutex_unlock(&clocksource_mutex);

	return count;
}

/**
 * sysfs_override_clocksource - interface for manually overriding clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	name of override clocksource
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t sysfs_override_clocksource(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	size_t ret = count;

	/* strings from sysfs write are not 0 terminated! */
	if (count >= sizeof(override_name))
		return -EINVAL;

	/* strip off \n: */
	if (buf[count-1] == '\n')
		count--;

	mutex_lock(&clocksource_mutex);

	if (count > 0)
		memcpy(override_name, buf, count);
	override_name[count] = 0;

	clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}
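
/*
 * Usage note: with CONFIG_SYSFS the override above is normally driven from
 * user space by writing the name of a registered clocksource to
 * /sys/devices/system/clocksource/clocksource0/current_clocksource.
 */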

/**
 * sysfs_show_available_clocksources - sysfs interface for listing clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t
sysfs_show_available_clocksources(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			count += snprintf(buf + count,
				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
				  "%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}

/*
 * Sysfs setup bits:
 */
static DEVICE_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
		   sysfs_override_clocksource);

static DEVICE_ATTR(available_clocksource, 0444,
		   sysfs_show_available_clocksources, NULL);

static struct bus_type clocksource_subsys = {
	.name = "clocksource",
	.dev_name = "clocksource",
};

static struct device device_clocksource = {
	.id	= 0,
	.bus	= &clocksource_subsys,
};

static int __init init_clocksource_sysfs(void)
{
	int error = subsys_system_register(&clocksource_subsys, NULL);

	if (!error)
		error = device_register(&device_clocksource);
	if (!error)
		error = device_create_file(
				&device_clocksource,
				&dev_attr_current_clocksource);
	if (!error)
		error = device_create_file(
				&device_clocksource,
				&dev_attr_available_clocksource);
	return error;
}

device_initcall(init_clocksource_sysfs);
#endif /* CONFIG_SYSFS */

/**
 * boot_override_clocksource - boot clock override
 * @str:	override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char *str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	return 1;
}

__setup("clocksource=", boot_override_clocksource);

/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str:	override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name
 */
static int __init boot_override_clock(char *str)
{
	if (!strcmp(str, "pmtmr")) {
		printk("Warning: clock=pmtmr is deprecated. "
			"Use clocksource=acpi_pm.\n");
		return boot_override_clocksource("acpi_pm");
	}
	printk("Warning! clock= boot option is deprecated. "
		"Use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);