/*
 * sched_clock for unstable cpu clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 *  Updates and enhancements:
 *    Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 *
 * What:
 *
 * cpu_clock(i) provides a fast (execution time) high resolution
 * clock with bounded drift between CPUs. The value of cpu_clock(i)
 * is monotonic for constant i. The timestamp returned is in nanoseconds.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 *
 * There is no strict promise about the base, although it tends to start
 * at 0 on boot (but people really shouldn't rely on that).
 *
 * cpu_clock(i)       -- can be used from any context, including NMI.
 * sched_clock_cpu(i) -- must be used with local IRQs disabled (implied by NMI)
 * local_clock()      -- is cpu_clock() on the current cpu.
 *
 * How:
 *
 * The implementation either uses sched_clock() when
 * !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, which means in that case
 * sched_clock() is assumed to provide these properties (mostly it means
 * the architecture provides a globally synchronized highres time source).
 *
 * Otherwise it tries to create a semi stable clock from a mixture of other
 * clocks, including:
 *
 *  - GTOD (clock monotonic)
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use GTOD as base and use sched_clock() deltas to improve resolution. The
 * deltas are filtered to provide monotonicity and to keep them within an
 * expected window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (TSC gets stopped).
 *
 *
 * Notes:
 *
 * The !IRQ-safety of sched_clock() and sched_clock_cpu() comes from things
 * like cpufreq interrupts that can change the base clock (TSC) multiplier
 * and cause funny jumps in time -- although the filtering provided by
 * sched_clock_cpu() should mitigate serious artifacts, we cannot rely on it
 * in general since for !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK we fully rely on
 * sched_clock().
 */
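
/*
 * Example (illustrative only; the snippet below is a sketch, not code taken
 * from this file):
 *
 *	u64 a = cpu_clock(0);
 *	u64 b = cpu_clock(0);
 *	u64 c = cpu_clock(1);
 *
 * b - a is a meaningful (non-negative) delta because both timestamps use
 * the same cpu argument; c - a is not, and may even come out negative, as
 * the BIG FAT WARNING above spells out.
 */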

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}
EXPORT_SYMBOL_GPL(sched_clock);
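
/*
 * Example (hypothetical sketch of an architecture override; read_cycles(),
 * cyc_mult and cyc_shift are made-up placeholders, not a real API): an
 * architecture with a constant-rate cycle counter would typically convert
 * cycles to nanoseconds with a multiply and shift rather than rely on the
 * jiffies fallback above:
 *
 *	unsigned long long sched_clock(void)
 *	{
 *		return (read_cycles() * cyc_mult) >> cyc_shift;
 *	}
 *
 * Since the default above is declared __attribute__((weak)), such an
 * architecture definition simply overrides it at link time.
 */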

__read_mostly int sched_clock_running;

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__read_mostly int sched_clock_stable;

struct sched_clock_data {
	u64			tick_raw;
	u64			tick_gtod;
	u64			clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
	}

	sched_clock_running = 1;
}

/*
 * min, max except they take wrapping into account
 */

static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}
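
/*
 * Worked example (illustrative): the signed view of the subtraction makes
 * the comparison wrap-safe. With x = 10 and y = ULLONG_MAX, x - y wraps
 * around to 11, which is a small positive value when interpreted as s64,
 * so wrap_max() picks x even though x < y as plain u64 values -- x is
 * "ahead" of y modulo 2^64.
 */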

/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 sched_clock_local(struct sched_clock_data *scd)
{
	u64 now, clock, old_clock, min_clock, max_clock;
	s64 delta;

again:
	now = sched_clock();
	delta = now - scd->tick_raw;
	if (unlikely(delta < 0))
		delta = 0;

	old_clock = scd->clock;

	/*
	 * scd->clock = clamp(scd->tick_gtod + delta,
	 *		      max(scd->tick_gtod, scd->clock),
	 *		      scd->tick_gtod + TICK_NSEC);
	 */

	clock = scd->tick_gtod + delta;
	min_clock = wrap_max(scd->tick_gtod, old_clock);
	max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
		goto again;

	return clock;
}
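
/*
 * Worked numerical example for the clamp above (illustrative, made-up
 * nanosecond values): with tick_gtod = 1000, a previous clock of 1100,
 * TICK_NSEC = 1000 and a raw delta of 5000 (say, a TSC glitch), the
 * candidate clock 1000 + 5000 = 6000 is clamped down to
 * max_clock = tick_gtod + TICK_NSEC = 2000. A candidate below
 * min_clock = max(tick_gtod, old clock) = 1100 would likewise be raised
 * to 1100, so the per-cpu clock never goes backwards and never runs more
 * than a tick ahead of its GTOD base.
 */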

static u64 sched_clock_remote(struct sched_clock_data *scd)
{
	struct sched_clock_data *my_scd = this_scd();
	u64 this_clock, remote_clock;
	u64 *ptr, old_val, val;

	sched_clock_local(my_scd);
again:
	this_clock = my_scd->clock;
	remote_clock = scd->clock;

	/*
	 * Use the opportunity that we have both clock values at hand
	 * to couple the two clocks: we take the larger time as the
	 * latest time for both runqueues. (this creates monotonic
	 * movement)
	 */
	if (likely((s64)(remote_clock - this_clock) < 0)) {
		ptr = &scd->clock;
		old_val = remote_clock;
		val = this_clock;
	} else {
		/*
		 * Should be rare, but possible:
		 */
		ptr = &my_scd->clock;
		old_val = this_clock;
		val = remote_clock;
	}

	if (cmpxchg64(ptr, old_val, val) != old_val)
		goto again;

	return val;
}
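
/*
 * Illustrative example (made-up values): if this cpu's clock reads 5000
 * and the remote cpu's clock reads 4800, the remote clock is pulled
 * forward to 5000 via cmpxchg64(); in the rarer opposite case the local
 * clock is pulled forward instead. Either way the smaller of the two
 * clocks is advanced, never wound back, which is what keeps readings of
 * a remote cpu's clock monotonic.
 */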

/*
 * Similar to cpu_clock(), but requires local IRQs to be disabled.
 *
 * See cpu_clock().
 */
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd;
	u64 clock;

	WARN_ON_ONCE(!irqs_disabled());

	if (sched_clock_stable)
		return sched_clock();

	if (unlikely(!sched_clock_running))
		return 0ull;

	scd = cpu_sdc(cpu);

	if (cpu != smp_processor_id())
		clock = sched_clock_remote(scd);
	else
		clock = sched_clock_local(scd);

	return clock;
}

void sched_clock_tick(void)
{
	struct sched_clock_data *scd;
	u64 now, now_gtod;

	if (sched_clock_stable)
		return;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	scd = this_scd();
	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	sched_clock_local(scd);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	if (timekeeping_suspended)
		return;

	sched_clock_tick();
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

/*
 * As outlined at the top, provides a fast, high resolution, nanosecond
 * time source that is monotonic per cpu argument and has bounded drift
 * between cpus.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
u64 cpu_clock(int cpu)
{
	u64 clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	return clock;
}

/*
 * Similar to cpu_clock() for the current cpu. Time will only be observed
 * to be monotonic if care is taken to only compare timestamps taken on the
 * same CPU.
 *
 * See cpu_clock().
 */
u64 local_clock(void)
{
	u64 clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(smp_processor_id());
	local_irq_restore(flags);

	return clock;
}
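
/*
 * Example (illustrative only; do_work() is a made-up placeholder): timing
 * a short section of code on the current cpu with local_clock():
 *
 *	u64 start, end;
 *
 *	start = local_clock();
 *	do_work();
 *	end = local_clock();
 *	pr_info("took %llu ns\n", (unsigned long long)(end - start));
 *
 * The delta is only trustworthy if the task did not migrate to another
 * CPU between the two reads, per the comment above.
 */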

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

void sched_clock_init(void)
{
	sched_clock_running = 1;
}

u64 sched_clock_cpu(int cpu)
{
	if (unlikely(!sched_clock_running))
		return 0;

	return sched_clock();
}

u64 cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}

u64 local_clock(void)
{
	return sched_clock_cpu(0);
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

EXPORT_SYMBOL_GPL(cpu_clock);
EXPORT_SYMBOL_GPL(local_clock);