/*
 * sched_clock for unstable cpu clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Updates and enhancements:
 *  Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 * Create a semi-stable clock from a mixture of other events, including:
 *  - gtod
 *  - jiffies
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use gtod as base and the unstable clock deltas. The deltas are filtered,
 * making it monotonic and keeping it within an expected window. This window
 * is set up using jiffies.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (TSC gets stopped).
 *
 * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
 * consistent between cpus (never more than 1 jiffy of difference).
 */
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
#include <linux/module.h>

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK

#define MULTI_SHIFT 15
/* Max is double, Min is 1/2 */
#define MAX_MULTI (2LL << MULTI_SHIFT)
#define MIN_MULTI (1LL << (MULTI_SHIFT - 1))
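
/*
 * scd->multi is a fixed-point (MULTI_SHIFT fractional bits) ratio of gtod
 * time to raw sched_clock() time over the last tick, clamped to [1/2, 2].
 * Raw deltas are scaled by it in __update_sched_clock(), so e.g. a raw
 * clock that ran ~10% fast over the last tick gets a multi of roughly
 * (1 << 15) / 1.1 ~= 29789 and its deltas are scaled down accordingly.
 */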

struct sched_clock_data {
	/*
	 * Raw spinlock - this is a special case: this might be called
	 * from within instrumentation code so we don't want to do any
	 * instrumentation ourselves.
	 */
	raw_spinlock_t		lock;

	unsigned long		tick_jiffies;	/* jiffies at the last tick */
	u64			prev_raw;	/* raw value at the last clock update */
	u64			tick_raw;	/* raw sched_clock() at the last tick */
	u64			tick_gtod;	/* gtod (ns) at the last tick */
	u64			clock;		/* current filtered clock value */
	s64			multi;		/* fixed-point raw->gtod scaling factor */
#ifdef CONFIG_NO_HZ
	int			check_max;	/* clamp to max_clock? cleared while the tick is stopped */
#endif
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

static __read_mostly int sched_clock_running;

void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	unsigned long now_jiffies = jiffies;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
		scd->tick_jiffies = now_jiffies;
		scd->prev_raw = 0;
		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
		scd->multi = 1 << MULTI_SHIFT;
#ifdef CONFIG_NO_HZ
		scd->check_max = 1;
#endif
	}

	sched_clock_running = 1;
}

#ifdef CONFIG_NO_HZ
/*
 * Dynamic ticks make the delta jiffies inaccurate, which
 * prevents us from checking the maximum time update.
 * Disable the maximum check while the tick is stopped.
 */
void sched_clock_tick_stop(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);

	scd->check_max = 0;
}

void sched_clock_tick_start(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);

	scd->check_max = 1;
}

static int check_max(struct sched_clock_data *scd)
{
	return scd->check_max;
}
#else
static int check_max(struct sched_clock_data *scd)
{
	return 1;
}
#endif /* CONFIG_NO_HZ */

/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use jiffies to generate a min,max window to clip the raw values
 */
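/*
 * As a worked example (assuming HZ=1000, i.e. TICK_NSEC == 1,000,000):
 * one jiffy after the last tick, delta_jiffies == 1, so the clock is
 * clipped to min_clock ~= tick_gtod + 0.5ms and
 * max_clock == tick_gtod + 3ms.
 */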
static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *time)
{
	unsigned long now_jiffies = jiffies;
	long delta_jiffies = now_jiffies - scd->tick_jiffies;
	u64 clock = scd->clock;
	u64 min_clock, max_clock;
	s64 delta = now - scd->prev_raw;

	WARN_ON_ONCE(!irqs_disabled());

	/*
	 * At the scheduler tick the clock can be just under the gtod. We
	 * don't want to push it too far ahead prematurely.
	 */
	min_clock = scd->tick_gtod + (delta_jiffies * TICK_NSEC);
	if (min_clock > TICK_NSEC)
		min_clock -= TICK_NSEC / 2;

	if (unlikely(delta < 0)) {
		clock++;
		goto out;
	}

	/*
	 * The clock must stay within a jiffy of the gtod.
	 * But since we may be at the start of a jiffy or the end of one
	 * we add another jiffy buffer.
	 */
	max_clock = scd->tick_gtod + (2 + delta_jiffies) * TICK_NSEC;

	delta *= scd->multi;
	delta >>= MULTI_SHIFT;

	if (unlikely(clock + delta > max_clock) && check_max(scd)) {
		if (clock < max_clock)
			clock = max_clock;
		else
			clock++;
	} else {
		clock += delta;
	}

 out:
	if (unlikely(clock < min_clock))
		clock = min_clock;

	if (time)
		*time = clock;
	else {
		scd->prev_raw = now;
		scd->clock = clock;
	}
}
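
/*
 * Always take the two scd locks in pointer order so that concurrent
 * cross-cpu clock reads cannot deadlock on each other (ABBA).
 */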
static void lock_double_clock(struct sched_clock_data *data1,
			      struct sched_clock_data *data2)
{
	if (data1 < data2) {
		__raw_spin_lock(&data1->lock);
		__raw_spin_lock(&data2->lock);
	} else {
		__raw_spin_lock(&data2->lock);
		__raw_spin_lock(&data1->lock);
	}
}

u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);
	u64 now, clock;

	if (unlikely(!sched_clock_running))
		return 0ull;

	WARN_ON_ONCE(!irqs_disabled());
	now = sched_clock();

	if (cpu != raw_smp_processor_id()) {
		/*
		 * in order to update a remote cpu's clock based on our
		 * unstable raw time rebase it against:
		 *	tick_raw	(offset between raw counters)
		 *	tick_gtod	(tick offset between cpus)
		 */
		struct sched_clock_data *my_scd = this_scd();

		lock_double_clock(scd, my_scd);

		now -= my_scd->tick_raw;
		now += scd->tick_raw;

		now += my_scd->tick_gtod;
		now -= scd->tick_gtod;

		__raw_spin_unlock(&my_scd->lock);

		__update_sched_clock(scd, now, &clock);

		__raw_spin_unlock(&scd->lock);
	} else {
		__raw_spin_lock(&scd->lock);
		__update_sched_clock(scd, now, NULL);
		clock = scd->clock;
		__raw_spin_unlock(&scd->lock);
	}

	return clock;
}

void sched_clock_tick(void)
{
	struct sched_clock_data *scd = this_scd();
	unsigned long now_jiffies = jiffies;
	s64 mult, delta_gtod, delta_raw;
	u64 now, now_gtod;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	__raw_spin_lock(&scd->lock);
	__update_sched_clock(scd, now, NULL);
	/*
	 * update tick_gtod after __update_sched_clock() because that will
	 * already observe 1 new jiffy; adding a new tick_gtod to that would
	 * increase the clock by 2 jiffies.
	 */
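	/*
	 * Recompute the fixed-point raw->gtod ratio from this tick's
	 * deltas; if the raw clock did not advance, fall back to 1:1.
	 */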
	delta_gtod = now_gtod - scd->tick_gtod;
	delta_raw = now - scd->tick_raw;

	if ((long)delta_raw > 0) {
		mult = delta_gtod << MULTI_SHIFT;
		do_div(mult, delta_raw);
		scd->multi = mult;
		if (scd->multi > MAX_MULTI)
			scd->multi = MAX_MULTI;
		else if (scd->multi < MIN_MULTI)
			scd->multi = MIN_MULTI;
	} else
		scd->multi = 1 << MULTI_SHIFT;

	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	scd->tick_jiffies = now_jiffies;
	__raw_spin_unlock(&scd->lock);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	struct sched_clock_data *scd = this_scd();
	u64 now = sched_clock();

	/*
	 * Override the previous timestamp and ignore all
	 * sched_clock() deltas that occurred while we idled,
	 * and use the PM-provided delta_ns to advance the
	 * rq clock:
	 */
	__raw_spin_lock(&scd->lock);
	scd->prev_raw = now;
	scd->clock += delta_ns;
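	/*
	 * The TSC may have been stopped while we idled, so the raw->gtod
	 * ratio computed at the last tick is no longer meaningful; reset
	 * it to 1:1.
	 */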
	scd->multi = 1 << MULTI_SHIFT;
	__raw_spin_unlock(&scd->lock);

	touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
}

unsigned long long cpu_clock(int cpu)
{
	unsigned long long clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	return clock;
}
EXPORT_SYMBOL_GPL(cpu_clock);