/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>
struct clock_data {
	ktime_t wrap_kt;
	u64 epoch_ns;
	u64 epoch_cyc;
	seqcount_t seq;
	unsigned long rate;
	u32 mult;
	u32 shift;
	bool suspended;
};

static struct hrtimer sched_clock_timer;
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static struct clock_data cd = {
	.mult	= NSEC_PER_SEC / HZ,
};

static u64 __read_mostly sched_clock_mask;

static u64 notrace jiffy_sched_clock_read(void)
{
	/*
	 * We don't need to use get_jiffies_64 on 32-bit arches here
	 * because we register with BITS_PER_LONG
	 */
	return (u64)(jiffies - INITIAL_JIFFIES);
}

static u64 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;

static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}
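
/*
 * Illustrative example (values made up for this comment, not necessarily
 * what clocks_calc_mult_shift() would return): a 1 MHz counter has a
 * period of 1000 ns, which could be encoded as mult = 2000, shift = 1,
 * so cyc_to_ns(5, 2000, 1) = (5 * 2000) >> 1 = 5000 ns. In practice a
 * much larger mult/shift pair is chosen to keep precision for rates
 * that do not divide NSEC_PER_SEC evenly.
 */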

unsigned long long notrace sched_clock(void)
{
	u64 epoch_ns;
	u64 epoch_cyc;
	u64 cyc;
	unsigned long seq;

	if (cd.suspended)
		return cd.epoch_ns;

	do {
		seq = raw_read_seqcount_begin(&cd.seq);
		epoch_cyc = cd.epoch_cyc;
		epoch_ns = cd.epoch_ns;
	} while (read_seqcount_retry(&cd.seq, seq));

	cyc = read_sched_clock();
	cyc = (cyc - epoch_cyc) & sched_clock_mask;
	return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
}
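
/*
 * Readers in sched_clock() above and the writer in update_sched_clock()
 * below are serialized by cd.seq: a reader retries its sample of
 * epoch_cyc/epoch_ns if the sequence count changed underneath it, so it
 * never mixes values from two different epochs.
 */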

/*
 * Atomically update the sched_clock epoch.
 */
static void notrace update_sched_clock(void)
{
	unsigned long flags;
	u64 cyc;
	u64 ns;

	cyc = read_sched_clock();
	ns = cd.epoch_ns +
		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);

	raw_local_irq_save(flags);
	raw_write_seqcount_begin(&cd.seq);
	cd.epoch_ns = ns;
	cd.epoch_cyc = cyc;
	raw_write_seqcount_end(&cd.seq);
	raw_local_irq_restore(flags);
}

static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
	update_sched_clock();
	hrtimer_forward_now(hrt, cd.wrap_kt);
	return HRTIMER_RESTART;
}

void __init sched_clock_register(u64 (*read)(void), int bits,
				 unsigned long rate)
{
	u64 res, wrap, new_mask, new_epoch, cyc, ns;
	u32 new_mult, new_shift;
	ktime_t new_wrap_kt;
	unsigned long r;
	char r_unit;

	if (cd.rate > rate)
		return;

	WARN_ON(!irqs_disabled());

	/* calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);

	new_mask = CLOCKSOURCE_MASK(bits);

	/*
	 * Calculate how many ns until we wrap, and re-arm the update timer
	 * at ~7/8 of that period so the epoch is refreshed before the
	 * counter can wrap.
	 */
	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask);
	new_wrap_kt = ns_to_ktime(wrap - (wrap >> 3));

	/* update epoch for new counter and update epoch_ns from old counter */
	new_epoch = read();
	cyc = read_sched_clock();
	ns = cd.epoch_ns + cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);

	raw_write_seqcount_begin(&cd.seq);
	read_sched_clock = read;
	sched_clock_mask = new_mask;
	cd.rate = rate;
	cd.wrap_kt = new_wrap_kt;
	cd.mult = new_mult;
	cd.shift = new_shift;
	cd.epoch_cyc = new_epoch;
	cd.epoch_ns = ns;
	raw_write_seqcount_end(&cd.seq);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else if (r >= 1000) {
		r /= 1000;
		r_unit = 'k';
	} else
		r_unit = ' ';

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, new_mult, new_shift);

	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
		bits, r, r_unit, res, wrap);

	/* Enable IRQ time accounting if we have a fast enough sched_clock */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	pr_debug("Registered %pF as sched_clock source\n", read);
}
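
/*
 * Example usage (hypothetical driver code, not part of this file): a timer
 * driver with a free-running 56-bit counter clocked at 24 MHz could provide
 *
 *	static u64 notrace my_counter_read(void)
 *	{
 *		return my_read_counter_hw();	// hypothetical hardware accessor
 *	}
 *
 * and register it from its early init code, with interrupts still disabled:
 *
 *	sched_clock_register(my_counter_read, 56, 24000000);
 */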

void __init sched_clock_postinit(void)
{
	/*
	 * If no sched_clock function has been provided at that point,
	 * make it the final one.
	 */
	if (read_sched_clock == jiffy_sched_clock_read)
		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

	update_sched_clock();

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch.
	 */
	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sched_clock_timer.function = sched_clock_poll;
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
}

static int sched_clock_suspend(void)
{
	update_sched_clock();
	hrtimer_cancel(&sched_clock_timer);
	cd.suspended = true;
	return 0;
}

static void sched_clock_resume(void)
{
	cd.epoch_cyc = read_sched_clock();
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
	cd.suspended = false;
}

static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);
	return 0;
}
device_initcall(sched_clock_syscore_init);