/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>

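/*
 * The (epoch_cyc, epoch_ns) pair anchors the raw counter value to a
 * nanosecond timestamp, mult/shift hold the cycles-to-ns conversion,
 * and seq lets readers snapshot the pair without taking a lock.
 */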
struct clock_data {
	ktime_t wrap_kt;
	u64 epoch_ns;
	u64 epoch_cyc;
	seqcount_t seq;
	unsigned long rate;
	u32 mult;
	u32 shift;
	bool suspended;
};

static struct hrtimer sched_clock_timer;

static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static struct clock_data cd = {
	.mult	= NSEC_PER_SEC / HZ,
};

static u64 __read_mostly sched_clock_mask;

static u64 notrace jiffy_sched_clock_read(void)
{
	/*
	 * We don't need to use get_jiffies_64 on 32-bit arches here
	 * because we register with BITS_PER_LONG
	 */
	return (u64)(jiffies - INITIAL_JIFFIES);
}

static u32 __read_mostly (*read_sched_clock_32)(void);

static u64 notrace read_sched_clock_32_wrapper(void)
{
	return read_sched_clock_32();
}

static u64 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;

static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}

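/*
 * Worked example (assumed rate, not taken from this file): at 24 MHz,
 * one counter cycle is 10^9 / (24 * 10^6) ~= 41.67 ns. The registration
 * path below asks clocks_calc_mult_shift() for a mult and shift such
 * that mult / 2^shift approximates that ratio, so the hot path needs
 * only a multiply and a shift instead of a 64-bit division.
 */
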
static unsigned long long notrace sched_clock_32(void)
{
	u64 epoch_ns;
	u64 epoch_cyc;
	u64 cyc;
	unsigned long seq;

	if (cd.suspended)
		return cd.epoch_ns;

	/* Snapshot a consistent (epoch_cyc, epoch_ns) pair without locking. */
	do {
		seq = read_seqcount_begin(&cd.seq);
		epoch_cyc = cd.epoch_cyc;
		epoch_ns = cd.epoch_ns;
	} while (read_seqcount_retry(&cd.seq, seq));

	cyc = read_sched_clock();
	cyc = (cyc - epoch_cyc) & sched_clock_mask;
	return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
}

/*
 * Atomically update the sched_clock epoch.
 */
static void notrace update_sched_clock(void)
{
	unsigned long flags;
	u64 cyc;
	u64 ns;

	cyc = read_sched_clock();
	ns = cd.epoch_ns +
		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);

	raw_local_irq_save(flags);
	write_seqcount_begin(&cd.seq);
	cd.epoch_ns = ns;
	cd.epoch_cyc = cyc;
	write_seqcount_end(&cd.seq);
	raw_local_irq_restore(flags);
}

static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
	update_sched_clock();
	hrtimer_forward_now(hrt, cd.wrap_kt);
	return HRTIMER_RESTART;
}

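/*
 * Why the periodic poll: the masked delta (cyc - epoch_cyc) in
 * sched_clock_32() is only unambiguous for less than one full wrap of
 * the underlying counter, so the epoch must be refreshed at least once
 * per wrap period. The hrtimer above re-arms every wrap_kt, which
 * sched_clock_register() below sets to wrap - (wrap >> 3), i.e. 7/8 of
 * the computed wrap time, leaving some safety margin.
 */
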
void __init sched_clock_register(u64 (*read)(void), int bits,
				 unsigned long rate)
{
	unsigned long r;
	u64 res, wrap;
	char r_unit;

	if (cd.rate > rate)
		return;

	WARN_ON(!irqs_disabled());
	read_sched_clock = read;
	sched_clock_mask = CLOCKSOURCE_MASK(bits);
	cd.rate = rate;

	/* calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 3600);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else if (r >= 1000) {
		r /= 1000;
		r_unit = 'k';
	} else
		r_unit = ' ';

	/* calculate how many ns until we wrap */
	wrap = clocks_calc_max_nsecs(cd.mult, cd.shift, 0, sched_clock_mask);
	cd.wrap_kt = ns_to_ktime(wrap - (wrap >> 3));

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, cd.mult, cd.shift);

	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
		bits, r, r_unit, res, wrap);

	update_sched_clock();

	/*
	 * Ensure that sched_clock() starts off at 0ns
	 */
	cd.epoch_ns = 0;

	/* Enable IRQ time accounting if we have a fast enough sched_clock */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	pr_debug("Registered %pF as sched_clock source\n", read);
}

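/*
 * Usage sketch (hypothetical platform code; all names below are made
 * up for illustration):
 *
 *	static u64 notrace my_timer_read(void)
 *	{
 *		return readl_relaxed(my_timer_base + MY_COUNT_REG);
 *	}
 *
 *	sched_clock_register(my_timer_read, 32, 24000000);
 *
 * After this, sched_clock() returns nanoseconds derived from the
 * 32-bit, 24 MHz counter, extended to 64 bits across wraps.
 */
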
void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
	read_sched_clock_32 = read;
	sched_clock_register(read_sched_clock_32_wrapper, bits, rate);
}

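/*
 * sched_clock() is a thin dispatcher through sched_clock_func, with
 * sched_clock_32 above as the default. The indirection leaves room for
 * an architecture to install its own implementation.
 */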
unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32;

unsigned long long notrace sched_clock(void)
{
	return sched_clock_func();
}

void __init sched_clock_postinit(void)
{
	/*
	 * If no sched_clock() function has been provided at this point,
	 * make it the final one.
	 */
	if (read_sched_clock == jiffy_sched_clock_read)
		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

	update_sched_clock();

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch.
	 */
	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sched_clock_timer.function = sched_clock_poll;
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
}

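/*
 * Across suspend the counter may stop or reset. sched_clock_suspend()
 * folds the current reading into the epoch and freezes sched_clock()
 * at epoch_ns via cd.suspended; sched_clock_resume() restarts the
 * epoch from whatever value the counter holds after resume.
 */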
static int sched_clock_suspend(void)
{
	/* Update the epoch one last time while the counter is still live. */
	sched_clock_poll(&sched_clock_timer);
	cd.suspended = true;
	return 0;
}

static void sched_clock_resume(void)
{
	/* Re-anchor the epoch to the counter's post-resume value. */
	cd.epoch_cyc = read_sched_clock();
	cd.suspended = false;
}

static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);
	return 0;
}
device_initcall(sched_clock_syscore_init);