// SPDX-License-Identifier: GPL-2.0
/*
 * Generic sched_clock() support, to extend low level hardware time
 * counters to full 64-bit ns values.
*/
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/math.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>

#include "timekeeping.h"

/**
 * struct clock_data - all data needed for sched_clock() (including
 *                     registration of a new clock source)
 *
 * @seq:		Sequence counter for protecting updates. The lowest
 *			bit is the index for @read_data.
 * @read_data:		Data required to read from sched_clock.
 * @wrap_kt:		Duration for which clock can run before wrapping.
 * @rate:		Tick rate of the registered clock.
 * @actual_read_sched_clock: Registered hardware level clock read function.
 *
 * The ordering of this structure has been chosen to optimize cache
 * performance. In particular 'seq' and 'read_data[0]' (combined) should fit
 * into a single 64-byte cache line.
*/
struct clock_data {
	seqcount_latch_t	seq;
	struct clock_read_data	read_data[2];
	ktime_t			wrap_kt;
	unsigned long		rate;

	u64 (*actual_read_sched_clock)(void);
};

static struct hrtimer sched_clock_timer;
static int irqtime = -1;
core_param(irqtime, irqtime, int, 0400);

static u64 notrace jiffy_sched_clock_read(void)
{
	/*
	 * We don't need to use get_jiffies_64 on 32-bit arches here
	 * because we register with BITS_PER_LONG
	 */
	return (u64)(jiffies - INITIAL_JIFFIES);
}

static struct clock_data cd ____cacheline_aligned = {
	.read_data[0] = { .mult = NSEC_PER_SEC / HZ,
			  .read_sched_clock = jiffy_sched_clock_read, },
	.actual_read_sched_clock = jiffy_sched_clock_read,
};
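
/*
 * Worked example of the jiffy-based default above (illustrative only; the
 * exact numbers depend on the configured HZ): with HZ == 100, .mult is
 * NSEC_PER_SEC / HZ == 10000000 and .shift is left at 0, so each elapsed
 * jiffy advances sched_clock() by cyc_to_ns(1, 10000000, 0) == 10 ms.
 */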

static __always_inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}
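
/*
 * Illustrative use of the fixed-point conversion above (values are made up,
 * not taken from any platform): for a 1 GHz counter a mult/shift pair of
 * 0x80000000/31 encodes exactly 1 ns per cycle, so
 * cyc_to_ns(3, 0x80000000, 31) == (3 << 31) >> 31 == 3 ns.
 */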

notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
{
	*seq = raw_read_seqcount_latch(&cd.seq);
	return cd.read_data + (*seq & 1);
}

notrace int sched_clock_read_retry(unsigned int seq)
{
	return raw_read_seqcount_latch_retry(&cd.seq, seq);
}
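
/*
 * Sketch of how an external caller is expected to pair the two helpers
 * above (mirrors the loop in sched_clock_noinstr() below; 'cyc' is just a
 * local in this hypothetical snippet):
 *
 *	struct clock_read_data *rd;
 *	unsigned int seq;
 *	u64 cyc;
 *
 *	do {
 *		rd = sched_clock_read_begin(&seq);
 *		cyc = rd->read_sched_clock();
 *	} while (sched_clock_read_retry(seq));
 */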

unsigned long long noinstr sched_clock_noinstr(void)
{
	struct clock_read_data *rd;
	unsigned int seq;
	u64 cyc, res;

	do {
		seq = raw_read_seqcount_latch(&cd.seq);
		rd = cd.read_data + (seq & 1);

		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
		      rd->sched_clock_mask;
		res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
	} while (raw_read_seqcount_latch_retry(&cd.seq, seq));

	return res;
}

unsigned long long notrace sched_clock(void)
{
	unsigned long long ns;

	preempt_disable_notrace();
	ns = sched_clock_noinstr();
	preempt_enable_notrace();

	return ns;
}

/*
 * Updating the data required to read the clock.
 *
 * sched_clock() will never observe mis-matched data even if called from
 * an NMI. We do this by maintaining an odd/even copy of the data and
 * steering sched_clock() to one or the other using a sequence counter.
 * In order to preserve the data cache profile of sched_clock() as much
 * as possible the system reverts back to the even copy when the update
 * completes; the odd copy is used *only* during an update.
 */
static void update_clock_read_data(struct clock_read_data *rd)
{
	/* update the backup (odd) copy with the new data */
	cd.read_data[1] = *rd;

	/* steer readers towards the odd copy */
	raw_write_seqcount_latch(&cd.seq);

	/* now it's safe for us to update the normal (even) copy */
	cd.read_data[0] = *rd;

	/* switch readers back to the even copy */
	raw_write_seqcount_latch(&cd.seq);
}
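
/*
 * Illustrative walk-through of the latch protocol above (reasoning only,
 * not upstream text): while @seq is even, readers use read_data[0]. The
 * writer fills read_data[1] first, then makes @seq odd, so any reader that
 * starts after that point is steered to the fully written odd copy while
 * read_data[0] is rewritten; a reader that sampled read_data[0] concurrently
 * with that rewrite sees @seq change and retries.
 */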

/*
 * Atomically update the sched_clock() epoch.
 */
static void update_sched_clock(void)
{
	u64 cyc;
	u64 ns;
	struct clock_read_data rd;

	rd = cd.read_data[0];

	cyc = cd.actual_read_sched_clock();
	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);

	rd.epoch_ns = ns;
	rd.epoch_cyc = cyc;

	update_clock_read_data(&rd);
}

static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
	update_sched_clock();
	hrtimer_forward_now(hrt, cd.wrap_kt);

	return HRTIMER_RESTART;
}

void __init
sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
{
	u64 res, wrap, new_mask, new_epoch, cyc, ns;
	u32 new_mult, new_shift;
	unsigned long r, flags;
	char r_unit;
	struct clock_read_data rd;

	if (cd.rate > rate)
		return;

	/* Cannot register a sched_clock with interrupts on */
	local_irq_save(flags);

	/* Calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);

	new_mask = CLOCKSOURCE_MASK(bits);
	cd.rate = rate;

	/* Calculate how many nanosecs until we risk wrapping */
	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
	cd.wrap_kt = ns_to_ktime(wrap);

	rd = cd.read_data[0];

	/* Update epoch for new counter and update 'epoch_ns' from old counter */
	new_epoch = read();
	cyc = cd.actual_read_sched_clock();
	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);
	cd.actual_read_sched_clock = read;

	rd.read_sched_clock	= read;
	rd.sched_clock_mask	= new_mask;
	rd.mult			= new_mult;
	rd.shift		= new_shift;
	rd.epoch_cyc		= new_epoch;
	rd.epoch_ns		= ns;

	update_clock_read_data(&rd);

	if (sched_clock_timer.function != NULL) {
		/* update timeout for clock wrap */
		hrtimer_start(&sched_clock_timer, cd.wrap_kt,
			      HRTIMER_MODE_REL_HARD);
	}

	r = rate;
	if (r >= 4000000) {
		r = DIV_ROUND_CLOSEST(r, 1000000);
		r_unit = 'M';
	} else if (r >= 4000) {
		r = DIV_ROUND_CLOSEST(r, 1000);
		r_unit = 'k';
	} else {
		r_unit = ' ';
	}

	/* Calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, new_mult, new_shift);

	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
		bits, r, r_unit, res, wrap);

	/* Enable IRQ time accounting if we have a fast enough sched_clock() */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	local_irq_restore(flags);

	pr_debug("Registered %pS as sched_clock source\n", read);
}
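
/*
 * Sketch of a typical caller (hypothetical driver code, not part of this
 * file): a timer driver with a free-running 56-bit counter at 24 MHz, read
 * by a driver-provided my_timer_sched_clock_read(), would register it early
 * in boot with:
 *
 *	sched_clock_register(my_timer_sched_clock_read, 56, 24000000);
 *
 * The highest-rate registration wins: later calls with a lower rate are
 * rejected by the 'cd.rate > rate' check at the top of the function.
 */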

void __init generic_sched_clock_init(void)
{
	/*
	 * If no sched_clock() function has been provided by this point,
	 * make it the final one.
	 */
	if (cd.actual_read_sched_clock == jiffy_sched_clock_read)
		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

	update_sched_clock();

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch.
	 */
	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	sched_clock_timer.function = sched_clock_poll;
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
}

/*
 * Clock read function for use when the clock is suspended.
 *
 * This function makes it appear to sched_clock() as if the clock
 * stopped counting at its last update.
 *
 * This function must only be called from the critical
 * section in sched_clock(). It relies on the read_seqcount_retry()
 * at the end of the critical section to be sure we observe the
 * correct copy of 'epoch_cyc'.
 */
static u64 notrace suspended_sched_clock_read(void)
{
	unsigned int seq = raw_read_seqcount_latch(&cd.seq);

	return cd.read_data[seq & 1].epoch_cyc;
}
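
/*
 * Consequence of the above (derived from cyc_to_ns() and the read loop, not
 * stated elsewhere in this file): while suspended, rd->read_sched_clock()
 * always returns the saved epoch_cyc, so the cycle delta is 0 and
 * sched_clock() keeps reporting the epoch_ns captured by the
 * update_sched_clock() call in sched_clock_suspend() below.
 */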

int sched_clock_suspend(void)
{
	struct clock_read_data *rd = &cd.read_data[0];

	update_sched_clock();
	hrtimer_cancel(&sched_clock_timer);
	rd->read_sched_clock = suspended_sched_clock_read;

	return 0;
}

void sched_clock_resume(void)
{
	struct clock_read_data *rd = &cd.read_data[0];

	rd->epoch_cyc = cd.actual_read_sched_clock();
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
	rd->read_sched_clock = cd.actual_read_sched_clock;
}

static struct syscore_ops sched_clock_ops = {
	.suspend	= sched_clock_suspend,
	.resume		= sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);

	return 0;
}
device_initcall(sched_clock_syscore_init);