// SPDX-License-Identifier: GPL-2.0
/*
 * tracing clocks
 *
 *  Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  -   local: CPU-local trace clock
 *  -  medium: scalable global clock with some jitter
 *  -  global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	u64 clock;

	/*
	 * sched_clock() is an architecture implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	preempt_disable_notrace();
	clock = sched_clock();
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(trace_clock_local);
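
/*
 * Illustrative sketch (assumed usage, not taken from this file):
 * sched_clock() counts nanoseconds, so two reads on the same CPU give a
 * meaningful delta:
 *
 *	u64 t0 = trace_clock_local();
 *	do_work();			// hypothetical helper
 *	pr_debug("took %llu ns\n", trace_clock_local() - t0);
 *
 * Comparing values read on different CPUs is not reliable with this clock.
 */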

/*
 * trace_clock(): 'between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return local_clock();
}
EXPORT_SYMBOL_GPL(trace_clock);

/*
 * trace_clock_jiffies(): Simply use jiffies as a clock counter.
 * Note that this use of jiffies_64 is not completely safe on
 * 32-bit systems. But the window is tiny, and the effect if
 * we are affected is that we will have an obviously bogus
 * timestamp on a trace event - i.e. not life threatening.
 */
u64 notrace trace_clock_jiffies(void)
{
	return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
}
EXPORT_SYMBOL_GPL(trace_clock_jiffies);
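
/*
 * Note: jiffies_64_to_clock_t() converts the boot-relative jiffies count
 * into clock_t units (USER_HZ, typically 100 ticks per second), so this
 * clock is far coarser than the sched_clock()-based clocks above.
 */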

/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* keep prev_time and lock in the same cacheline. */
static struct {
	u64		prev_time;
	arch_spinlock_t	lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
	{
		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
	};

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	raw_local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = sched_clock_cpu(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups and return the
	 * cpu_clock() time:
	 */
	if (unlikely(in_nmi()))
		goto out;

	arch_spin_lock(&trace_clock_struct.lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
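	/*
	 * Never let the global clock step backwards: if this CPU's
	 * sched_clock_cpu() reads behind the last timestamp handed out,
	 * bump the result to just past it.
	 */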
	if ((s64)(now - trace_clock_struct.prev_time) < 0)
		now = trace_clock_struct.prev_time + 1;

	trace_clock_struct.prev_time = now;

	arch_spin_unlock(&trace_clock_struct.lock);

 out:
	raw_local_irq_restore(flags);

	return now;
}
EXPORT_SYMBOL_GPL(trace_clock_global);
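
/*
 * Note: because NMI context bypasses the lock above, timestamps taken
 * from NMIs are the raw sched_clock_cpu() value and are not clamped
 * against prev_time, so they can be slightly out of order with respect
 * to the serialized path.
 */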

static atomic64_t trace_counter;

/*
 * trace_clock_counter(): simply an atomic counter.
 * Use the trace_counter "counter" for cases where you do not care
 * about timings, but are interested in strict ordering.
 */
u64 notrace trace_clock_counter(void)
{
	return atomic64_add_return(1, &trace_counter);
}
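
/*
 * Usage sketch (assumption about the surrounding tracing setup, not
 * something defined in this file): a clock is normally picked at run
 * time through the tracefs "trace_clock" file rather than by calling
 * these functions directly, e.g.:
 *
 *	# echo counter > /sys/kernel/tracing/trace_clock
 *
 * after which events carry strictly ordered sequence numbers from
 * trace_clock_counter() instead of timestamps.
 */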