/*
 * tracing clocks
 *
 *  Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  -   local: CPU-local trace clock
 *  -  medium: scalable global clock with some jitter
 *  -  global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	u64 clock;

	/*
	 * sched_clock() is an architecture implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	preempt_disable_notrace();
	clock = sched_clock();
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(trace_clock_local);
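
/*
 * Illustrative sketch (not part of the original file): one way a tracer
 * could stamp events with the CPU-local clock. The struct and helper
 * below are hypothetical and exist only to show the intended usage.
 * Because trace_clock_local() is not coherent across CPUs, timestamps
 * recorded this way should only be compared against other events
 * recorded on the same CPU.
 */
struct example_local_event {
	u64	ts;
	int	cpu;
};

static __maybe_unused void example_record_local(struct example_local_event *ev)
{
	ev->ts	= trace_clock_local();
	ev->cpu	= raw_smp_processor_id();
}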

/*
 * trace_clock(): 'between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return local_clock();
}
EXPORT_SYMBOL_GPL(trace_clock);
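
/*
 * Illustrative sketch (not part of the original file): trace_clock() can
 * be used for timestamps that are compared across CPUs, as long as the
 * caller tolerates up to roughly one jiffy of skew. The helper below is
 * hypothetical; it only shows that a cross-CPU delta may appear slightly
 * negative because of that jitter and should be clamped before use.
 */
static __maybe_unused u64 example_cross_cpu_delta(u64 earlier, u64 later)
{
	/* 'later' may appear to precede 'earlier' by a small offset. */
	if (later < earlier)
		return 0;

	return later - earlier;
}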

/*
 * trace_clock_jiffies(): Simply use jiffies as a clock counter.
 * Note that this use of jiffies_64 is not completely safe on
 * 32-bit systems. But the window is tiny, and the effect if
 * we are affected is that we will have an obviously bogus
 * timestamp on a trace event - i.e. not life threatening.
 */
u64 notrace trace_clock_jiffies(void)
{
	return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
}
EXPORT_SYMBOL_GPL(trace_clock_jiffies);
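
/*
 * Illustrative note (not part of the original file): the value returned
 * by trace_clock_jiffies() is in clock_t units (USER_HZ ticks since
 * boot), not nanoseconds. The hypothetical helper below shows one way to
 * turn a delta of such timestamps into milliseconds; it assumes USER_HZ
 * and MSEC_PER_SEC are visible through the existing includes.
 */
static __maybe_unused u64 example_jiffy_delta_to_msecs(u64 start, u64 end)
{
	/* clock_t ticks -> milliseconds */
	return (end - start) * MSEC_PER_SEC / USER_HZ;
}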

/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* keep prev_time and lock in the same cacheline. */
static struct {
	u64			prev_time;
	arch_spinlock_t		lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
	{
		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
	};

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = sched_clock_cpu(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups and return the
	 * cpu_clock() time:
	 */
	if (unlikely(in_nmi()))
		goto out;

	arch_spin_lock(&trace_clock_struct.lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
	if ((s64)(now - trace_clock_struct.prev_time) < 0)
		now = trace_clock_struct.prev_time + 1;

	trace_clock_struct.prev_time = now;

	arch_spin_unlock(&trace_clock_struct.lock);

 out:
	local_irq_restore(flags);

	return now;
}
EXPORT_SYMBOL_GPL(trace_clock_global);
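
/*
 * Illustrative sketch (not part of the original file): outside of NMI
 * context, trace_clock_global() never returns a value behind the last
 * value it handed out (prev_time), so timestamps taken on different
 * CPUs can be compared directly. The hypothetical helper below relies
 * on that ordering guarantee to time a region that may start on one
 * CPU and end on another.
 */
static __maybe_unused u64 example_global_region_ns(u64 start_ts)
{
	/*
	 * start_ts is assumed to come from an earlier trace_clock_global()
	 * call; outside NMI context the clock never goes backwards, so
	 * this delta stays non-negative even across a CPU migration.
	 */
	return trace_clock_global() - start_ts;
}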

static atomic64_t trace_counter;

/*
 * trace_clock_counter(): simply an atomic counter.
 * Use the trace_counter "counter" for cases where you do not care
 * about timings, but are interested in strict ordering.
 */
u64 notrace trace_clock_counter(void)
{
	return atomic64_add_return(1, &trace_counter);
}
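
/*
 * Illustrative sketch (not part of the original file): trace_clock_counter()
 * returns strictly increasing "timestamps" with no relation to real time,
 * which makes it useful as a sequence number. The hypothetical helper below
 * tags an event with such a number so that readers can reconstruct the
 * global order of events without caring how far apart in time they were.
 */
struct example_seq_event {
	u64	seq;
	int	data;
};

static __maybe_unused void example_record_ordered(struct example_seq_event *ev, int data)
{
	ev->seq	 = trace_clock_counter();
	ev->data = data;
}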