// SPDX-License-Identifier: GPL-2.0
/*
* preemptoff and irqoff tracepoints
*
 * Copyright (C) Joel Fernandes (Google) <joel@joelfernandes.org>
*/
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/preemptirq.h>

#ifdef CONFIG_TRACE_IRQFLAGS
/* Per-cpu variable to prevent redundant calls when IRQs already off */
static DEFINE_PER_CPU(int, tracing_irq_cpu);

/*
 * Like trace_hardirqs_on() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
void trace_hardirqs_on_prepare(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		if (!in_nmi())
			trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}
}
EXPORT_SYMBOL(trace_hardirqs_on_prepare);
NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);
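
/*
 * For illustration, a sketch of how the generic entry code pairs the two
 * stages on the exit-to-user path (not part of this file, shown roughly):
 *
 *	trace_hardirqs_on_prepare();
 *	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
 *	...			// instrumentation ends, RCU may stop watching
 *	lockdep_hardirqs_on(CALLER_ADDR0);
 */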
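
/*
 * Called when hard interrupts are about to be enabled. Emits the
 * irq_enable tracepoint and notifies the irqsoff tracer once per
 * disable/enable pair, then performs both lockdep stages itself.
 */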
void trace_hardirqs_on(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		if (!in_nmi())
			trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
NOKPROBE_SYMBOL(trace_hardirqs_on);

/*
 * Like trace_hardirqs_off() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
void trace_hardirqs_off_finish(void)
{
	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		if (!in_nmi())
			trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off_finish);
NOKPROBE_SYMBOL(trace_hardirqs_off_finish);
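
/*
 * For illustration, again a sketch of the generic entry code (not part of
 * this file): on the entry path the ordering is roughly
 *
 *	lockdep_hardirqs_off(CALLER_ADDR0);
 *	...			// RCU starts watching
 *	trace_hardirqs_off_finish();
 */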
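
/*
 * Called when hard interrupts have just been disabled. Tells lockdep
 * first, then notifies the irqsoff tracer and emits the irq_disable
 * tracepoint on the first disable on this CPU.
 */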
void trace_hardirqs_off(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);

	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		if (!in_nmi())
			trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off);
NOKPROBE_SYMBOL(trace_hardirqs_off);
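
/*
 * The *_caller() variants below take an explicit caller address instead
 * of CALLER_ADDR1, for call sites (typically architecture code) where the
 * reported call site should be something other than the immediate caller.
 */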
__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		if (!in_nmi())
			trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
		tracer_hardirqs_on(CALLER_ADDR0, caller_addr);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);
NOKPROBE_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	lockdep_hardirqs_off(CALLER_ADDR0);

	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, caller_addr);
		if (!in_nmi())
			trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
NOKPROBE_SYMBOL(trace_hardirqs_off_caller);
#endif /* CONFIG_TRACE_IRQFLAGS */

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
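/*
 * The preempt_enable/preempt_disable tracepoint pair. Both helpers also
 * feed the preemptoff tracer; the tracepoints themselves are suppressed
 * in NMI context.
 */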
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (!in_nmi())
		trace_preempt_enable_rcuidle(a0, a1);
	tracer_preempt_on(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (!in_nmi())
		trace_preempt_disable_rcuidle(a0, a1);
	tracer_preempt_off(a0, a1);
}
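
/*
 * Usage note (a sketch; assumes tracefs is mounted at /sys/kernel/tracing):
 *
 *	echo 1 > /sys/kernel/tracing/events/preemptirq/irq_disable/enable
 *	echo 1 > /sys/kernel/tracing/events/preemptirq/preempt_disable/enable
 */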
#endif