// SPDX-License-Identifier: GPL-2.0
/*
* preemptoff and irqoff tracepoints
*
 * Copyright (C) Joel Fernandes (Google) <joel@joelfernandes.org>
*/
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/preemptirq.h>
/*
 * Use regular trace points on architectures that implement noinstr
 * tooling: these calls will only happen with RCU enabled, which can
 * use a regular tracepoint.
 *
 * On older architectures, use the rcuidle tracing methods (which
 * aren't NMI-safe - so exclude NMI contexts):
 */
#ifdef CONFIG_ARCH_WANTS_NO_INSTR
#define trace(point)	trace_##point
#else
#define trace(point)	if (!in_nmi()) trace_##point##_rcuidle
#endif
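
/*
 * For illustration: with the first definition, trace(irq_enable)(CALLER_ADDR0,
 * CALLER_ADDR1) expands to trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1); with
 * the second it expands to
 * "if (!in_nmi()) trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1)".
 */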

#ifdef CONFIG_TRACE_IRQFLAGS
/* Per-cpu variable to prevent redundant calls when IRQs already off */
static DEFINE_PER_CPU(int, tracing_irq_cpu);

/*
 * Like trace_hardirqs_on() but without the lockdep invocation. This is
 * used in the low-level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into an RCU-on and an RCU-off section.
 */
void trace_hardirqs_on_prepare(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}
}
EXPORT_SYMBOL(trace_hardirqs_on_prepare);
NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);
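
/*
 * Usage sketch (illustrative, not copied verbatim from the entry code): on
 * the irq-enable path the low-level entry code does roughly
 *
 *	trace_hardirqs_on_prepare();
 *	lockdep_hardirqs_on_prepare();
 *	... leave the RCU-watching section ...
 *	lockdep_hardirqs_on(CALLER_ADDR0);
 *
 * trace_hardirqs_on() below is the combined variant: it emits the
 * tracepoint/tracer calls and performs both lockdep steps itself.
 */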
void trace_hardirqs_on(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on_prepare();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
NOKPROBE_SYMBOL(trace_hardirqs_on);

/*
 * Like trace_hardirqs_off() but without the lockdep invocation. This is
 * used in the low-level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into an RCU-on and an RCU-off section.
 */
void trace_hardirqs_off_finish(void)
{
	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off_finish);
NOKPROBE_SYMBOL(trace_hardirqs_off_finish);
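
/*
 * Usage sketch for the disable path (illustrative, not copied verbatim from
 * the entry code): lockdep_hardirqs_off() is called early, possibly before
 * RCU is watching, and trace_hardirqs_off_finish() is called later, once RCU
 * is watching, to emit the tracepoint. trace_hardirqs_off() below is the
 * combined variant that does the lockdep update and the tracing together.
 */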
void trace_hardirqs_off(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);

	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off);
NOKPROBE_SYMBOL(trace_hardirqs_off);

#endif /* CONFIG_TRACE_IRQFLAGS */

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
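
/*
 * a0/a1 are instruction pointers identifying where the preemption state
 * changed (typically the caller and its parent); this code simply forwards
 * them unchanged to the preempt_enable/preempt_disable tracepoints and to
 * the preempt latency tracer.
 */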
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	trace(preempt_enable)(a0, a1);
	tracer_preempt_on(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	trace(preempt_disable)(a0, a1);
	tracer_preempt_off(a0, a1);
}
#endif