// SPDX-License-Identifier: GPL-2.0
/*
 * preemptoff and irqoff tracepoints
 *
 * Copyright (C) Joel Fernandes (Google) <joel@joelfernandes.org>
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/preemptirq.h>

/*
 * Use regular trace points on architectures that implement noinstr
 * tooling: these calls will only happen with RCU enabled, which can
 * use a regular tracepoint.
 *
 * On older architectures, use the rcuidle tracing methods (which
 * aren't NMI-safe - so exclude NMI contexts):
 */
#ifdef CONFIG_ARCH_WANTS_NO_INSTR
#define trace(point)	trace_##point
#else
#define trace(point)	if (!in_nmi()) trace_##point##_rcuidle
#endif

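/*
 * Illustrative expansion (for the reader only, not part of the build):
 * with CONFIG_ARCH_WANTS_NO_INSTR,
 *
 *	trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
 *
 * becomes trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1); on older
 * architectures it instead becomes
 *
 *	if (!in_nmi()) trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
 */
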
#ifdef CONFIG_TRACE_IRQFLAGS
/* Per-cpu variable to prevent redundant calls when IRQs already off */
static DEFINE_PER_CPU(int, tracing_irq_cpu);

/*
 * Like trace_hardirqs_on() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into an RCU on and an RCU off section.
 */
void trace_hardirqs_on_prepare(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}
}
EXPORT_SYMBOL(trace_hardirqs_on_prepare);
NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);

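/*
 * Rough sketch (illustrative only, not code from this file) of the staged
 * ordering the low level entry code is expected to use, based on the
 * comment above; exact call sites vary by architecture and kernel version:
 *
 *	trace_hardirqs_on_prepare();
 *	lockdep_hardirqs_on_prepare();
 *	...	(RCU / instrumentation state changes happen here)
 *	lockdep_hardirqs_on(CALLER_ADDR0);
 */
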
void trace_hardirqs_on(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on_prepare();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
NOKPROBE_SYMBOL(trace_hardirqs_on);

/*
 * Like trace_hardirqs_off() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into an RCU on and an RCU off section.
 */
void trace_hardirqs_off_finish(void)
{
	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off_finish);
NOKPROBE_SYMBOL(trace_hardirqs_off_finish);

void trace_hardirqs_off(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);

	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off);
NOKPROBE_SYMBOL(trace_hardirqs_off);
#endif /* CONFIG_TRACE_IRQFLAGS */

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE

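/*
 * Note (illustrative, not derived from this file): these hooks are expected
 * to be driven from the preempt counter bookkeeping when preemption is
 * first disabled and when it is finally re-enabled again; a0/a1 typically
 * carry the caller and parent instruction pointers.
 */
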
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	trace(preempt_enable)(a0, a1);
	tracer_preempt_on(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	trace(preempt_disable)(a0, a1);
	tracer_preempt_off(a0, a1);
}
#endif