// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/irq.h>

#include <asm/irq_stack.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/thermal.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>
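
/*
 * Per-CPU interrupt statistics, reported via /proc/interrupts and
 * summed up for /proc/stat.
 */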
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
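
/* Bogus APIC interrupts, reported as "ERR" in /proc/interrupts. */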
atomic_t irq_err_count;

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled -AK
	 */
	apic_eoi();
}

#define irq_stats(x)		(&per_cpu(irq_stat, x))
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_puts(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_puts(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_puts(p, "  Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_puts(p, "  Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "IWI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
	seq_puts(p, "  IRQ work interrupts\n");
	seq_printf(p, "%*s: ", prec, "RTR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
	seq_puts(p, "  APIC ICR read retries\n");
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_puts(p, "  Platform interrupts\n");
	}
#endif
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_puts(p, "  Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
	seq_puts(p, "  Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_puts(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_puts(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_puts(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_AMD
	seq_printf(p, "%*s: ", prec, "DFR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_deferred_error_count);
	seq_puts(p, "  Deferred Error APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_puts(p, "  Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_puts(p, "  Machine check polls\n");
#endif
#ifdef CONFIG_X86_HV_CALLBACK_VECTOR
	if (test_bit(HYPERVISOR_CALLBACK_VECTOR, system_vectors)) {
		seq_printf(p, "%*s: ", prec, "HYP");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   irq_stats(j)->irq_hv_callback_count);
		seq_puts(p, "  Hypervisor callback interrupts\n");
	}
#endif
#if IS_ENABLED(CONFIG_HYPERV)
	if (test_bit(HYPERV_REENLIGHTENMENT_VECTOR, system_vectors)) {
		seq_printf(p, "%*s: ", prec, "HRE");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   irq_stats(j)->irq_hv_reenlightenment_count);
		seq_puts(p, "  Hyper-V reenlightenment interrupts\n");
	}
	if (test_bit(HYPERV_STIMER0_VECTOR, system_vectors)) {
		seq_printf(p, "%*s: ", prec, "HVS");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   irq_stats(j)->hyperv_stimer0_count);
		seq_puts(p, "  Hyper-V stimer0 interrupts\n");
	}
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
#ifdef CONFIG_HAVE_KVM
	seq_printf(p, "%*s: ", prec, "PIN");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
	seq_puts(p, "  Posted-interrupt notification event\n");

	seq_printf(p, "%*s: ", prec, "NPI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ",
			   irq_stats(j)->kvm_posted_intr_nested_ipis);
	seq_puts(p, "  Nested posted-interrupt event\n");

	seq_printf(p, "%*s: ", prec, "PIW");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ",
			   irq_stats(j)->kvm_posted_intr_wakeup_ipis);
	seq_puts(p, "  Posted-interrupt wakeup event\n");
#endif
	return 0;
}

/*
 * /proc/stat helpers
 */
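/*
 * Sum of the arch-specific interrupt counters for @cpu; folded into the
 * "intr" totals reported by /proc/stat.
 */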
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
	sum += irq_stats(cpu)->apic_timer_irqs;
	sum += irq_stats(cpu)->irq_spurious_count;
	sum += irq_stats(cpu)->apic_perf_irqs;
	sum += irq_stats(cpu)->apic_irq_work_irqs;
	sum += irq_stats(cpu)->icr_read_retry_count;
	if (x86_platform_ipi_callback)
		sum += irq_stats(cpu)->x86_platform_ipis;
#endif
#ifdef CONFIG_SMP
	sum += irq_stats(cpu)->irq_resched_count;
	sum += irq_stats(cpu)->irq_call_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_HV_CALLBACK_VECTOR
	sum += irq_stats(cpu)->irq_hv_callback_count;
#endif
#if IS_ENABLED(CONFIG_HYPERV)
	sum += irq_stats(cpu)->irq_hv_reenlightenment_count;
	sum += irq_stats(cpu)->hyperv_stimer0_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += per_cpu(mce_exception_count, cpu);
	sum += per_cpu(mce_poll_count, cpu);
#endif
	return sum;
}

u64 arch_irq_stat(void)
{
	u64 sum = atomic_read(&irq_err_count);

	return sum;
}
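
/*
 * handle_irq() runs the descriptor handler for a device interrupt. On
 * 64-bit the IDT entry code has already switched to the per-CPU irq
 * stack, so the handler can be invoked directly; 32-bit switches stacks
 * (when needed) inside __handle_irq().
 */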
static __always_inline void handle_irq(struct irq_desc *desc,
				       struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_X86_64))
		generic_handle_irq_desc(desc);
	else
		__handle_irq(desc, regs);
}

/*
 * common_interrupt() handles all normal device IRQ's (the special SMP
 * cross-CPU interrupts have their own entry points).
 */
DEFINE_IDTENTRY_IRQ(common_interrupt)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct irq_desc *desc;

	/* entry code tells RCU that we're not quiescent.  Check it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");

	desc = __this_cpu_read(vector_irq[vector]);
	if (likely(!IS_ERR_OR_NULL(desc))) {
		handle_irq(desc, regs);
	} else {
		apic_eoi();

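		/*
		 * desc is either VECTOR_UNUSED (no handler was ever
		 * installed for this vector: warn about it) or an
		 * error-pointer placeholder left behind when the vector
		 * was torn down, which is quietly reset to VECTOR_UNUSED.
		 */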
		if (desc == VECTOR_UNUSED) {
			pr_emerg_ratelimited("%s: %d.%u No irq handler for vector\n",
					     __func__, smp_processor_id(),
					     vector);
		} else {
			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
		}
	}

	set_irq_regs(old_regs);
}

#ifdef CONFIG_X86_LOCAL_APIC
/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;

/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
DEFINE_IDTENTRY_SYSVEC(sysvec_x86_platform_ipi)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	apic_eoi();
	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
	inc_irq_stat(x86_platform_ipis);
	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();
	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
	set_irq_regs(old_regs);
}
#endif

#ifdef CONFIG_HAVE_KVM
static void dummy_handler(void) {}
static void (*kvm_posted_intr_wakeup_handler)(void) = dummy_handler;
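
/*
 * Install or clear the wakeup handler invoked from
 * sysvec_kvm_posted_intr_wakeup_ipi(). When clearing, fall back to
 * dummy_handler and use synchronize_rcu() to wait until no CPU is still
 * executing the old handler before the caller tears it down.
 */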
void kvm_set_posted_intr_wakeup_handler(void (*handler)(void))
{
	if (handler)
		kvm_posted_intr_wakeup_handler = handler;
	else {
		kvm_posted_intr_wakeup_handler = dummy_handler;
		synchronize_rcu();
	}
}
EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler);

/*
 * Handler for POSTED_INTERRUPT_VECTOR.
 */
DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_ipi)
{
	apic_eoi();
	inc_irq_stat(kvm_posted_intr_ipis);
}

/*
 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
 */
DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_posted_intr_wakeup_ipi)
{
	apic_eoi();
	inc_irq_stat(kvm_posted_intr_wakeup_ipis);
	kvm_posted_intr_wakeup_handler();
}

/*
 * Handler for POSTED_INTERRUPT_NESTED_VECTOR.
 */
DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_nested_ipi)
{
	apic_eoi();
	inc_irq_stat(kvm_posted_intr_nested_ipis);
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irr, vector;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	irq_migrate_all_off_this_cpu();

	/*
	 * We can remove mdelay() and then send spurious interrupts to
	 * new cpu targets for all the irqs that were handled previously by
	 * this cpu. While it works, I have seen spurious interrupt messages
	 * (nothing wrong but still...).
	 *
	 * So for now, retain mdelay(1) and check the IRR and then send those
	 * interrupts to new targets as this cpu is already offlined...
	 */
	mdelay(1);

	/*
	 * We can walk the vector array of this cpu without holding
	 * vector_lock because the cpu is already marked !online, so
	 * nothing else will touch it.
	 */
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
			continue;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1 << (vector % 32))) {
			desc = __this_cpu_read(vector_irq[vector]);

			raw_spin_lock(&desc->lock);
			data = irq_desc_get_irq_data(desc);
			chip = irq_data_get_irq_chip(data);
			if (chip->irq_retrigger) {
				chip->irq_retrigger(data);
				__this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
			}
			raw_spin_unlock(&desc->lock);
		}
		if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
	}
}
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
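/*
 * Dispatch an LVT thermal interrupt: hand it to the Intel thermal
 * handler when thermal handling is enabled, otherwise complain about
 * the unexpected interrupt.
 */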
static void smp_thermal_vector(void)
{
	if (x86_thermal_enabled())
		intel_thermal_interrupt();
	else
		pr_err("CPU%d: Unexpected LVT thermal interrupt!\n",
		       smp_processor_id());
}

DEFINE_IDTENTRY_SYSVEC(sysvec_thermal)
{
	trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
	inc_irq_stat(irq_thermal_count);
	smp_thermal_vector();
	trace_thermal_apic_exit(THERMAL_APIC_VECTOR);
	apic_eoi();
}
#endif