linux/arch/x86/include/asm/irq_stack.h
Thomas Gleixner e7f8900179 x86/irq: Sanitize irq stack tracking
The recursion protection for hard interrupt stacks is a per-CPU unsigned int
variable named __irq_count, initialized to -1.

The irq stack switching is only done when the variable is -1, which creates
worse code than just checking for 0. When the stack switching happens it
uses this_cpu_add/sub(1), but there is no reason to do so: it can simply
use plain writes. This is a historical leftover from the low level ASM
code, which used inc and jz to make the decision.

Rename it to hardirq_stack_inuse, make it a bool and use plain stores.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20210210002512.228830141@linutronix.de
2021-02-10 23:34:13 +01:00
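
A minimal standalone sketch of the change described above. The names
__irq_count and hardirq_stack_inuse come from the commit message; the
per-CPU accessors are replaced by plain variables, and old_enter()/
new_enter() are hypothetical stand-ins for the real stack-switching
helpers:

#include <stdbool.h>
#include <stdio.h>

/* Old scheme: counter starting at -1, adjusted with this_cpu_add/sub(1). */
static unsigned int __irq_count = -1;	/* -1: irq stack not in use */

static void old_enter(void (*func)(void))
{
	if (__irq_count == -1) {	/* compare against -1: worse code than a zero test */
		__irq_count += 1;	/* this_cpu_add(1) in the real code */
		func();			/* would run on the irq stack here */
		__irq_count -= 1;	/* this_cpu_sub(1) */
	} else {
		func();			/* recursion: stay on the current stack */
	}
}

/* New scheme: a bool tested against false and updated with plain stores. */
static bool hardirq_stack_inuse;

static void new_enter(void (*func)(void))
{
	if (!hardirq_stack_inuse) {		/* simple zero test */
		hardirq_stack_inuse = true;	/* plain store, no read-modify-write */
		func();
		hardirq_stack_inuse = false;
	} else {
		func();
	}
}

static void handler(void) { puts("handler"); }

int main(void)
{
	old_enter(handler);
	new_enter(handler);
	return 0;
}

Both variants are functionally equivalent; the bool version merely lets the
compiler emit a zero test and two stores instead of a compare against -1
plus increment/decrement arithmetic.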


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_IRQ_STACK_H
#define _ASM_X86_IRQ_STACK_H

#include <linux/ptrace.h>

#include <asm/processor.h>

#ifdef CONFIG_X86_64
/* True while a handler is already running on the hardirq stack. */
static __always_inline bool irqstack_active(void)
{
	return __this_cpu_read(hardirq_stack_inuse);
}

/* Low-level ASM helpers which switch the stack pointer to @sp and call @func. */
void asm_call_on_stack(void *sp, void (*func)(void), void *arg);
void asm_call_sysvec_on_stack(void *sp, void (*func)(struct pt_regs *regs),
			      struct pt_regs *regs);
void asm_call_irq_on_stack(void *sp, void (*func)(struct irq_desc *desc),
			   struct irq_desc *desc);

static __always_inline void __run_on_irqstack(void (*func)(void))
{
	void *tos = __this_cpu_read(hardirq_stack_ptr);

	/*
	 * Mark the stack in use around the call. tos - 8 leaves room for
	 * the ASM helper to store the previous stack pointer at the top
	 * of the irq stack for the unwinder.
	 */
	__this_cpu_write(hardirq_stack_inuse, true);
	asm_call_on_stack(tos - 8, func, NULL);
	__this_cpu_write(hardirq_stack_inuse, false);
}

static __always_inline void
__run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
			 struct pt_regs *regs)
{
	void *tos = __this_cpu_read(hardirq_stack_ptr);

	__this_cpu_write(hardirq_stack_inuse, true);
	asm_call_sysvec_on_stack(tos - 8, func, regs);
	__this_cpu_write(hardirq_stack_inuse, false);
}

static __always_inline void
__run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
		      struct irq_desc *desc)
{
	void *tos = __this_cpu_read(hardirq_stack_ptr);

	__this_cpu_write(hardirq_stack_inuse, true);
	asm_call_irq_on_stack(tos - 8, func, desc);
	__this_cpu_write(hardirq_stack_inuse, false);
}
#else /* CONFIG_X86_64 */
/* 32-bit handles its irq stack switching elsewhere; these are stubs. */
static inline bool irqstack_active(void) { return false; }
static inline void __run_on_irqstack(void (*func)(void)) { }
static inline void __run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
					    struct pt_regs *regs) { }
static inline void __run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
					 struct irq_desc *desc) { }
#endif /* !CONFIG_X86_64 */
/*
 * An interrupt which arrived in user mode runs on the freshly entered
 * task stack, so no switch is required there. Otherwise switch unless
 * the irq stack is already in use.
 */
static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return false;
	if (!regs)
		return !irqstack_active();

	return !user_mode(regs) && !irqstack_active();
}

/* Stack switching must not be interrupted, hence the lockdep assert. */
static __always_inline void run_on_irqstack_cond(void (*func)(void),
						 struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	if (irq_needs_irq_stack(regs))
		__run_on_irqstack(func);
	else
		func();
}

static __always_inline void
run_sysvec_on_irqstack_cond(void (*func)(struct pt_regs *regs),
			    struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	if (irq_needs_irq_stack(regs))
		__run_sysvec_on_irqstack(func, regs);
	else
		func(regs);
}

static __always_inline void
run_irq_on_irqstack_cond(void (*func)(struct irq_desc *desc),
			 struct irq_desc *desc,
			 struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	if (irq_needs_irq_stack(regs))
		__run_irq_on_irqstack(func, desc);
	else
		func(desc);
}

#endif /* _ASM_X86_IRQ_STACK_H */
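
For context, a sketch of how the conditional helpers are invoked. This is
paraphrased from the callers around this commit (arch/x86/kernel/irq.c,
arch/x86/kernel/irq_64.c and the sysvec entry macros); the surrounding
functions are simplified and not complete:

/* Device interrupt: run the flow handler on the irq stack if needed. */
static void handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_X86_64))
		run_irq_on_irqstack_cond(desc->handle_irq, desc, regs);
	else
		__handle_irq(desc, regs);
}

/* Softirq processing arrives without pt_regs, hence regs == NULL. */
void do_softirq_own_stack(void)
{
	run_on_irqstack_cond(__do_softirq, NULL);
}

In both cases the helper falls back to a direct call when the CPU is
already on the irq stack or the entry came from user mode.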