linux/arch/arm64/include/asm/irqflags.h
Mark Rutland 5235c7e2cf arm64: alternatives: use cpucap naming
To more clearly align the various users of the cpucap enumeration, this patch
changes the alternative code to use the term `cpucap` in favour of `feature`.
The alternative_has_feature_{likely,unlikely}() functions are renamed to
alternative_has_cap_{likely,unlikely}() to more clearly align with the
cpus_have_{const_,}cap() helpers.
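
For example, the check used by the irqflags code:

  alternative_has_feature_unlikely(ARM64_HAS_GIC_PRIO_MASKING)

becomes:

  alternative_has_cap_unlikely(ARM64_HAS_GIC_PRIO_MASKING)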

At the same time, remove the stale comment referring to the "ARM64_CB
bit", which is evidently a typo for ARM64_CB_PATCH, which was removed in
commit:

  4c0bd995d73ed889 ("arm64: alternatives: have callbacks take a cap")

There should be no functional change as a result of this patch; this is
purely a renaming exercise.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Mark Brown <broonie@kernel.org>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20230607164846.3967305-3-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2023-06-07 17:57:47 +01:00


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_IRQFLAGS_H
#define __ASM_IRQFLAGS_H

#include <asm/alternative.h>
#include <asm/barrier.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

/*
 * AArch64 has flags for masking: Debug, Asynchronous (SError), Interrupts and
 * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'daif'
 * order:
 * Masking debug exceptions causes all other exceptions to be masked too.
 * Masking SError masks IRQ/FIQ, but not debug exceptions. IRQ and FIQ are
 * always masked and unmasked together, and have no side effects for other
 * flags. Keeping to this order makes it easier for entry.S to know which
 * exceptions should be unmasked.
 */
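
/*
 * With CONFIG_ARM64_PSEUDO_NMI and the ARM64_HAS_GIC_PRIO_MASKING cpucap,
 * normal IRQs are masked by raising the GIC priority mask (ICC_PMR_EL1)
 * rather than by setting PSTATE.I/F, leaving higher-priority NMI-like
 * interrupts deliverable.
 */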
static __always_inline bool __irqflags_uses_pmr(void)
{
	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
	       alternative_has_cap_unlikely(ARM64_HAS_GIC_PRIO_MASKING);
}
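
/*
 * 'msr daifclr, #3' clears PSTATE.I and PSTATE.F, unmasking IRQ and FIQ
 * together (bit 1 = I, bit 0 = F in the DAIF immediate).
 */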
static __always_inline void __daif_local_irq_enable(void)
{
	barrier();
	asm volatile("msr daifclr, #3");
	barrier();
}
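
/*
 * Unmask IRQs by raising the priority mask to GIC_PRIO_IRQON. pmr_sync()
 * ensures the ICC_PMR_EL1 write has taken effect before execution continues,
 * so that any pending interrupt can be taken immediately.
 */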
static __always_inline void __pmr_local_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING)) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);

		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	barrier();
	write_sysreg_s(GIC_PRIO_IRQON, SYS_ICC_PMR_EL1);
	pmr_sync();
	barrier();
}
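
/*
 * The arch_local_irq_*() helpers below select the PMR or DAIF flavour at
 * runtime. The __irqflags_uses_pmr() check is patched via the cpucap
 * alternatives, so once the system has booted it costs a patched branch
 * rather than a flag load.
 */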
static inline void arch_local_irq_enable(void)
{
	if (__irqflags_uses_pmr()) {
		__pmr_local_irq_enable();
	} else {
		__daif_local_irq_enable();
	}
}

static __always_inline void __daif_local_irq_disable(void)
{
	barrier();
	asm volatile("msr daifset, #3");
	barrier();
}
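
/*
 * When CONFIG_ARM64_DEBUG_PRIORITY_MASKING is enabled, check that the PMR is
 * in one of the two canonical states (GIC_PRIO_IRQON/GIC_PRIO_IRQOFF) before
 * it is rewritten.
 */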
static __always_inline void __pmr_local_irq_disable(void)
{
	if (IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING)) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);

		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	barrier();
	write_sysreg_s(GIC_PRIO_IRQOFF, SYS_ICC_PMR_EL1);
	barrier();
}

static inline void arch_local_irq_disable(void)
{
	if (__irqflags_uses_pmr()) {
		__pmr_local_irq_disable();
	} else {
		__daif_local_irq_disable();
	}
}

static __always_inline unsigned long __daif_local_save_flags(void)
{
	return read_sysreg(daif);
}

static __always_inline unsigned long __pmr_local_save_flags(void)
{
	return read_sysreg_s(SYS_ICC_PMR_EL1);
}

/*
 * Save the current interrupt enable state.
 */
static inline unsigned long arch_local_save_flags(void)
{
	if (__irqflags_uses_pmr()) {
		return __pmr_local_save_flags();
	} else {
		return __daif_local_save_flags();
	}
}
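
/*
 * For the DAIF flavour the saved flags are a PSTATE.DAIF image: IRQs are
 * considered disabled when the I bit is set.
 */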
static __always_inline bool __daif_irqs_disabled_flags(unsigned long flags)
{
	return flags & PSR_I_BIT;
}
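
/*
 * For the PMR flavour the saved flags are the ICC_PMR_EL1 value itself: any
 * priority other than GIC_PRIO_IRQON means IRQs are masked.
 */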
static __always_inline bool __pmr_irqs_disabled_flags(unsigned long flags)
{
	return flags != GIC_PRIO_IRQON;
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	if (__irqflags_uses_pmr()) {
		return __pmr_irqs_disabled_flags(flags);
	} else {
		return __daif_irqs_disabled_flags(flags);
	}
}

static __always_inline bool __daif_irqs_disabled(void)
{
	return __daif_irqs_disabled_flags(__daif_local_save_flags());
}

static __always_inline bool __pmr_irqs_disabled(void)
{
	return __pmr_irqs_disabled_flags(__pmr_local_save_flags());
}

static inline bool arch_irqs_disabled(void)
{
	if (__irqflags_uses_pmr()) {
		return __pmr_irqs_disabled();
	} else {
		return __daif_irqs_disabled();
	}
}

static __always_inline unsigned long __daif_local_irq_save(void)
{
	unsigned long flags = __daif_local_save_flags();

	__daif_local_irq_disable();

	return flags;
}

static __always_inline unsigned long __pmr_local_irq_save(void)
{
	unsigned long flags = __pmr_local_save_flags();

	/*
	 * There are too many states with IRQs disabled, just keep the current
	 * state if interrupts are already disabled/masked.
	 */
	if (!__pmr_irqs_disabled_flags(flags))
		__pmr_local_irq_disable();

	return flags;
}

static inline unsigned long arch_local_irq_save(void)
{
	if (__irqflags_uses_pmr()) {
		return __pmr_local_irq_save();
	} else {
		return __daif_local_irq_save();
	}
}

static __always_inline void __daif_local_irq_restore(unsigned long flags)
{
	barrier();
	write_sysreg(flags, daif);
	barrier();
}

static __always_inline void __pmr_local_irq_restore(unsigned long flags)
{
	barrier();
	write_sysreg_s(flags, SYS_ICC_PMR_EL1);
	pmr_sync();
	barrier();
}

/*
 * restore saved IRQ state
 */
static inline void arch_local_irq_restore(unsigned long flags)
{
	if (__irqflags_uses_pmr()) {
		__pmr_local_irq_restore(flags);
	} else {
		__daif_local_irq_restore(flags);
	}
}
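
/*
 * Illustrative only: these helpers are normally reached via the generic
 * local_irq_save()/local_irq_restore() wrappers, e.g.:
 *
 *	unsigned long flags = arch_local_irq_save();
 *	...critical section with IRQs masked...
 *	arch_local_irq_restore(flags);
 */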
#endif /* __ASM_IRQFLAGS_H */