6be22809e5
* for-next/elf-hwcap-docs:
  : Update the arm64 ELF HWCAP documentation
  docs/arm64: cpu-feature-registers: Rewrite bitfields that don't follow [e, s]
  docs/arm64: cpu-feature-registers: Documents missing visible fields
  docs/arm64: elf_hwcaps: Document HWCAP_SB
  docs/arm64: elf_hwcaps: sort the HWCAP{, 2} documentation by ascending value

* for-next/smccc-conduit-cleanup:
  : SMC calling convention conduit clean-up
  firmware: arm_sdei: use common SMCCC_CONDUIT_*
  firmware/psci: use common SMCCC_CONDUIT_*
  arm: spectre-v2: use arm_smccc_1_1_get_conduit()
  arm64: errata: use arm_smccc_1_1_get_conduit()
  arm/arm64: smccc/psci: add arm_smccc_1_1_get_conduit()

* for-next/zone-dma:
  : Reintroduction of ZONE_DMA for Raspberry Pi 4 support
  arm64: mm: reserve CMA and crashkernel in ZONE_DMA32
  dma/direct: turn ARCH_ZONE_DMA_BITS into a variable
  arm64: Make arm64_dma32_phys_limit static
  arm64: mm: Fix unused variable warning in zone_sizes_init
  mm: refresh ZONE_DMA and ZONE_DMA32 comments in 'enum zone_type'
  arm64: use both ZONE_DMA and ZONE_DMA32
  arm64: rename variables used to calculate ZONE_DMA32's size
  arm64: mm: use arm64_dma_phys_limit instead of calling max_zone_dma_phys()

* for-next/relax-icc_pmr_el1-sync:
  : Relax ICC_PMR_EL1 (GICv3) accesses when ICC_CTLR_EL1.PMHE is clear
  arm64: Document ICC_CTLR_EL3.PMHE setting requirements
  arm64: Relax ICC_PMR_EL1 accesses when ICC_CTLR_EL1.PMHE is clear

* for-next/double-page-fault:
  : Avoid a double page fault in __copy_from_user_inatomic() if hw does not support auto Access Flag
  mm: fix double page fault on arm64 if PTE_AF is cleared
  x86/mm: implement arch_faults_on_old_pte() stub on x86
  arm64: mm: implement arch_faults_on_old_pte() on arm64
  arm64: cpufeature: introduce helper cpu_has_hw_af()

* for-next/misc:
  : Various fixes and clean-ups
  arm64: kpti: Add NVIDIA's Carmel core to the KPTI whitelist
  arm64: mm: Remove MAX_USER_VA_BITS definition
  arm64: mm: simplify the page end calculation in __create_pgd_mapping()
  arm64: print additional fault message when executing non-exec memory
  arm64: psci: Reduce the waiting time for cpu_psci_cpu_kill()
  arm64: pgtable: Correct typo in comment
  arm64: docs: cpu-feature-registers: Document ID_AA64PFR1_EL1
  arm64: cpufeature: Fix typos in comment
  arm64/mm: Poison initmem while freeing with free_reserved_area()
  arm64: use generic free_initrd_mem()
  arm64: simplify syscall wrapper ifdeffery

* for-next/kselftest-arm64-signal:
  : arm64-specific kselftest support with signal-related test-cases
  kselftest: arm64: fake_sigreturn_misaligned_sp
  kselftest: arm64: fake_sigreturn_bad_size
  kselftest: arm64: fake_sigreturn_duplicated_fpsimd
  kselftest: arm64: fake_sigreturn_missing_fpsimd
  kselftest: arm64: fake_sigreturn_bad_size_for_magic0
  kselftest: arm64: fake_sigreturn_bad_magic
  kselftest: arm64: add helper get_current_context
  kselftest: arm64: extend test_init functionalities
  kselftest: arm64: mangle_pstate_invalid_mode_el[123][ht]
  kselftest: arm64: mangle_pstate_invalid_daif_bits
  kselftest: arm64: mangle_pstate_invalid_compat_toggle and common utils
  kselftest: arm64: extend toplevel skeleton Makefile

* for-next/kaslr-diagnostics:
  : Provide diagnostics on boot for KASLR
  arm64: kaslr: Check command line before looking for a seed
  arm64: kaslr: Announce KASLR status on boot
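For context on the smccc-conduit-cleanup branch above, here is a minimal sketch of how a caller might pick the firmware conduit with the helper named in those patch titles. It assumes only the arm_smccc_1_1_get_conduit() / SMCCC_CONDUIT_* interface from linux/arm-smccc.h; do_firmware_call() and fn_id are hypothetical and are not code from these patches.

#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical caller: issue a fast call over whichever conduit firmware advertises. */
static long do_firmware_call(u32 fn_id)
{
	struct arm_smccc_res res;

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		arm_smccc_1_1_hvc(fn_id, &res);
		break;
	case SMCCC_CONDUIT_SMC:
		arm_smccc_1_1_smc(fn_id, &res);
		break;
	default:
		/* SMCCC_CONDUIT_NONE: no SMCCC v1.1 conduit available */
		return -EOPNOTSUPP;
	}

	return res.a0;
}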
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2017 ARM Ltd.
 */
#ifndef __ASM_DAIFFLAGS_H
#define __ASM_DAIFFLAGS_H

#include <linux/irqflags.h>

#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/ptrace.h>

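/*
 * PSTATE.DAIF values used with local_daif_restore(): DAIF_PROCCTX leaves all
 * exceptions unmasked, DAIF_PROCCTX_NOIRQ masks IRQs, DAIF_ERRCTX masks IRQs
 * and SError, and DAIF_MASK covers all four DAIF bits.
 */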
#define DAIF_PROCCTX		0
#define DAIF_PROCCTX_NOIRQ	PSR_I_BIT
#define DAIF_ERRCTX		(PSR_I_BIT | PSR_A_BIT)
#define DAIF_MASK		(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)


/* mask/save/unmask/restore all exceptions, including interrupts. */
static inline void local_daif_mask(void)
{
	WARN_ON(system_has_prio_mask_debugging() &&
		(read_sysreg_s(SYS_ICC_PMR_EL1) == (GIC_PRIO_IRQOFF |
						    GIC_PRIO_PSR_I_SET)));

	asm volatile(
		"msr daifset, #0xf // local_daif_mask\n"
		:
		:
		: "memory");

	/* Don't really care for a dsb here, we don't intend to enable IRQs */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	trace_hardirqs_off();
}

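/*
 * Save the current DAIF flags, folding PMR-based IRQ masking into PSR_I_BIT
 * when priority masking is in use, then mask all exceptions.
 */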
static inline unsigned long local_daif_save(void)
{
	unsigned long flags;

	flags = read_sysreg(daif);

	if (system_uses_irq_prio_masking()) {
		/* If IRQs are masked with PMR, reflect it in the flags */
		if (read_sysreg_s(SYS_ICC_PMR_EL1) != GIC_PRIO_IRQON)
			flags |= PSR_I_BIT;
	}

	local_daif_mask();

	return flags;
}

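/*
 * Restore a DAIF state previously returned by local_daif_save() (or one of
 * the DAIF_* values above), updating PMR to match when priority masking is
 * in use.
 */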
static inline void local_daif_restore(unsigned long flags)
{
	bool irq_disabled = flags & PSR_I_BIT;

	WARN_ON(system_has_prio_mask_debugging() &&
		!(read_sysreg(daif) & PSR_I_BIT));

	if (!irq_disabled) {
		trace_hardirqs_on();

		if (system_uses_irq_prio_masking()) {
			gic_write_pmr(GIC_PRIO_IRQON);
			pmr_sync();
		}
	} else if (system_uses_irq_prio_masking()) {
		u64 pmr;

		if (!(flags & PSR_A_BIT)) {
			/*
			 * If interrupts are disabled but we can take
			 * asynchronous errors, we can take NMIs.
			 */
			flags &= ~PSR_I_BIT;
			pmr = GIC_PRIO_IRQOFF;
		} else {
			pmr = GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET;
		}

		/*
		 * There has been concern that the write to daif
		 * might be reordered before this write to PMR.
		 * From the ARM ARM DDI 0487D.a, section D1.7.1
		 * "Accessing PSTATE fields":
		 *   Writes to the PSTATE fields have side-effects on
		 *   various aspects of the PE operation. All of these
		 *   side-effects are guaranteed:
		 *   - Not to be visible to earlier instructions in
		 *     the execution stream.
		 *   - To be visible to later instructions in the
		 *     execution stream.
		 *
		 * Also, writes to PMR are self-synchronizing, so no
		 * interrupts with a lower priority than PMR are signaled
		 * to the PE after the write.
		 *
		 * So we don't need additional synchronization here.
		 */
		gic_write_pmr(pmr);
	}

	write_sysreg(flags, daif);

	if (irq_disabled)
		trace_hardirqs_off();
}

/*
 * Called by synchronous exception handlers to restore the DAIF bits that were
 * modified by taking an exception.
 */
static inline void local_daif_inherit(struct pt_regs *regs)
{
	unsigned long flags = regs->pstate & DAIF_MASK;

	/*
	 * We can't use local_daif_restore(regs->pstate) here as
	 * system_has_prio_mask_debugging() won't restore the I bit if it can
	 * use the pmr instead.
	 */
	write_sysreg(flags, daif);
}
#endif
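A minimal usage sketch for the helpers above, assuming kernel context: mask every exception type around a critical section and then put the previous DAIF/PMR state back. poke_hw_all_masked() and hw_poke() are hypothetical names and are not part of this header or its callers.

#include <asm/daifflags.h>

/* Stand-in for work that must run with all exceptions masked (hypothetical). */
static void hw_poke(void)
{
}

static void poke_hw_all_masked(void)
{
	unsigned long flags;

	flags = local_daif_save();	/* mask D, A, I and F; return the previous state */
	hw_poke();
	local_daif_restore(flags);	/* re-apply the saved DAIF/PMR masking state */
}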