When switching to an 'mm_struct' for the first time following an ASID
rollover, a new ASID may be allocated and assigned to 'mm->context.id'.
This reassignment can happen concurrently with other operations on the
mm, such as unmapping pages and subsequently issuing TLB invalidation.

Consequently, we need to ensure that (a) accesses to 'mm->context.id'
are atomic and (b) all page-table updates made prior to a TLBI using the
old ASID are guaranteed to be visible to CPUs running with the new ASID.

This was found by inspection after reviewing the VMID changes from
Shameer but it looks like a real (yet hard to hit) bug.

Cc: <stable@vger.kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Jade Alglave <jade.alglave@arm.com>
Cc: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: Will Deacon <will@kernel.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20210806113109.2475-2-will@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
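
For reference, a minimal sketch of what (a) and (b) translate to in a
single-page unmap path is shown below. The helper name
example_clear_flush_one() is hypothetical, and the
xchg_relaxed()/dsb()/__tlbi()/__TLBI_VADDR() calls are only meant to mirror
the race diagram added to asm/mmu.h, not the exact in-tree code (which lives
in ptep_clear_flush() and asm/tlbflush.h):

/*
 * Hypothetical example, assuming <asm/tlbflush.h>, <asm/barrier.h> and
 * <asm/pgtable.h> are available.
 */
static inline void example_clear_flush_one(struct mm_struct *mm,
                                           unsigned long uaddr, pte_t *ptep)
{
        unsigned long asid;

        xchg_relaxed(&pte_val(*ptep), 0);  /* tear down the old mapping   */
        dsb(ishst);                        /* publish the PTE write ...   */
        asid = ASID(mm);                   /* ... before the atomic read  */
        __tlbi(vale1is, __TLBI_VADDR(uaddr, asid));
        dsb(ish);                          /* wait for the TLBI to finish */
}

If a rollover reallocates the ASID concurrently, ASID(mm) returns either the
old or the new value in full (no tearing), and the DSB, together with the
dependency on the CPU installing the new ASID, ensures the cleared PTE is
visible to hardware walks under the new ASID, so a TLBI on whichever value
was read is sufficient.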
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_H
#define __ASM_MMU_H

#include <asm/cputype.h>

#define MMCF_AARCH32    0x1     /* mm context flag for AArch32 executables */
#define USER_ASID_BIT   48
#define USER_ASID_FLAG  (UL(1) << USER_ASID_BIT)
#define TTBR_ASID_MASK  (UL(0xffff) << 48)

#ifndef __ASSEMBLY__

#include <linux/refcount.h>

typedef struct {
        atomic64_t      id;
#ifdef CONFIG_COMPAT
        void            *sigpage;
#endif
        refcount_t      pinned;
        void            *vdso;
        unsigned long   flags;
} mm_context_t;

/*
 * We use atomic64_read() here because the ASID for an 'mm_struct' can
 * be reallocated when scheduling one of its threads following a
 * rollover event (see new_context() and flush_context()). In this case,
 * a concurrent TLBI (e.g. via try_to_unmap_one() and ptep_clear_flush())
 * may use a stale ASID. This is fine in principle as the new ASID is
 * guaranteed to be clean in the TLB, but the TLBI routines have to take
 * care to handle the following race:
 *
 *    CPU 0                    CPU 1                          CPU 2
 *
 *    // ptep_clear_flush(mm)
 *    xchg_relaxed(pte, 0)
 *    DSB ISHST
 *    old = ASID(mm)
 *         |                   <rollover>
 *         |                   new = new_context(mm)
 *         \-----------------> atomic_set(mm->context.id, new)
 *                             cpu_switch_mm(mm)
 *                             // Hardware walk of pte using new ASID
 *    TLBI(old)
 *
 * In this scenario, the barrier on CPU 0 and the dependency on CPU 1
 * ensure that the page-table walker on CPU 1 *must* see the invalid PTE
 * written by CPU 0.
 */
#define ASID(mm)        (atomic64_read(&(mm)->context.id) & 0xffff)
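
/*
 * Illustrative sketch (added for exposition, not part of the original
 * header): the 16-bit value returned by ASID() is placed in bits [63:48]
 * of a TLBI operand, next to the page number of the address being
 * invalidated. The macro name below is hypothetical; it mirrors the
 * encoding used by __TLBI_VADDR() in <asm/tlbflush.h> and assumes
 * GENMASK_ULL() from <linux/bits.h>.
 */
#define EXAMPLE_TLBI_VADDR(addr, asid)                          \
        ({                                                      \
                unsigned long __ta = (addr) >> 12;              \
                __ta &= GENMASK_ULL(43, 0);                     \
                __ta |= (unsigned long)(asid) << 48;            \
                __ta;                                           \
        })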

static inline bool arm64_kernel_unmapped_at_el0(void)
{
        return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}
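
/*
 * Illustrative sketch (added for exposition, not part of the original
 * header): with kpti enabled, the user and kernel ASIDs of an 'mm' differ
 * only in the low ASID bit, which sits at USER_ASID_BIT in the TTBR/TLBI
 * encoding. The TLB maintenance helpers therefore repeat each invalidation
 * with USER_ASID_FLAG set, roughly as below; the macro name is hypothetical
 * and mirrors __tlbi_user() in <asm/tlbflush.h>.
 */
#define example_tlbi_user(op, arg)                              \
do {                                                            \
        if (arm64_kernel_unmapped_at_el0())                     \
                __tlbi(op, (arg) | USER_ASID_FLAG);             \
} while (0)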

extern void arm64_memblock_init(void);
extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
                               unsigned long virt, phys_addr_t size,
                               pgprot_t prot, bool page_mappings_only);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
extern void mark_linear_text_alias_ro(void);
extern bool kaslr_requires_kpti(void);

#define INIT_MM_CONTEXT(name)   \
        .pgd = init_pg_dir,

#endif  /* !__ASSEMBLY__ */
#endif