arm64/mm: remove now-superfluous ISBs from TTBR writes

At the time of authoring 7655abb953 ("arm64: mm: Move ASID from TTBR0
to TTBR1"), the Arm ARM did not specify any ordering guarantees for
direct writes to TTBR0_ELx and TTBR1_ELx, and so an ISB was required
after each write to ensure that TLBs would only be populated from the
expected (or reserved) tables.
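
A sketch of the pre-patch pattern, mirroring the __uaccess_ttbr0_disable()
hunk further down (illustration only, not the exact kernel code):

  /* old requirement: each direct TTBR write is followed by an ISB */
  write_sysreg(ttbr - RESERVED_SWAPPER_OFFSET, ttbr0_el1);  /* reserved table */
  isb();
  write_sysreg(ttbr, ttbr1_el1);                            /* reserved ASID */
  isb();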

In a recent update to the Arm ARM, the requirements have been relaxed
to reflect the implementation of current CPUs and the required
implementation of future CPUs, and now read (RDYDPX in D8.2.3,
Translation table base address register):

  Direct writes to TTBR0_ELx and TTBR1_ELx occur in program order
  relative to one another, without the need for explicit
  synchronization. For any one translation, all indirect reads of
  TTBR0_ELx and TTBR1_ELx that are made as part of the translation
  observe only one point in that order of direct writes.

Remove the superfluous ISBs to optimize uaccess helpers and context
switch.
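
With the relaxed rule, the two direct TTBR writes can be issued
back-to-back and a single ISB after the final write is enough to
synchronize before subsequent accesses depend on the new tables and
ASID. A sketch of the resulting pattern, mirroring the hunks below
(illustration only, not the exact kernel code):

  /* relaxed rule: no ISB is needed between the two direct TTBR writes */
  write_sysreg(ttbr - RESERVED_SWAPPER_OFFSET, ttbr0_el1);  /* reserved table */
  write_sysreg(ttbr, ttbr1_el1);                            /* reserved ASID */
  isb();                         /* synchronize once, after both writes */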

Cc: Will Deacon <will@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Jamie Iles <quic_jiles@quicinc.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Link: https://lore.kernel.org/r/20230613141959.92697-1-quic_jiles@quicinc.com
[catalin.marinas@arm.com: rename __cpu_set_reserved_ttbr0 to ..._nosync]
[catalin.marinas@arm.com: move the cpu_set_reserved_ttbr0_nosync() call to cpu_do_switch_mm()]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
commit b9293d457f
parent 601eaec513
Author: Jamie Iles, 2023-06-13 15:19:59 +01:00
Committed by: Catalin Marinas
4 changed files with 7 additions and 7 deletions

@@ -18,7 +18,6 @@
 	bic	\tmp1, \tmp1, #TTBR_ASID_MASK
 	sub	\tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET	// reserved_pg_dir
 	msr	ttbr0_el1, \tmp1			// set reserved TTBR0_EL1
-	isb
 	add	\tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET
 	msr	ttbr1_el1, \tmp1			// set reserved ASID
 	isb
@@ -31,7 +30,6 @@
 	extr	\tmp2, \tmp2, \tmp1, #48
 	ror	\tmp2, \tmp2, #16
 	msr	ttbr1_el1, \tmp2		// set the active ASID
-	isb
 	msr	ttbr0_el1, \tmp1		// set the non-PAN TTBR0_EL1
 	isb
 	.endm

@@ -39,11 +39,16 @@ static inline void contextidr_thread_switch(struct task_struct *next)
 /*
  * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
  */
-static inline void cpu_set_reserved_ttbr0(void)
+static inline void cpu_set_reserved_ttbr0_nosync(void)
 {
 	unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
 
 	write_sysreg(ttbr, ttbr0_el1);
+}
+
+static inline void cpu_set_reserved_ttbr0(void)
+{
+	cpu_set_reserved_ttbr0_nosync();
 	isb();
 }
@@ -52,7 +57,6 @@ void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
 static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
 {
 	BUG_ON(pgd == swapper_pg_dir);
-	cpu_set_reserved_ttbr0();
 	cpu_do_switch_mm(virt_to_phys(pgd),mm);
 }

@@ -65,7 +65,6 @@ static inline void __uaccess_ttbr0_disable(void)
 	ttbr &= ~TTBR_ASID_MASK;
 	/* reserved_pg_dir placed before swapper_pg_dir */
 	write_sysreg(ttbr - RESERVED_SWAPPER_OFFSET, ttbr0_el1);
-	isb();
 	/* Set reserved ASID */
 	write_sysreg(ttbr, ttbr1_el1);
 	isb();
@@ -89,7 +88,6 @@ static inline void __uaccess_ttbr0_enable(void)
 	ttbr1 &= ~TTBR_ASID_MASK;		/* safety measure */
 	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
 	write_sysreg(ttbr1, ttbr1_el1);
-	isb();
 
 	/* Restore user page table */
 	write_sysreg(ttbr0, ttbr0_el1);

@@ -364,8 +364,8 @@ void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm)
 	ttbr1 &= ~TTBR_ASID_MASK;
 	ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid);
 
+	cpu_set_reserved_ttbr0_nosync();
 	write_sysreg(ttbr1, ttbr1_el1);
-	isb();
 	write_sysreg(ttbr0, ttbr0_el1);
 	isb();
 	post_ttbr_update_workaround();