Since at least the beginning of the git era we've declared our TLB
exception handling functions inconsistently. They're actually functions,
but we declare them as arrays of u32 where each u32 is an encoded
instruction. This has always been the case for arch/mips/mm/tlbex.c, and
has also been true for arch/mips/kernel/traps.c since commit 86a1708a9d54
("MIPS: Make tlb exception handler definitions and declarations match.")
which aimed for consistency but did so by consistently making our C code
inconsistent with our assembly.

This is all usually harmless, but when using GCC 7 or newer to build a
kernel targeting microMIPS (ie. CONFIG_CPU_MICROMIPS=y) it becomes
problematic. With microMIPS bit 0 of the program counter indicates the
ISA mode. When bit 0 is zero instructions are decoded using the standard
MIPS32 or MIPS64 ISA. When bit 0 is one instructions are decoded using
microMIPS. This means that function pointers become odd - their least
significant bit is one for microMIPS code. We work around this in cases
where we need to access code using loads & stores with our
msk_isa16_mode() macro which simply clears bit 0 of the value it is
given:

  #define msk_isa16_mode(x) ((x) & ~0x1)

For example we do this for our TLB load handler in
build_r4000_tlb_load_handler():

  u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbl);

We then write code to p, expecting it to be suitably aligned (our LEAF
macro aligns functions on 4 byte boundaries, so (ulong)handle_tlbl will
give a value one greater than a multiple of 4 - ie. the start of a
function on a 4 byte boundary, with the ISA mode bit 0 set).

This worked fine up to GCC 6, but GCC 7 & onwards is smart enough to
presume that handle_tlbl, which we declared as an array of u32s, must be
aligned sufficiently that bit 0 of its address will never be set, and as
a result optimizes out msk_isa16_mode(). This leads to p having an
address with bit 0 set, and when we go on to attempt to store code at
that address we take an address error exception due to the unaligned
memory access. This exception occurs before the kernel has configured
its own exception handlers, so we jump to whatever handlers the
bootloader configured. In the case of QEMU this results in a silent
hang, since it has no useful general exception vector.

Fix this by consistently declaring our TLB-related functions as
functions. For handle_tlbl(), handle_tlbs() & handle_tlbm() we do this
in asm/tlbex.h & we make use of the existing declaration of
tlbmiss_handler_setup_pgd() in asm/mmu_context.h. Our TLB handler
generation code in arch/mips/mm/tlbex.c is adjusted to deal with these
definitions, in most cases simply by casting the function pointers to
u32 pointers. This allows us to include asm/mmu_context.h in
arch/mips/mm/tlbex.c to get the definitions of tlbmiss_handler_setup_pgd
& pgd_current, removing some needless duplication. Consistently using
msk_isa16_mode() on function pointers means we no longer need the
tlbmiss_handler_setup_pgd_start symbol, so that is removed entirely.

Now that we're declaring our functions as functions GCC stops optimizing
out msk_isa16_mode() & a microMIPS kernel built with either GCC 7.3.0 or
8.1.0 boots successfully.

Signed-off-by: Paul Burton <paul.burton@mips.com>
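
The effect of the two declaration styles can be sketched as follows.
This is an illustrative fragment only, not the kernel's actual code:
my_handler_as_array, my_handler_as_function and entry_for_writing are
made-up names, and the ISA-mode-bit masking is open-coded where the
kernel uses its msk_isa16_mode() macro.

  #include <stdint.h>

  /*
   * Old style: the handler is declared as an array of encoded
   * instructions. GCC 7+ may assume a u32 array is at least 4-byte
   * aligned and fold "& ~0x1" away, so the microMIPS ISA mode bit
   * survives into the pointer we later store through.
   */
  extern uint32_t my_handler_as_array[];

  /*
   * New style: declared as a function. A microMIPS function address
   * may legitimately be odd, so the compiler has to keep the masking.
   */
  extern void my_handler_as_function(void);

  uint32_t *entry_for_writing(unsigned long addr)
  {
          /* Clear bit 0 (the ISA mode bit) before using addr for stores. */
          return (uint32_t *)(addr & ~0x1UL);
  }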
/*
 * Switch a MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 */
#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/smp.h>
#include <linux/slab.h>

#include <asm/cacheflush.h>
#include <asm/dsemul.h>
#include <asm/hazards.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>

#define htw_set_pwbase(pgd) \
do { \
        if (cpu_has_htw) { \
                write_c0_pwbase(pgd); \
                back_to_back_c0_hazard(); \
        } \
} while (0)
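
/*
 * tlbmiss_handler_setup_pgd() has no C definition: like handle_tlbl(),
 * handle_tlbs() & handle_tlbm() its body is generated at runtime by the
 * TLB handler generation code in arch/mips/mm/tlbex.c, hence the bare
 * declaration below.
 */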
extern void tlbmiss_handler_setup_pgd(unsigned long);
extern char tlbmiss_handler_setup_pgd_end[];

/* Note: This is also implemented with uasm in arch/mips/kvm/entry.c */
#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
do { \
        tlbmiss_handler_setup_pgd((unsigned long)(pgd)); \
        htw_set_pwbase((unsigned long)pgd); \
} while (0)

#ifdef CONFIG_MIPS_PGD_C0_CONTEXT

#define TLBMISS_HANDLER_RESTORE() \
        write_c0_xcontext((unsigned long) smp_processor_id() << \
                          SMP_CPUID_REGSHIFT)

#define TLBMISS_HANDLER_SETUP() \
        do { \
                TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir); \
                TLBMISS_HANDLER_RESTORE(); \
        } while (0)

#else /* !CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current*/

/*
 * For the fast tlb miss handlers, we keep a per cpu array of pointers
 * to the current pgd for each processor. Also, the proc. id is stuffed
 * into the context register.
 */
extern unsigned long pgd_current[];

#define TLBMISS_HANDLER_RESTORE() \
        write_c0_context((unsigned long) smp_processor_id() << \
                         SMP_CPUID_REGSHIFT)

#define TLBMISS_HANDLER_SETUP() \
        TLBMISS_HANDLER_RESTORE(); \
        back_to_back_c0_hazard(); \
        TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/

/*
 * All unused by hardware upper bits will be considered
 * as a software asid extension.
 */
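/*
 * For example (illustrative numbers): with an 8-bit hardware ASID,
 * cpu_asid_mask() is 0xff, asid_version_mask() returns ~0xff (the
 * software version bits) and asid_first_version() returns 0x100, the
 * first value of the version counter.
 */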
static unsigned long asid_version_mask(unsigned int cpu)
{
        unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);

        return ~(asid_mask | (asid_mask - 1));
}

static unsigned long asid_first_version(unsigned int cpu)
{
        return ~asid_version_mask(cpu) + 1;
}

#define cpu_context(cpu, mm)    ((mm)->context.asid[cpu])
#define asid_cache(cpu)         (cpu_data[cpu].asid_cache)
#define cpu_asid(cpu, mm) \
        (cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
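
/*
 * cpu_context(cpu, mm) combines the hardware ASID (the low bits) with a
 * software version number (the bits selected by asid_version_mask()).
 * When the hardware ASID space wraps, get_new_mmu_context() flushes the
 * TLB and moves on to the next version, so switch_mm() can detect a
 * context tagged with an old version as stale.
 */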
/* Normal, classic MIPS get_new_mmu_context */
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
        unsigned long asid = asid_cache(cpu);

        if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
                if (cpu_has_vtag_icache)
                        flush_icache_all();
                local_flush_tlb_all();  /* start new asid cycle */
                if (!asid)              /* fix version if needed */
                        asid = asid_first_version(cpu);
        }

        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        int i;

        for_each_possible_cpu(i)
                cpu_context(i, mm) = 0;

        mm->context.bd_emupage_allocmap = NULL;
        spin_lock_init(&mm->context.bd_emupage_lock);
        init_waitqueue_head(&mm->context.bd_emupage_queue);

        return 0;
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned int cpu = smp_processor_id();
        unsigned long flags;
        local_irq_save(flags);

        htw_stop();
        /* Check if our ASID is of an older version and thus invalid */
        if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & asid_version_mask(cpu))
                get_new_mmu_context(next, cpu);
        write_c0_entryhi(cpu_asid(cpu, next));
        TLBMISS_HANDLER_SETUP_PGD(next->pgd);

        /*
         * Mark current->active_mm as not "active" anymore.
         * We don't want to mislead possible IPI tlb flush routines.
         */
        cpumask_clear_cpu(cpu, mm_cpumask(prev));
        cpumask_set_cpu(cpu, mm_cpumask(next));
        htw_start();

        local_irq_restore(flags);
}

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
        dsemul_mm_cleanup(mm);
}

#define deactivate_mm(tsk, mm)  do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
        unsigned long flags;
        unsigned int cpu = smp_processor_id();

        local_irq_save(flags);

        htw_stop();
        /* Unconditionally get a new ASID. */
        get_new_mmu_context(next, cpu);

        write_c0_entryhi(cpu_asid(cpu, next));
        TLBMISS_HANDLER_SETUP_PGD(next->pgd);

        /* mark mmu ownership change */
        cpumask_clear_cpu(cpu, mm_cpumask(prev));
        cpumask_set_cpu(cpu, mm_cpumask(next));
        htw_start();

        local_irq_restore(flags);
}

/*
 * If mm is currently active_mm, we can't really drop it. Instead,
 * we will get a new one for it.
 */
static inline void
drop_mmu_context(struct mm_struct *mm, unsigned cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        htw_stop();

        if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
                get_new_mmu_context(mm, cpu);
                write_c0_entryhi(cpu_asid(cpu, mm));
        } else {
                /* will get a new context next time */
                cpu_context(cpu, mm) = 0;
        }
        htw_start();
        local_irq_restore(flags);
}

#endif /* _ASM_MMU_CONTEXT_H */