Merge remote-tracking branch 'arm64/for-next/vhe-only' into kvmarm-master/next
Signed-off-by: Marc Zyngier <maz@kernel.org>
commit 8320832940
@@ -2279,8 +2279,7 @@
 			   state is kept private from the host.
 			   Not valid if the kernel is running in EL2.
 
-			   Defaults to VHE/nVHE based on hardware support and
-			   the value of CONFIG_ARM64_VHE.
+			   Defaults to VHE/nVHE based on hardware support.
 
 	kvm-arm.vgic_v3_group0_trap=
 			[KVM,ARM] Trap guest accesses to GICv3 group-0
@@ -1416,19 +1416,6 @@ config ARM64_USE_LSE_ATOMICS
 	  built with binutils >= 2.25 in order for the new instructions
 	  to be used.
 
-config ARM64_VHE
-	bool "Enable support for Virtualization Host Extensions (VHE)"
-	default y
-	help
-	  Virtualization Host Extensions (VHE) allow the kernel to run
-	  directly at EL2 (instead of EL1) on processors that support
-	  it. This leads to better performance for KVM, as they reduce
-	  the cost of the world switch.
-
-	  Selecting this option allows the VHE feature to be detected
-	  at runtime, and does not affect processors that do not
-	  implement this feature.
-
 endmenu
 
 menu "ARMv8.2 architectural features"
@@ -63,6 +63,23 @@ struct arm64_ftr_bits {
 	s64		safe_val; /* safe value for FTR_EXACT features */
 };
 
+/*
+ * Describe the early feature override to the core override code:
+ *
+ * @val			Values that are to be merged into the final
+ *			sanitised value of the register. Only the bitfields
+ *			set to 1 in @mask are valid
+ * @mask		Mask of the features that are overridden by @val
+ *
+ * A @mask field set to full-1 indicates that the corresponding field
+ * in @val is a valid override.
+ *
+ * A @mask field set to full-0 with the corresponding @val field set
+ * to full-0 denotes that this field has no override
+ *
+ * A @mask field set to full-0 with the corresponding @val field set
+ * to full-1 denotes that this field has an invalid override.
+ */
 struct arm64_ftr_override {
 	u64		val;
 	u64		mask;
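
The three states described in the new comment can be reproduced with a stand-alone sketch (plain user-space C, not kernel code; the 4-bit field at shift 8 is an arbitrary example):

#include <stdint.h>
#include <stdio.h>

/* Same shape as arm64_ftr_override: a val/mask pair per register */
struct ftr_override {
	uint64_t val;
	uint64_t mask;
};

int main(void)
{
	const int shift = 8, width = 4;			/* illustrative field    */
	const uint64_t field = ((1ULL << width) - 1) << shift;
	struct ftr_override ovr = { 0, 0 };		/* full-0/full-0: no override */

	/* Valid override: requested value 1, mask bits set to full-1 */
	ovr.val  |= 1ULL << shift;
	ovr.mask |= field;
	printf("valid:   %d\n", (ovr.mask & field) == field);

	/* Invalid override: val forced to full-1 while the mask is full-0 */
	ovr.mask &= ~field;
	ovr.val  |= field;
	printf("invalid: %d\n",
	       !(ovr.mask & field) && (ovr.val & field) == field);

	return 0;
}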
@@ -808,6 +808,12 @@ static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
 					reg->name,
 					ftrp->shift + ftrp->width - 1,
 					ftrp->shift, str, tmp);
+		} else if ((ftr_mask & reg->override->val) == ftr_mask) {
+			reg->override->val &= ~ftr_mask;
+			pr_warn("%s[%d:%d]: impossible override, ignored\n",
+				reg->name,
+				ftrp->shift + ftrp->width - 1,
+				ftrp->shift);
 		}
 
 		val = arm64_ftr_set_value(ftrp, val, ftr_new);
@@ -1616,7 +1622,6 @@ int get_cpu_with_amu_feat(void)
 }
 #endif
 
-#ifdef CONFIG_ARM64_VHE
 static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
 {
 	return is_kernel_in_hyp_mode();
@@ -1635,7 +1640,6 @@ static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
 	if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN))
 		write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
 }
-#endif
 
 static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
 {
@@ -1838,7 +1842,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
 		.matches = has_no_hw_prefetch,
 	},
-#ifdef CONFIG_ARM64_VHE
 	{
 		.desc = "Virtualization Host Extensions",
 		.capability = ARM64_HAS_VIRT_HOST_EXTN,
@@ -1846,7 +1849,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = runs_at_el2,
 		.cpu_enable = cpu_copy_el2regs,
 	},
-#endif /* CONFIG_ARM64_VHE */
 	{
 		.desc = "32-bit EL0 Support",
 		.capability = ARM64_HAS_32BIT_EL0,
@@ -477,14 +477,13 @@ EXPORT_SYMBOL(kimage_vaddr)
  * booted in EL1 or EL2 respectively.
  */
 SYM_FUNC_START(init_kernel_el)
-	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
-	msr	sctlr_el1, x0
-
 	mrs	x0, CurrentEL
 	cmp	x0, #CurrentEL_EL2
 	b.eq	init_el2
 
 SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
+	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
+	msr	sctlr_el1, x0
 	isb
 	mov_q	x0, INIT_PSTATE_EL1
 	msr	spsr_el1, x0
@@ -504,9 +503,43 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
 	msr	vbar_el2, x0
 	isb
 
+	/*
+	 * Fruity CPUs seem to have HCR_EL2.E2H set to RES1,
+	 * making it impossible to start in nVHE mode. Is that
+	 * compliant with the architecture? Absolutely not!
+	 */
+	mrs	x0, hcr_el2
+	and	x0, x0, #HCR_E2H
+	cbz	x0, 1f
+
+	/* Switching to VHE requires a sane SCTLR_EL1 as a start */
+	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
+	msr_s	SYS_SCTLR_EL12, x0
+
+	/*
+	 * Force an eret into a helper "function", and let it return
+	 * to our original caller... This makes sure that we have
+	 * initialised the basic PSTATE state.
+	 */
+	mov	x0, #INIT_PSTATE_EL2
+	msr	spsr_el1, x0
+	adr	x0, __cpu_stick_to_vhe
+	msr	elr_el1, x0
+	eret
+
+1:
+	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
+	msr	sctlr_el1, x0
+
 	msr	elr_el2, lr
 	mov	w0, #BOOT_CPU_MODE_EL2
 	eret
+
+__cpu_stick_to_vhe:
+	mov	x0, #HVC_VHE_RESTART
+	hvc	#0
+	mov	x0, #BOOT_CPU_MODE_EL2
+	ret
 SYM_FUNC_END(init_kernel_el)
 
 /*
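
The decision in the added block boils down to one bit test: HCR_EL2 has already been programmed for nVHE at this point, so reading E2H back as 1 means the bit is RES1 on this CPU and only VHE is available. A rough user-space sketch of that decision (HCR_EL2.E2H is bit 34; the hcr values below are mock inputs, not real register reads):

#include <stdint.h>
#include <stdio.h>

#define HCR_E2H		(1ULL << 34)	/* HCR_EL2.E2H */

/* Mirror of the cbz/branch above: a stuck E2H means "stick to VHE" */
static const char *boot_mode(uint64_t hcr_readback)
{
	return (hcr_readback & HCR_E2H) ? "VHE (E2H is RES1)" : "nVHE";
}

int main(void)
{
	printf("%s\n", boot_mode(0));		/* ordinary CPU */
	printf("%s\n", boot_mode(HCR_E2H));	/* "fruity" CPU */
	return 0;
}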
@@ -27,12 +27,12 @@ SYM_CODE_START(__hyp_stub_vectors)
 	ventry	el2_fiq_invalid			// FIQ EL2t
 	ventry	el2_error_invalid		// Error EL2t
 
-	ventry	el2_sync_invalid		// Synchronous EL2h
+	ventry	elx_sync			// Synchronous EL2h
 	ventry	el2_irq_invalid			// IRQ EL2h
 	ventry	el2_fiq_invalid			// FIQ EL2h
 	ventry	el2_error_invalid		// Error EL2h
 
-	ventry	el1_sync			// Synchronous 64-bit EL1
+	ventry	elx_sync			// Synchronous 64-bit EL1
 	ventry	el1_irq_invalid			// IRQ 64-bit EL1
 	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
 	ventry	el1_error_invalid		// Error 64-bit EL1
@@ -45,7 +45,7 @@ SYM_CODE_END(__hyp_stub_vectors)
 
 	.align 11
 
-SYM_CODE_START_LOCAL(el1_sync)
+SYM_CODE_START_LOCAL(elx_sync)
 	cmp	x0, #HVC_SET_VECTORS
 	b.ne	1f
 	msr	vbar_el2, x1
@@ -71,7 +71,7 @@ SYM_CODE_START_LOCAL(el1_sync)
 
 9:	mov	x0, xzr
 	eret
-SYM_CODE_END(el1_sync)
+SYM_CODE_END(elx_sync)
 
 // nVHE? No way! Give me the real thing!
 SYM_CODE_START_LOCAL(mutate_to_vhe)
@@ -225,7 +225,6 @@ SYM_FUNC_END(__hyp_reset_vectors)
  * Entry point to switch to VHE if deemed capable
  */
 SYM_FUNC_START(switch_to_vhe)
-#ifdef CONFIG_ARM64_VHE
 	// Need to have booted at EL2
 	adr_l	x1, __boot_cpu_mode
 	ldr	w0, [x1]
@@ -241,6 +240,5 @@ SYM_FUNC_START(switch_to_vhe)
 	mov	x0, #HVC_VHE_RESTART
 	hvc	#0
 1:
-#endif
 	ret
 SYM_FUNC_END(switch_to_vhe)
@@ -25,14 +25,26 @@ struct ftr_set_desc {
 	struct {
 		char			name[FTR_DESC_FIELD_LEN];
 		u8			shift;
+		bool			(*filter)(u64 val);
 	}				fields[];
 };
 
+static bool __init mmfr1_vh_filter(u64 val)
+{
+	/*
+	 * If we ever reach this point while running VHE, we're
+	 * guaranteed to be on one of these funky, VHE-stuck CPUs. If
+	 * the user was trying to force nVHE on us, proceed with
+	 * attitude adjustment.
+	 */
+	return !(is_kernel_in_hyp_mode() && val == 0);
+}
+
 static const struct ftr_set_desc mmfr1 __initconst = {
 	.name		= "id_aa64mmfr1",
 	.override	= &id_aa64mmfr1_override,
 	.fields		= {
-		{ "vh", ID_AA64MMFR1_VHE_SHIFT },
+		{ "vh", ID_AA64MMFR1_VHE_SHIFT, mmfr1_vh_filter },
 		{}
 	},
 };
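
The new filter member gives each override field an optional veto over the value parsed from the command line. A stripped-down sketch of the same shape (plain user-space C; the table contents and the filter rule here are illustrative, not the kernel's):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct field {
	const char *name;
	uint8_t shift;
	bool (*filter)(uint64_t val);	/* NULL: every value accepted */
};

/* Illustrative stand-in for mmfr1_vh_filter: refuse a "vh=0" request */
static bool vh_filter(uint64_t val)
{
	return val != 0;
}

static const struct field fields[] = {
	{ "vh",  8, vh_filter },
	{ "bti", 0, NULL },
};

int main(void)
{
	uint64_t requested = 0;		/* value parsed from the command line */

	for (size_t i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
		bool ok = !fields[i].filter || fields[i].filter(requested);

		printf("%s=0: %s\n", fields[i].name,
		       ok ? "override applied" : "override filtered out");
	}
	return 0;
}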
@@ -124,6 +136,18 @@ static void __init match_options(const char *cmdline)
 			if (find_field(cmdline, regs[i], f, &v))
 				continue;
 
+			/*
+			 * If an override gets filtered out, advertise
+			 * it by setting the value to 0xf, but
+			 * clearing the mask... Yes, this is fragile.
+			 */
+			if (regs[i]->fields[f].filter &&
+			    !regs[i]->fields[f].filter(v)) {
+				regs[i]->override->val  |= mask;
+				regs[i]->override->mask &= ~mask;
+				continue;
+			}
+
 			regs[i]->override->val &= ~mask;
 			regs[i]->override->val |= (v << shift) & mask;
 			regs[i]->override->mask |= mask;
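
The 0xf-with-cleared-mask trick described in the comment above is what the "impossible override" branch added to init_cpu_ftr_reg() and the "invalid override" case in the arm64_ftr_override comment, both earlier in this diff, are keyed on. A minimal sketch of the two ends of that handshake, using an arbitrary 4-bit field:

#include <stdint.h>
#include <stdio.h>

struct ftr_override {
	uint64_t val;
	uint64_t mask;
};

int main(void)
{
	const uint64_t ftr_mask = 0xfULL << 8;	/* one illustrative 4-bit field */
	struct ftr_override ovr = { 0, 0 };

	/* Producer (match_options): the filter rejected the requested value */
	ovr.val  |= ftr_mask;		/* field advertised as 0xf ...  */
	ovr.mask &= ~ftr_mask;		/* ... with no valid mask bits  */

	/* Consumer (init_cpu_ftr_reg): a full-1 val with a clear mask cannot
	 * be a real override, so drop it and warn.
	 */
	if (!(ftr_mask & ovr.mask) && (ftr_mask & ovr.val) == ftr_mask) {
		printf("impossible override, ignored\n");
		ovr.val &= ~ftr_mask;
	}
	return 0;
}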