Merge branch kvm-arm64/stage2-vhe-load into kvmarm/next
* kvm-arm64/stage2-vhe-load:
  : Setup stage-2 MMU from vcpu_load() for VHE
  :
  : Unlike nVHE, there is no need to switch the stage-2 MMU around on guest
  : entry/exit in VHE mode as the host is running at EL2. Despite this, KVM
  : reloads the stage-2 on every guest entry, which is needless.
  :
  : This series moves the setup of the stage-2 MMU context to vcpu_load()
  : when running in VHE mode. This is likely to be a win across the board,
  : but also allows us to remove an ISB on the guest entry path for systems
  : with one of the speculative AT errata.
  KVM: arm64: Move VTCR_EL2 into struct s2_mmu
  KVM: arm64: Load the stage-2 MMU context in kvm_vcpu_load_vhe()
  KVM: arm64: Rename helpers for VHE vCPU load/put
  KVM: arm64: Reload stage-2 for VMID change on VHE
  KVM: arm64: Restore the stage-2 context in VHE's __tlb_switch_to_host()
  KVM: arm64: Don't zero VTTBR in __tlb_switch_to_host()

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
commit df26b77915
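The series' core argument can be modeled in a few lines of plain C: on VHE, programming the stage-2 MMU registers is a cost paid per vCPU migration, not per guest entry. A minimal userspace sketch of that cost difference (all names invented here, nothing below is kernel code):

/*
 * Toy model of the optimization: on VHE the stage-2 registers only
 * need to change when a vCPU is loaded onto a CPU, not on every
 * guest entry.
 */
#include <stdio.h>

static unsigned long stage2_loads;

static void load_stage2(void)
{
        stage2_loads++;         /* stands in for VTCR_EL2/VTTBR_EL2 writes */
}

/* Before the series: reprogram stage-2 on every guest entry. */
static void run_vcpu_old(unsigned long entries)
{
        while (entries--)
                load_stage2();
}

/* After the series: program stage-2 once, from vcpu_load(). */
static void run_vcpu_new(unsigned long entries)
{
        load_stage2();          /* vcpu_load() */
        while (entries--)
                ;               /* guest entry: stage-2 already in place */
}

int main(void)
{
        stage2_loads = 0;
        run_vcpu_old(1000);
        printf("per-entry reload: %lu stage-2 loads\n", stage2_loads);

        stage2_loads = 0;
        run_vcpu_new(1000);
        printf("load-time reload: %lu stage-2 loads\n", stage2_loads);
        return 0;
}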
@@ -158,6 +158,16 @@ struct kvm_s2_mmu {
         phys_addr_t     pgd_phys;
         struct kvm_pgtable *pgt;
 
+        /*
+         * VTCR value used on the host. For a non-NV guest (or a NV
+         * guest that runs in a context where its own S2 doesn't
+         * apply), its T0SZ value reflects that of the IPA size.
+         *
+         * For a shadow S2 MMU, T0SZ reflects the PARange exposed to
+         * the guest.
+         */
+        u64     vtcr;
+
         /* The last vcpu id that ran on each physical CPU */
         int __percpu *last_vcpu_ran;
 
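What the move buys is one VTCR per stage-2 MMU rather than one per VM, which is what a nested-virt shadow stage-2 (whose T0SZ tracks the guest-visible PARange, per the new comment) needs. A simplified stand-alone sketch of the layout change, with struct and field names echoing the diff and everything else invented; the T0SZ extraction assumes bits [5:0] of VTCR_EL2, as on arm64:

#include <stdint.h>
#include <stdio.h>

struct kvm_s2_mmu_model {
        uint64_t vtcr;          /* was: kvm->arch.vtcr, one value per VM */
};

struct kvm_arch_model {
        struct kvm_s2_mmu_model mmu;    /* canonical MMU; a NV shadow S2
                                         * would carry its own ->vtcr */
};

/* Consumers now take the MMU, not the VM: */
static inline unsigned int ipa_bits_of(const struct kvm_s2_mmu_model *mmu)
{
        /* VTCR_EL2.T0SZ lives in bits [5:0]; IPA size = 64 - T0SZ */
        return 64 - (unsigned int)(mmu->vtcr & 0x3f);
}

int main(void)
{
        struct kvm_arch_model arch = { .mmu = { .vtcr = 24 } }; /* T0SZ=24 */
        printf("IPA size: %u bits\n", ipa_bits_of(&arch.mmu)); /* 40 */
        return 0;
}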
@@ -205,9 +215,6 @@ struct kvm_protected_vm {
 struct kvm_arch {
         struct kvm_s2_mmu mmu;
 
-        /* VTCR_EL2 value for this VM */
-        u64    vtcr;
-
         /* Interrupt controller */
         struct vgic_dist        vgic;
 
@@ -1020,7 +1027,7 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
 extern unsigned int __ro_after_init kvm_arm_vmid_bits;
 int __init kvm_arm_vmid_alloc_init(void);
 void __init kvm_arm_vmid_alloc_free(void);
-void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
+bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
 void kvm_arm_vmid_clear_active(void);
 
 static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
@@ -1104,8 +1111,8 @@ static inline bool kvm_set_pmuserenr(u64 val)
 }
 #endif
 
-void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
-void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);
+void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu);
+void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu);
 
 int __init kvm_set_ipa_limit(void);
 
@@ -93,6 +93,8 @@ void __timer_disable_traps(struct kvm_vcpu *vcpu);
 void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
 void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
 #else
+void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu);
+void __vcpu_put_switch_sysregs(struct kvm_vcpu *vcpu);
 void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
 void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
 void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
@@ -111,11 +113,6 @@ void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
 void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
 void __sve_restore_state(void *sve_pffr, u32 *fpsr);
 
-#ifndef __KVM_NVHE_HYPERVISOR__
-void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
-void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu);
-#endif
-
 u64 __guest_enter(struct kvm_vcpu *vcpu);
 
 bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);
@@ -150,9 +150,9 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
  */
 #define KVM_PHYS_SHIFT  (40)
 
-#define kvm_phys_shift(kvm)             VTCR_EL2_IPA(kvm->arch.vtcr)
-#define kvm_phys_size(kvm)              (_AC(1, ULL) << kvm_phys_shift(kvm))
-#define kvm_phys_mask(kvm)              (kvm_phys_size(kvm) - _AC(1, ULL))
+#define kvm_phys_shift(mmu)             VTCR_EL2_IPA((mmu)->vtcr)
+#define kvm_phys_size(mmu)              (_AC(1, ULL) << kvm_phys_shift(mmu))
+#define kvm_phys_mask(mmu)              (kvm_phys_size(mmu) - _AC(1, ULL))
 
 #include <asm/kvm_pgtable.h>
 #include <asm/stage2_pgtable.h>
@@ -324,7 +324,7 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
 static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
                                           struct kvm_arch *arch)
 {
-        write_sysreg(arch->vtcr, vtcr_el2);
+        write_sysreg(mmu->vtcr, vtcr_el2);
         write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);
 
         /*
@@ -21,13 +21,13 @@
  * (IPA_SHIFT - 4).
  */
 #define stage2_pgtable_levels(ipa)      ARM64_HW_PGTABLE_LEVELS((ipa) - 4)
-#define kvm_stage2_levels(kvm)          VTCR_EL2_LVLS(kvm->arch.vtcr)
+#define kvm_stage2_levels(mmu)          VTCR_EL2_LVLS((mmu)->vtcr)
 
 /*
  * kvm_mmmu_cache_min_pages() is the number of pages required to install
  * a stage-2 translation. We pre-allocate the entry level page table at
  * the VM creation.
  */
-#define kvm_mmu_cache_min_pages(kvm)    (kvm_stage2_levels(kvm) - 1)
+#define kvm_mmu_cache_min_pages(mmu)    (kvm_stage2_levels(mmu) - 1)
 
 #endif  /* __ARM64_S2_PGTABLE_H_ */
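As a worked example of the two macros above, here is the arithmetic spelled out for 4K pages, assuming the arm64 definition ARM64_HW_PGTABLE_LEVELS(bits) = ((bits) - 4) / (PAGE_SHIFT - 3). A stand-alone sketch, not the kernel headers, and the min-pages helper here takes a level count directly rather than an MMU:

#include <stdio.h>

#define PAGE_SHIFT                      12
#define ARM64_HW_PGTABLE_LEVELS(bits)   (((bits) - 4) / (PAGE_SHIFT - 3))
#define stage2_pgtable_levels(ipa)      ARM64_HW_PGTABLE_LEVELS((ipa) - 4)
#define min_cache_pages(lvls)           ((lvls) - 1)

int main(void)
{
        /* e.g. a 40-bit IPA: (40 - 4 - 4) / 9 = 3 levels, 2 pages */
        for (int ipa = 32; ipa <= 48; ipa += 8) {
                int lvls = stage2_pgtable_levels(ipa);
                printf("IPA %2d bits: %d levels, min cache %d pages\n",
                       ipa, lvls, min_cache_pages(lvls));
        }
        return 0;
}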
@@ -447,7 +447,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
         kvm_vgic_load(vcpu);
         kvm_timer_vcpu_load(vcpu);
         if (has_vhe())
-                kvm_vcpu_load_sysregs_vhe(vcpu);
+                kvm_vcpu_load_vhe(vcpu);
         kvm_arch_vcpu_load_fp(vcpu);
         kvm_vcpu_pmu_restore_guest(vcpu);
         if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
@@ -471,7 +471,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
         kvm_arch_vcpu_put_debug_state_flags(vcpu);
         kvm_arch_vcpu_put_fp(vcpu);
         if (has_vhe())
-                kvm_vcpu_put_sysregs_vhe(vcpu);
+                kvm_vcpu_put_vhe(vcpu);
         kvm_timer_vcpu_put(vcpu);
         kvm_vgic_put(vcpu);
         kvm_vcpu_pmu_restore_host(vcpu);
@@ -949,7 +949,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                  * making a thread's VMID inactive. So we need to call
                  * kvm_arm_vmid_update() in non-premptible context.
                  */
-                kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid);
+                if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) &&
+                    has_vhe())
+                        __load_stage2(vcpu->arch.hw_mmu,
+                                      vcpu->arch.hw_mmu->arch);
 
                 kvm_pmu_flush_hwstate(vcpu);
 
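The shape of the new caller contract: kvm_arm_vmid_update() now reports whether a fresh VMID was allocated, and since VHE no longer reloads stage-2 on guest entry, the caller must reprogram VTTBR_EL2 (which embeds the VMID) on the spot. A userspace model with stand-in types and a stub allocator, illustrative only:

#include <stdbool.h>
#include <stdint.h>

struct vmid  { uint64_t id; };
struct s2mmu { struct vmid vmid; uint64_t vtcr; };

static bool vmid_rolled_over;   /* flipped by a pretend allocator */

static bool vmid_update(struct vmid *v)
{
        if (!vmid_rolled_over)
                return false;
        v->id++;                /* pretend the allocator issued a new VMID */
        vmid_rolled_over = false;
        return true;
}

static bool has_vhe(void) { return true; }

static void load_stage2(struct s2mmu *mmu)
{
        (void)mmu;              /* would write VTCR_EL2 and VTTBR_EL2 */
}

static void vcpu_enter_guest(struct s2mmu *hw_mmu)
{
        /* Mirrors the hunk above: reload only on rollover, only on VHE. */
        if (vmid_update(&hw_mmu->vmid) && has_vhe())
                load_stage2(hw_mmu);
}

int main(void)
{
        struct s2mmu mmu = { .vmid = { .id = 1 }, .vtcr = 0 };

        vcpu_enter_guest(&mmu);         /* no rollover: no reload */
        vmid_rolled_over = true;
        vcpu_enter_guest(&mmu);         /* rollover: stage-2 reloaded */
        return 0;
}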
@@ -129,8 +129,8 @@ static void prepare_host_vtcr(void)
         parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
         phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);
 
-        host_mmu.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
-                                          id_aa64mmfr1_el1_sys_val, phys_shift);
+        host_mmu.arch.mmu.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
+                                              id_aa64mmfr1_el1_sys_val, phys_shift);
 }
 
 static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot);
@@ -235,7 +235,7 @@ int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
         unsigned long nr_pages;
         int ret;
 
-        nr_pages = kvm_pgtable_stage2_pgd_size(vm->kvm.arch.vtcr) >> PAGE_SHIFT;
+        nr_pages = kvm_pgtable_stage2_pgd_size(mmu->vtcr) >> PAGE_SHIFT;
         ret = hyp_pool_init(&vm->pool, hyp_virt_to_pfn(pgd), nr_pages, 0);
         if (ret)
                 return ret;
@@ -295,7 +295,7 @@ int __pkvm_prot_finalize(void)
                 return -EPERM;
 
         params->vttbr = kvm_get_vttbr(mmu);
-        params->vtcr = host_mmu.arch.vtcr;
+        params->vtcr = mmu->vtcr;
         params->hcr_el2 |= HCR_VM;
 
         /*
@@ -303,7 +303,7 @@ static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
 {
         hyp_vm->host_kvm = host_kvm;
         hyp_vm->kvm.created_vcpus = nr_vcpus;
-        hyp_vm->kvm.arch.vtcr = host_mmu.arch.vtcr;
+        hyp_vm->kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
 }
 
 static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
@@ -483,7 +483,7 @@ int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
         }
 
         vm_size = pkvm_get_hyp_vm_size(nr_vcpus);
-        pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.vtcr);
+        pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.mmu.vtcr);
 
         ret = -ENOMEM;
 
@@ -1511,7 +1511,7 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
                               kvm_pgtable_force_pte_cb_t force_pte_cb)
 {
         size_t pgd_sz;
-        u64 vtcr = mmu->arch->vtcr;
+        u64 vtcr = mmu->vtcr;
         u32 ia_bits = VTCR_EL2_IPA(vtcr);
         u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
         u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
@@ -93,12 +93,12 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
 NOKPROBE_SYMBOL(__deactivate_traps);
 
 /*
- * Disable IRQs in {activate,deactivate}_traps_vhe_{load,put}() to
+ * Disable IRQs in __vcpu_{load,put}_{activate,deactivate}_traps() to
  * prevent a race condition between context switching of PMUSERENR_EL0
  * in __{activate,deactivate}_traps_common() and IPIs that attempts to
  * update PMUSERENR_EL0. See also kvm_set_pmuserenr().
  */
-void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
+static void __vcpu_load_activate_traps(struct kvm_vcpu *vcpu)
 {
         unsigned long flags;
 
@@ -107,7 +107,7 @@ void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
         local_irq_restore(flags);
 }
 
-void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu)
+static void __vcpu_put_deactivate_traps(struct kvm_vcpu *vcpu)
 {
         unsigned long flags;
 
@@ -116,6 +116,19 @@ void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu)
         local_irq_restore(flags);
 }
 
+void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu)
+{
+        __vcpu_load_switch_sysregs(vcpu);
+        __vcpu_load_activate_traps(vcpu);
+        __load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
+}
+
+void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu)
+{
+        __vcpu_put_deactivate_traps(vcpu);
+        __vcpu_put_switch_sysregs(vcpu);
+}
+
 static const exit_handler_fn hyp_exit_handlers[] = {
         [0 ... ESR_ELx_EC_MAX]          = NULL,
         [ESR_ELx_EC_CP15_32]            = kvm_hyp_handle_cp15_32,
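The new entry points compose the three load-time actions, and the put path unwinds in reverse without touching stage-2 at all: per the cover letter, the VHE host runs at EL2, where the guest's stage-2 context simply isn't in use, so there is nothing to tear down. Reduced to its ordering (stub bodies, illustrative only):

struct vcpu;

static void load_switch_sysregs(struct vcpu *v)  { (void)v; }
static void load_activate_traps(struct vcpu *v)  { (void)v; }
static void load_stage2(struct vcpu *v)          { (void)v; }
static void put_deactivate_traps(struct vcpu *v) { (void)v; }
static void put_switch_sysregs(struct vcpu *v)   { (void)v; }

void vcpu_load_vhe(struct vcpu *v)
{
        load_switch_sysregs(v);
        load_activate_traps(v);
        load_stage2(v);
}

void vcpu_put_vhe(struct vcpu *v)
{
        put_deactivate_traps(v);        /* reverse order of load ... */
        put_switch_sysregs(v);          /* ... and no stage-2 teardown */
}

int main(void)
{
        struct vcpu *v = 0;

        vcpu_load_vhe(v);
        vcpu_put_vhe(v);
        return 0;
}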
@@ -170,17 +183,11 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
         sysreg_save_host_state_vhe(host_ctxt);
 
         /*
-         * ARM erratum 1165522 requires us to configure both stage 1 and
-         * stage 2 translation for the guest context before we clear
-         * HCR_EL2.TGE.
-         *
-         * We have already configured the guest's stage 1 translation in
-         * kvm_vcpu_load_sysregs_vhe above. We must now call
-         * __load_stage2 before __activate_traps, because
-         * __load_stage2 configures stage 2 translation, and
-         * __activate_traps clear HCR_EL2.TGE (among other things).
+         * Note that ARM erratum 1165522 requires us to configure both stage 1
+         * and stage 2 translation for the guest context before we clear
+         * HCR_EL2.TGE. The stage 1 and stage 2 guest context has already been
+         * loaded on the CPU in kvm_vcpu_load_vhe().
          */
-        __load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
         __activate_traps(vcpu);
 
         __kvm_adjust_pc(vcpu);
@@ -52,7 +52,7 @@ void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
 NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
 
 /**
- * kvm_vcpu_load_sysregs_vhe - Load guest system registers to the physical CPU
+ * __vcpu_load_switch_sysregs - Load guest system registers to the physical CPU
  *
  * @vcpu: The VCPU pointer
  *
@@ -62,7 +62,7 @@ NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
  * and loading system register state early avoids having to load them on
  * every entry to the VM.
  */
-void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
+void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu)
 {
         struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
         struct kvm_cpu_context *host_ctxt;
@@ -92,12 +92,10 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
         __sysreg_restore_el1_state(guest_ctxt);
 
         vcpu_set_flag(vcpu, SYSREGS_ON_CPU);
-
-        activate_traps_vhe_load(vcpu);
 }
 
 /**
- * kvm_vcpu_put_sysregs_vhe - Restore host system registers to the physical CPU
+ * __vcpu_put_switch_sysregs - Restore host system registers to the physical CPU
  *
  * @vcpu: The VCPU pointer
  *
@@ -107,13 +105,12 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
  * and deferring saving system register state until we're no longer running the
  * VCPU avoids having to save them on every exit from the VM.
  */
-void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu)
+void __vcpu_put_switch_sysregs(struct kvm_vcpu *vcpu)
 {
         struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
         struct kvm_cpu_context *host_ctxt;
 
         host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
-        deactivate_traps_vhe_put(vcpu);
 
         __sysreg_save_el1_state(guest_ctxt);
         __sysreg_save_user_state(guest_ctxt);
@@ -11,18 +11,25 @@
 #include <asm/tlbflush.h>
 
 struct tlb_inv_context {
-        unsigned long   flags;
-        u64             tcr;
-        u64             sctlr;
+        struct kvm_s2_mmu       *mmu;
+        unsigned long           flags;
+        u64                     tcr;
+        u64                     sctlr;
 };
 
 static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
                                   struct tlb_inv_context *cxt)
 {
+        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
         u64 val;
 
         local_irq_save(cxt->flags);
 
+        if (vcpu && mmu != vcpu->arch.hw_mmu)
+                cxt->mmu = vcpu->arch.hw_mmu;
+        else
+                cxt->mmu = NULL;
+
         if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                 /*
                  * For CPUs that are affected by ARM errata 1165522 or 1530923,
@@ -66,10 +73,13 @@ static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
          * We're done with the TLB operation, let's restore the host's
          * view of HCR_EL2.
          */
-        write_sysreg(0, vttbr_el2);
         write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
         isb();
 
+        /* ... and the stage-2 MMU context that we switched away from */
+        if (cxt->mmu)
+                __load_stage2(cxt->mmu, cxt->mmu->arch);
+
         if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                 /* Restore the registers to what they were */
                 write_sysreg_el1(cxt->tcr, SYS_TCR);
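Taken together, the two tlb.c hunks implement a borrow-and-restore discipline: remember the stage-2 context that vcpu_load() programmed if a TLB operation displaces it, and reinstate it afterwards instead of zeroing VTTBR. The pattern, as a userspace model with stand-in types and names:

#include <stddef.h>

struct s2mmu { int dummy; };

static struct s2mmu *hw_loaded_mmu;     /* what vcpu_load() programmed */

struct tlb_inv_ctx {
        struct s2mmu *mmu;              /* context to restore, NULL if none */
};

static void hw_load_stage2(struct s2mmu *mmu) { hw_loaded_mmu = mmu; }

static void tlb_switch_to_guest(struct s2mmu *target, struct tlb_inv_ctx *cxt)
{
        /* Save the currently loaded context only if we're displacing it. */
        cxt->mmu = (hw_loaded_mmu && hw_loaded_mmu != target) ?
                   hw_loaded_mmu : NULL;
        hw_load_stage2(target);
}

static void tlb_switch_to_host(struct tlb_inv_ctx *cxt)
{
        /* TLB maintenance done; restore what we displaced, if anything. */
        if (cxt->mmu)
                hw_load_stage2(cxt->mmu);
}

int main(void)
{
        struct s2mmu vcpu_mmu, other_mmu;
        struct tlb_inv_ctx cxt;

        hw_load_stage2(&vcpu_mmu);              /* vcpu_load() */
        tlb_switch_to_guest(&other_mmu, &cxt);  /* borrow for invalidation */
        tlb_switch_to_host(&cxt);               /* vcpu's context is back */
        return (hw_loaded_mmu == &vcpu_mmu) ? 0 : 1;
}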
@@ -892,7 +892,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t
 
         mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
         mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
-        kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);
+        mmu->vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);
 
         if (mmu->pgt != NULL) {
                 kvm_err("kvm_arch already initialized?\n");
@@ -1067,7 +1067,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
         phys_addr_t addr;
         int ret = 0;
         struct kvm_mmu_memory_cache cache = { .gfp_zero = __GFP_ZERO };
-        struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
+        struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
+        struct kvm_pgtable *pgt = mmu->pgt;
         enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
                                      KVM_PGTABLE_PROT_R |
                                      (writable ? KVM_PGTABLE_PROT_W : 0);
@@ -1080,7 +1081,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 
         for (addr = guest_ipa; addr < guest_ipa + size; addr += PAGE_SIZE) {
                 ret = kvm_mmu_topup_memory_cache(&cache,
-                                                 kvm_mmu_cache_min_pages(kvm));
+                                                 kvm_mmu_cache_min_pages(mmu));
                 if (ret)
                         break;
 
@@ -1411,7 +1412,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         if (fault_status != ESR_ELx_FSC_PERM ||
             (logging_active && write_fault)) {
                 ret = kvm_mmu_topup_memory_cache(memcache,
-                                                 kvm_mmu_cache_min_pages(kvm));
+                                                 kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu));
                 if (ret)
                         return ret;
         }
@@ -1727,7 +1728,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
         }
 
         /* Userspace should not be able to register out-of-bounds IPAs */
-        VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
+        VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->arch.hw_mmu));
 
         if (fault_status == ESR_ELx_FSC_ACCESS) {
                 handle_access_fault(vcpu, fault_ipa);
@@ -2001,7 +2002,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
          * Prevent userspace from creating a memory region outside of the IPA
          * space addressable by the KVM guest IPA space.
          */
-        if ((new->base_gfn + new->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT))
+        if ((new->base_gfn + new->npages) > (kvm_phys_size(&kvm->arch.mmu) >> PAGE_SHIFT))
                 return -EFAULT;
 
         hva = new->userspace_addr;
@@ -123,7 +123,7 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
         if (host_kvm->created_vcpus < 1)
                 return -EINVAL;
 
-        pgd_sz = kvm_pgtable_stage2_pgd_size(host_kvm->arch.vtcr);
+        pgd_sz = kvm_pgtable_stage2_pgd_size(host_kvm->arch.mmu.vtcr);
 
         /*
          * The PGD pages will be reclaimed using a hyp_memcache which implies
@@ -27,7 +27,8 @@ int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr,
         if (addr + size < addr)
                 return -EINVAL;
 
-        if (addr & ~kvm_phys_mask(kvm) || addr + size > kvm_phys_size(kvm))
+        if (addr & ~kvm_phys_mask(&kvm->arch.mmu) ||
+            (addr + size) > kvm_phys_size(&kvm->arch.mmu))
                 return -E2BIG;
 
         return 0;
@@ -135,10 +135,11 @@ void kvm_arm_vmid_clear_active(void)
         atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
 }
 
-void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
+bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
 {
         unsigned long flags;
         u64 vmid, old_active_vmid;
+        bool updated = false;
 
         vmid = atomic64_read(&kvm_vmid->id);
 
@@ -156,17 +157,21 @@ void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
         if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
             0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
                                           old_active_vmid, vmid))
-                return;
+                return false;
 
         raw_spin_lock_irqsave(&cpu_vmid_lock, flags);
 
         /* Check that our VMID belongs to the current generation. */
         vmid = atomic64_read(&kvm_vmid->id);
-        if (!vmid_gen_match(vmid))
+        if (!vmid_gen_match(vmid)) {
                 vmid = new_vmid(kvm_vmid);
+                updated = true;
+        }
 
         atomic64_set(this_cpu_ptr(&active_vmids), vmid);
         raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
+
+        return updated;
 }
 
 /*
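The allocator's fast path stays lock-free: it re-marks the current VMID active with a cmpxchg and only drops to the spinlock when the generation rolled over. A rough userspace model of that split using C11 atomics and a mutex, with generation handling collapsed to a stub; illustrative only, not the kernel's allocator:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <pthread.h>

static _Atomic uint64_t active_vmid;    /* per-CPU in the kernel */
static pthread_mutex_t vmid_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t next_vmid = 1;

static bool gen_match(uint64_t vmid) { return vmid != 0; }      /* stub */

static bool vmid_update(_Atomic uint64_t *id)
{
        uint64_t vmid = atomic_load(id);
        uint64_t old = atomic_load(&active_vmid);
        bool updated = false;

        /* Fast path: still valid, re-mark it active via cmpxchg. */
        if (old != 0 && gen_match(vmid) &&
            atomic_compare_exchange_strong(&active_vmid, &old, vmid))
                return false;

        /* Slow path: take the lock, maybe allocate a fresh VMID. */
        pthread_mutex_lock(&vmid_lock);
        vmid = atomic_load(id);
        if (!gen_match(vmid)) {
                vmid = next_vmid++;
                atomic_store(id, vmid);
                updated = true;
        }
        atomic_store(&active_vmid, vmid);
        pthread_mutex_unlock(&vmid_lock);

        return updated;         /* caller reloads stage-2 on VHE if true */
}

int main(void)
{
        _Atomic uint64_t kvm_vmid = 0;

        vmid_update(&kvm_vmid);         /* slow path: allocates VMID 1 */
        return vmid_update(&kvm_vmid);  /* fast path: returns false */
}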