Merge branch kvm-arm64/vmid-allocator into kvmarm-master/next
* kvm-arm64/vmid-allocator:
  : .
  : VMID allocation rewrite from Shameerali Kolothum Thodi, paving the
  : way for pinned VMIDs and SVA.
  : .
  KVM: arm64: Make active_vmids invalid on vCPU schedule out
  KVM: arm64: Align the VMID allocation with the arm64 ASID
  KVM: arm64: Make VMID bits accessible outside of allocator
  KVM: arm64: Introduce a new VMID allocator for KVM

Signed-off-by: Marc Zyngier <maz@kernel.org>
commit ebca68972e
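The new allocator keeps each guest's VMID in a single 64-bit value: the low bits are the hardware VMID programmed into VTTBR_EL2, the high bits are an allocation generation, and an ID is only usable while its generation matches the global one. The sketch below illustrates that encoding only (it assumes a fixed 16-bit VMID width, whereas KVM reads the width at runtime via kvm_get_vmid_bits()); it is not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Illustration only: assume 16 VMID bits; KVM derives the width at runtime. */
#define VMID_BITS          16
#define VMID_MASK          (~((1ULL << VMID_BITS) - 1))  /* upper (generation) bits */
#define VMID_FIRST_VERSION (1ULL << VMID_BITS)

/* Mirrors the allocator's vmid_gen_match(): true if the id's generation is current. */
static int vmid_gen_match(uint64_t id, uint64_t generation)
{
	return !((id ^ generation) >> VMID_BITS);
}

int main(void)
{
	uint64_t generation = VMID_FIRST_VERSION;   /* generation 1 */
	uint64_t id = generation | 42;              /* hardware VMID 42, generation 1 */

	printf("hw vmid: %llu\n", (unsigned long long)(id & ~VMID_MASK));
	printf("valid before rollover: %d\n", vmid_gen_match(id, generation));

	generation += VMID_FIRST_VERSION;           /* a rollover bumps the generation */
	printf("valid after rollover: %d\n", vmid_gen_match(id, generation));
	return 0;
}

Compiled and run, it prints the hardware VMID (42), reports the ID as valid, and then shows the same ID becoming stale after a simulated generation bump.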
arch/arm64/include/asm/kvm_host.h
@@ -73,9 +73,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);
 
 struct kvm_vmid {
-	/* The VMID generation used for the virt. memory system */
-	u64    vmid_gen;
-	u32    vmid;
+	atomic64_t id;
 };
 
 struct kvm_s2_mmu {
@@ -695,6 +693,12 @@ int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
 int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
 			    struct kvm_device_attr *attr);
 
+extern unsigned int kvm_arm_vmid_bits;
+int kvm_arm_vmid_alloc_init(void);
+void kvm_arm_vmid_alloc_free(void);
+void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
+void kvm_arm_vmid_clear_active(void);
+
 static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
 {
 	vcpu_arch->steal.base = GPA_INVALID;
arch/arm64/include/asm/kvm_mmu.h
@@ -115,6 +115,7 @@ alternative_cb_end
 #include <asm/cache.h>
 #include <asm/cacheflush.h>
 #include <asm/mmu_context.h>
+#include <asm/kvm_host.h>
 
 void kvm_update_va_mask(struct alt_instr *alt,
 			__le32 *origptr, __le32 *updptr, int nr_inst);
@@ -266,7 +267,8 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
 	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;
 
 	baddr = mmu->pgd_phys;
-	vmid_field = (u64)READ_ONCE(vmid->vmid) << VTTBR_VMID_SHIFT;
+	vmid_field = atomic64_read(&vmid->id) << VTTBR_VMID_SHIFT;
+	vmid_field &= VTTBR_VMID_MASK(kvm_arm_vmid_bits);
 	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
 }
 
arch/arm64/kernel/image-vars.h
@@ -79,6 +79,9 @@ KVM_NVHE_ALIAS(__hyp_stub_vectors);
 /* Kernel symbol used by icache_is_vpipt(). */
 KVM_NVHE_ALIAS(__icache_flags);
 
+/* VMID bits set by the KVM VMID allocator */
+KVM_NVHE_ALIAS(kvm_arm_vmid_bits);
+
 /* Kernel symbols needed for cpus_have_final/const_caps checks. */
 KVM_NVHE_ALIAS(arm64_const_caps_ready);
 KVM_NVHE_ALIAS(cpu_hwcap_keys);
arch/arm64/kvm/Makefile
@@ -14,7 +14,7 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
 	 inject_fault.o va_layout.o handle_exit.o \
 	 guest.o debug.o reset.o sys_regs.o \
 	 vgic-sys-reg-v3.o fpsimd.o pmu.o pkvm.o \
-	 arch_timer.o trng.o\
+	 arch_timer.o trng.o vmid.o \
 	 vgic/vgic.o vgic/vgic-init.o \
 	 vgic/vgic-irqfd.o vgic/vgic-v2.o \
 	 vgic/vgic-v3.o vgic/vgic-v4.o \
arch/arm64/kvm/arm.c
@@ -53,11 +53,6 @@ static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
 unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 
-/* The VMID used in the VTTBR */
-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
-static u32 kvm_next_vmid;
-static DEFINE_SPINLOCK(kvm_vmid_lock);
-
 static bool vgic_present;
 
 static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
@@ -422,6 +417,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	kvm_timer_vcpu_put(vcpu);
 	kvm_vgic_put(vcpu);
 	kvm_vcpu_pmu_restore_host(vcpu);
+	kvm_arm_vmid_clear_active();
 
 	vcpu->cpu = -1;
 }
@@ -489,87 +485,6 @@ unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
 }
 #endif
 
-/* Just ensure a guest exit from a particular CPU */
-static void exit_vm_noop(void *info)
-{
-}
-
-void force_vm_exit(const cpumask_t *mask)
-{
-	preempt_disable();
-	smp_call_function_many(mask, exit_vm_noop, NULL, true);
-	preempt_enable();
-}
-
-/**
- * need_new_vmid_gen - check that the VMID is still valid
- * @vmid: The VMID to check
- *
- * return true if there is a new generation of VMIDs being used
- *
- * The hardware supports a limited set of values with the value zero reserved
- * for the host, so we check if an assigned value belongs to a previous
- * generation, which requires us to assign a new value. If we're the first to
- * use a VMID for the new generation, we must flush necessary caches and TLBs
- * on all CPUs.
- */
-static bool need_new_vmid_gen(struct kvm_vmid *vmid)
-{
-	u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
-	smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
-	return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen);
-}
-
-/**
- * update_vmid - Update the vmid with a valid VMID for the current generation
- * @vmid: The stage-2 VMID information struct
- */
-static void update_vmid(struct kvm_vmid *vmid)
-{
-	if (!need_new_vmid_gen(vmid))
-		return;
-
-	spin_lock(&kvm_vmid_lock);
-
-	/*
-	 * We need to re-check the vmid_gen here to ensure that if another vcpu
-	 * already allocated a valid vmid for this vm, then this vcpu should
-	 * use the same vmid.
-	 */
-	if (!need_new_vmid_gen(vmid)) {
-		spin_unlock(&kvm_vmid_lock);
-		return;
-	}
-
-	/* First user of a new VMID generation? */
-	if (unlikely(kvm_next_vmid == 0)) {
-		atomic64_inc(&kvm_vmid_gen);
-		kvm_next_vmid = 1;
-
-		/*
-		 * On SMP we know no other CPUs can use this CPU's or each
-		 * other's VMID after force_vm_exit returns since the
-		 * kvm_vmid_lock blocks them from reentry to the guest.
-		 */
-		force_vm_exit(cpu_all_mask);
-		/*
-		 * Now broadcast TLB + ICACHE invalidation over the inner
-		 * shareable domain to make sure all data structures are
-		 * clean.
-		 */
-		kvm_call_hyp(__kvm_flush_vm_context);
-	}
-
-	WRITE_ONCE(vmid->vmid, kvm_next_vmid);
-	kvm_next_vmid++;
-	kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;
-
-	smp_wmb();
-	WRITE_ONCE(vmid->vmid_gen, atomic64_read(&kvm_vmid_gen));
-
-	spin_unlock(&kvm_vmid_lock);
-}
-
 static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.target >= 0;
@@ -793,7 +708,6 @@ static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
 	}
 
 	return kvm_request_pending(vcpu) ||
-			need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
 			xfer_to_guest_mode_work_pending();
 }
 
@@ -855,8 +769,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		if (!ret)
 			ret = 1;
 
-		update_vmid(&vcpu->arch.hw_mmu->vmid);
-
 		check_vcpu_requests(vcpu);
 
 		/*
@@ -866,6 +778,15 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		 */
 		preempt_disable();
 
+		/*
+		 * The VMID allocator only tracks active VMIDs per
+		 * physical CPU, and therefore the VMID allocated may not be
+		 * preserved on VMID roll-over if the task was preempted,
+		 * making a thread's VMID inactive. So we need to call
+		 * kvm_arm_vmid_update() in non-premptible context.
+		 */
+		kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid);
+
 		kvm_pmu_flush_hwstate(vcpu);
 
 		local_irq_disable();
@@ -2161,6 +2082,12 @@ int kvm_arch_init(void *opaque)
 	if (err)
 		return err;
 
+	err = kvm_arm_vmid_alloc_init();
+	if (err) {
+		kvm_err("Failed to initialize VMID allocator.\n");
+		return err;
+	}
+
 	if (!in_hyp_mode) {
 		err = init_hyp_mode();
 		if (err)
@@ -2200,6 +2127,7 @@ out_hyp:
 	if (!in_hyp_mode)
 		teardown_hyp_mode();
 out_err:
+	kvm_arm_vmid_alloc_free();
 	return err;
 }
 
arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -138,8 +138,7 @@ int kvm_host_prepare_stage2(void *pgt_pool_base)
 
 	mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
 	mmu->pgt = &host_kvm.pgt;
-	WRITE_ONCE(mmu->vmid.vmid_gen, 0);
-	WRITE_ONCE(mmu->vmid.vmid, 0);
+	atomic64_set(&mmu->vmid.id, 0);
 
 	return 0;
 }
arch/arm64/kvm/mmu.c
@@ -653,7 +653,6 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
 
 	mmu->pgt = pgt;
 	mmu->pgd_phys = __pa(pgt->pgd);
-	WRITE_ONCE(mmu->vmid.vmid_gen, 0);
 	return 0;
 
 out_destroy_pgtable:
arch/arm64/kvm/vmid.c (new file, 196 lines)
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VMID allocator.
+ *
+ * Based on Arm64 ASID allocator algorithm.
+ * Please refer arch/arm64/mm/context.c for detailed
+ * comments on algorithm.
+ *
+ * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmu.h>
+
+unsigned int kvm_arm_vmid_bits;
+static DEFINE_RAW_SPINLOCK(cpu_vmid_lock);
+
+static atomic64_t vmid_generation;
+static unsigned long *vmid_map;
+
+static DEFINE_PER_CPU(atomic64_t, active_vmids);
+static DEFINE_PER_CPU(u64, reserved_vmids);
+
+#define VMID_MASK		(~GENMASK(kvm_arm_vmid_bits - 1, 0))
+#define VMID_FIRST_VERSION	(1UL << kvm_arm_vmid_bits)
+
+#define NUM_USER_VMIDS		VMID_FIRST_VERSION
+#define vmid2idx(vmid)		((vmid) & ~VMID_MASK)
+#define idx2vmid(idx)		vmid2idx(idx)
+
+/*
+ * As vmid #0 is always reserved, we will never allocate one
+ * as below and can be treated as invalid. This is used to
+ * set the active_vmids on vCPU schedule out.
+ */
+#define VMID_ACTIVE_INVALID		VMID_FIRST_VERSION
+
+#define vmid_gen_match(vmid) \
+	(!(((vmid) ^ atomic64_read(&vmid_generation)) >> kvm_arm_vmid_bits))
+
+static void flush_context(void)
+{
+	int cpu;
+	u64 vmid;
+
+	bitmap_clear(vmid_map, 0, NUM_USER_VMIDS);
+
+	for_each_possible_cpu(cpu) {
+		vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);
+
+		/* Preserve reserved VMID */
+		if (vmid == 0)
+			vmid = per_cpu(reserved_vmids, cpu);
+		__set_bit(vmid2idx(vmid), vmid_map);
+		per_cpu(reserved_vmids, cpu) = vmid;
+	}
+
+	/*
+	 * Unlike ASID allocator, we expect less frequent rollover in
+	 * case of VMIDs. Hence, instead of marking the CPU as
+	 * flush_pending and issuing a local context invalidation on
+	 * the next context-switch, we broadcast TLB flush + I-cache
+	 * invalidation over the inner shareable domain on rollover.
+	 */
+	kvm_call_hyp(__kvm_flush_vm_context);
+}
+
+static bool check_update_reserved_vmid(u64 vmid, u64 newvmid)
+{
+	int cpu;
+	bool hit = false;
+
+	/*
+	 * Iterate over the set of reserved VMIDs looking for a match
+	 * and update to use newvmid (i.e. the same VMID in the current
+	 * generation).
+	 */
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(reserved_vmids, cpu) == vmid) {
+			hit = true;
+			per_cpu(reserved_vmids, cpu) = newvmid;
+		}
+	}
+
+	return hit;
+}
+
+static u64 new_vmid(struct kvm_vmid *kvm_vmid)
+{
+	static u32 cur_idx = 1;
+	u64 vmid = atomic64_read(&kvm_vmid->id);
+	u64 generation = atomic64_read(&vmid_generation);
+
+	if (vmid != 0) {
+		u64 newvmid = generation | (vmid & ~VMID_MASK);
+
+		if (check_update_reserved_vmid(vmid, newvmid)) {
+			atomic64_set(&kvm_vmid->id, newvmid);
+			return newvmid;
+		}
+
+		if (!__test_and_set_bit(vmid2idx(vmid), vmid_map)) {
+			atomic64_set(&kvm_vmid->id, newvmid);
+			return newvmid;
+		}
+	}
+
+	vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, cur_idx);
+	if (vmid != NUM_USER_VMIDS)
+		goto set_vmid;
+
+	/* We're out of VMIDs, so increment the global generation count */
+	generation = atomic64_add_return_relaxed(VMID_FIRST_VERSION,
+						 &vmid_generation);
+	flush_context();
+
+	/* We have more VMIDs than CPUs, so this will always succeed */
+	vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, 1);
+
+set_vmid:
+	__set_bit(vmid, vmid_map);
+	cur_idx = vmid;
+	vmid = idx2vmid(vmid) | generation;
+	atomic64_set(&kvm_vmid->id, vmid);
+	return vmid;
+}
+
+/* Called from vCPU sched out with preemption disabled */
+void kvm_arm_vmid_clear_active(void)
+{
+	atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
+}
+
+void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
+{
+	unsigned long flags;
+	u64 vmid, old_active_vmid;
+
+	vmid = atomic64_read(&kvm_vmid->id);
+
+	/*
+	 * Please refer comments in check_and_switch_context() in
+	 * arch/arm64/mm/context.c.
+	 *
+	 * Unlike ASID allocator, we set the active_vmids to
+	 * VMID_ACTIVE_INVALID on vCPU schedule out to avoid
+	 * reserving the VMID space needlessly on rollover.
+	 * Hence explicitly check here for a "!= 0" to
+	 * handle the sync with a concurrent rollover.
+	 */
+	old_active_vmid = atomic64_read(this_cpu_ptr(&active_vmids));
+	if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
+	    0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
+					  old_active_vmid, vmid))
+		return;
+
+	raw_spin_lock_irqsave(&cpu_vmid_lock, flags);
+
+	/* Check that our VMID belongs to the current generation. */
+	vmid = atomic64_read(&kvm_vmid->id);
+	if (!vmid_gen_match(vmid))
+		vmid = new_vmid(kvm_vmid);
+
+	atomic64_set(this_cpu_ptr(&active_vmids), vmid);
+	raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
+}
+
+/*
+ * Initialize the VMID allocator
+ */
+int kvm_arm_vmid_alloc_init(void)
+{
+	kvm_arm_vmid_bits = kvm_get_vmid_bits();
+
+	/*
+	 * Expect allocation after rollover to fail if we don't have
+	 * at least one more VMID than CPUs. VMID #0 is always reserved.
+	 */
+	WARN_ON(NUM_USER_VMIDS - 1 <= num_possible_cpus());
+	atomic64_set(&vmid_generation, VMID_FIRST_VERSION);
+	vmid_map = kcalloc(BITS_TO_LONGS(NUM_USER_VMIDS),
+			   sizeof(*vmid_map), GFP_KERNEL);
+	if (!vmid_map)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void kvm_arm_vmid_alloc_free(void)
+{
+	kfree(vmid_map);
+}
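To see the rollover behaviour of the bitmap-plus-generation scheme used by new_vmid() above in isolation, here is a small user-space sketch (the demo_* names and the fixed 4-bit VMID width are assumptions made for the demo, not kernel code): IDs are handed out from a bitmap with VMID #0 reserved, and when the map fills up the generation is bumped, the map is cleared (the kernel additionally broadcasts a TLB/I-cache flush at this point) and allocation starts over.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Demo parameters (assumed): 4 VMID bits => 16 VMIDs, with #0 reserved. */
#define VMID_BITS          4
#define NUM_VMIDS          (1u << VMID_BITS)
#define VMID_FIRST_VERSION (1ull << VMID_BITS)

static bool vmid_map[NUM_VMIDS];                  /* stands in for the kernel bitmap */
static uint64_t generation = VMID_FIRST_VERSION;  /* generation 1 */

/* Allocate a generation-tagged VMID, rolling the generation over when full. */
static uint64_t demo_new_vmid(void)
{
	for (unsigned int i = 1; i < NUM_VMIDS; i++) {  /* VMID 0 stays reserved */
		if (!vmid_map[i]) {
			vmid_map[i] = true;
			return generation | i;
		}
	}
	/* Out of VMIDs: new generation, "flush" the map and retry from clean state. */
	generation += VMID_FIRST_VERSION;
	memset(vmid_map, 0, sizeof(vmid_map));
	printf("-- rollover: new generation %llu --\n",
	       (unsigned long long)(generation >> VMID_BITS));
	vmid_map[1] = true;
	return generation | 1;
}

int main(void)
{
	for (int i = 0; i < 18; i++) {
		uint64_t id = demo_new_vmid();
		printf("id=%#llx (hw vmid %llu, gen %llu)\n",
		       (unsigned long long)id,
		       (unsigned long long)(id & (NUM_VMIDS - 1)),
		       (unsigned long long)(id >> VMID_BITS));
	}
	return 0;
}

Running it hands out VMIDs 1 through 15 in generation 1, then demonstrates a rollover into generation 2 on the sixteenth allocation.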