Merge branch kvm-arm64/pkvm-vcpu-state into kvmarm-master/next
* kvm-arm64/pkvm-vcpu-state: (25 commits)
  : .
  : Large drop of pKVM patches from Will Deacon and co, adding
  : a private vm/vcpu state at EL2, managed independently from
  : the EL1 state. From the cover letter:
  :
  : "This is version six of the pKVM EL2 state series, extending the pKVM
  : hypervisor code so that it can dynamically instantiate and manage VM
  : data structures without the host being able to access them directly.
  : These structures consist of a hyp VM, a set of hyp vCPUs and the stage-2
  : page-table for the MMU. The pages used to hold the hypervisor structures
  : are returned to the host when the VM is destroyed."
  : .
  KVM: arm64: Use the pKVM hyp vCPU structure in handle___kvm_vcpu_run()
  KVM: arm64: Don't unnecessarily map host kernel sections at EL2
  KVM: arm64: Explicitly map 'kvm_vgic_global_state' at EL2
  KVM: arm64: Maintain a copy of 'kvm_arm_vmid_bits' at EL2
  KVM: arm64: Unmap 'kvm_arm_hyp_percpu_base' from the host
  KVM: arm64: Return guest memory from EL2 via dedicated teardown memcache
  KVM: arm64: Instantiate guest stage-2 page-tables at EL2
  KVM: arm64: Consolidate stage-2 initialisation into a single function
  KVM: arm64: Add generic hyp_memcache helpers
  KVM: arm64: Provide I-cache invalidation by virtual address at EL2
  KVM: arm64: Initialise hypervisor copies of host symbols unconditionally
  KVM: arm64: Add per-cpu fixmap infrastructure at EL2
  KVM: arm64: Instantiate pKVM hypervisor VM and vCPU structures from EL1
  KVM: arm64: Add infrastructure to create and track pKVM instances at EL2
  KVM: arm64: Rename 'host_kvm' to 'host_mmu'
  KVM: arm64: Add hyp_spinlock_t static initializer
  KVM: arm64: Include asm/kvm_mmu.h in nvhe/mem_protect.h
  KVM: arm64: Add helpers to pin memory shared with the hypervisor at EL2
  KVM: arm64: Prevent the donation of no-map pages
  KVM: arm64: Implement do_donate() helper for donating memory
  ...

Signed-off-by: Marc Zyngier <maz@kernel.org>
commit cfa72993d1
@@ -135,7 +135,7 @@
  * 40 bits wide (T0SZ = 24).  Systems with a PARange smaller than 40 bits are
  * not known to exist and will break with this configuration.
  *
- * The VTCR_EL2 is configured per VM and is initialised in kvm_arm_setup_stage2().
+ * The VTCR_EL2 is configured per VM and is initialised in kvm_init_stage2_mmu.
  *
  * Note that when using 4K pages, we concatenate two first level page tables
  * together. With 16K pages, we concatenate 16 first level page tables.

@@ -76,6 +76,9 @@ enum __kvm_host_smccc_func {
 	__KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
 	__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs,
 	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_init_traps,
+	__KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
+	__KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
+	__KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm,
 };

 #define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]

@@ -106,7 +109,7 @@ enum __kvm_host_smccc_func {
 #define per_cpu_ptr_nvhe_sym(sym, cpu)					\
 	({								\
 		unsigned long base, off;				\
-		base = kvm_arm_hyp_percpu_base[cpu];			\
+		base = kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];	\
 		off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -		\
 		      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);	\
 		base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL; \

@@ -211,7 +214,7 @@ DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
 #define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
 #define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)

-extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
+extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[];
 DECLARE_KVM_NVHE_SYM(__per_cpu_start);
 DECLARE_KVM_NVHE_SYM(__per_cpu_end);

@@ -73,6 +73,63 @@ u32 __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);

+struct kvm_hyp_memcache {
+	phys_addr_t head;
+	unsigned long nr_pages;
+};
+
+static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
+				     phys_addr_t *p,
+				     phys_addr_t (*to_pa)(void *virt))
+{
+	*p = mc->head;
+	mc->head = to_pa(p);
+	mc->nr_pages++;
+}
+
+static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
+				     void *(*to_va)(phys_addr_t phys))
+{
+	phys_addr_t *p = to_va(mc->head);
+
+	if (!mc->nr_pages)
+		return NULL;
+
+	mc->head = *p;
+	mc->nr_pages--;
+
+	return p;
+}
+
+static inline int __topup_hyp_memcache(struct kvm_hyp_memcache *mc,
+				       unsigned long min_pages,
+				       void *(*alloc_fn)(void *arg),
+				       phys_addr_t (*to_pa)(void *virt),
+				       void *arg)
+{
+	while (mc->nr_pages < min_pages) {
+		phys_addr_t *p = alloc_fn(arg);
+
+		if (!p)
+			return -ENOMEM;
+		push_hyp_memcache(mc, p, to_pa);
+	}
+
+	return 0;
+}
+
+static inline void __free_hyp_memcache(struct kvm_hyp_memcache *mc,
+				       void (*free_fn)(void *virt, void *arg),
+				       void *(*to_va)(phys_addr_t phys),
+				       void *arg)
+{
+	while (mc->nr_pages)
+		free_fn(pop_hyp_memcache(mc, to_va), arg);
+}
+
+void free_hyp_memcache(struct kvm_hyp_memcache *mc);
+int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages);
+
 struct kvm_vmid {
 	atomic64_t id;
 };
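These helpers are deliberately written against caller-supplied callbacks so that both EL1 and EL2 can instantiate them over their own allocators and VA/PA conversions. A minimal sketch of a host-side instantiation follows; the callback and conversion helper names (hyp_mc_alloc_fn, kvm_host_pa, kvm_host_va) are illustrative, not part of this hunk:

	/* Sketch: host-side wrappers around the generic memcache helpers. */
	static void *hyp_mc_alloc_fn(void *unused)
	{
		/* Pages threaded into the memcache must be whole kernel pages. */
		return (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
	}

	static void hyp_mc_free_fn(void *addr, void *unused)
	{
		free_page((unsigned long)addr);
	}

	int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages)
	{
		/* kvm_host_pa()/kvm_host_va() stand in for the host's VA<->PA helpers. */
		return __topup_hyp_memcache(mc, min_pages, hyp_mc_alloc_fn,
					    kvm_host_pa, NULL);
	}

	void free_hyp_memcache(struct kvm_hyp_memcache *mc)
	{
		__free_hyp_memcache(mc, hyp_mc_free_fn, kvm_host_va, NULL);
	}

The memcache itself is just a singly-linked stack of pages threaded through their own first words as physical addresses, which is what lets EL2 hand the same list straight back to the host on teardown.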
@@ -115,6 +172,13 @@ struct kvm_smccc_features {
 	unsigned long vendor_hyp_bmap;
 };

+typedef unsigned int pkvm_handle_t;
+
+struct kvm_protected_vm {
+	pkvm_handle_t handle;
+	struct kvm_hyp_memcache teardown_mc;
+};
+
 struct kvm_arch {
 	struct kvm_s2_mmu mmu;

@@ -166,6 +230,12 @@ struct kvm_arch {

 	/* Hypercall features firmware registers' descriptor */
 	struct kvm_smccc_features smccc_feat;

+	/*
+	 * For an untrusted host VM, 'pkvm.handle' is used to lookup
+	 * the associated pKVM instance in the hypervisor.
+	 */
+	struct kvm_protected_vm pkvm;
 };

 struct kvm_vcpu_fault_info {

@@ -915,8 +985,6 @@ int kvm_set_ipa_limit(void);
 #define __KVM_HAVE_ARCH_VM_ALLOC
 struct kvm *kvm_arch_alloc_vm(void);

-int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);
-
 static inline bool kvm_vm_is_protected(struct kvm *kvm)
 {
 	return false;

@@ -123,4 +123,7 @@ extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val);
 extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val);
 extern u64 kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val);

+extern unsigned long kvm_nvhe_sym(__icache_flags);
+extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);
+
 #endif /* __ARM64_KVM_HYP_H__ */

@@ -166,7 +166,7 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
 void free_hyp_pgds(void);

 void stage2_unmap_vm(struct kvm *kvm);
-int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
+int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type);
 void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 			  phys_addr_t pa, unsigned long size, bool writable);

@@ -42,6 +42,8 @@ typedef u64 kvm_pte_t;
 #define KVM_PTE_ADDR_MASK		GENMASK(47, PAGE_SHIFT)
 #define KVM_PTE_ADDR_51_48		GENMASK(15, 12)

+#define KVM_PHYS_INVALID		(-1ULL)
+
 static inline bool kvm_pte_valid(kvm_pte_t pte)
 {
 	return pte & KVM_PTE_VALID;

@@ -57,6 +59,18 @@ static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
 	return pa;
 }

+static inline kvm_pte_t kvm_phys_to_pte(u64 pa)
+{
+	kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK;
+
+	if (PAGE_SHIFT == 16) {
+		pa &= GENMASK(51, 48);
+		pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);
+	}
+
+	return pte;
+}
+
 static inline u64 kvm_granule_shift(u32 level)
 {
 	/* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */

@@ -381,6 +395,14 @@ u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
  */
 u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);

+/**
+ * kvm_pgtable_stage2_pgd_size() - Helper to compute size of a stage-2 PGD
+ * @vtcr:	Content of the VTCR register.
+ *
+ * Return: the size (in bytes) of the stage-2 PGD
+ */
+size_t kvm_pgtable_stage2_pgd_size(u64 vtcr);
+
 /**
  * __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
  * @pgt:	Uninitialised page-table structure to initialise.
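This helper exists so that EL1 can work out, from the VTCR alone, how many pages the guest's stage-2 PGD needs before donating them to EL2. A sketch of the expected caller-side flow (the allocation code around the helper is illustrative; vm_hva/pgd_hva denote the hyp VAs of the donated buffers):

	/* Sketch: size and allocate the guest PGD before handing it to EL2. */
	size_t pgd_sz = kvm_pgtable_stage2_pgd_size(kvm->arch.vtcr);
	void *pgd = alloc_pages_exact(pgd_sz, GFP_KERNEL_ACCOUNT);

	if (!pgd)
		return -ENOMEM;

	/* The __pkvm_init_vm hypercall then takes ownership of these pages. */
	ret = kvm_call_hyp_nvhe(__pkvm_init_vm, kvm, vm_hva, pgd_hva);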
@@ -9,11 +9,49 @@
 #include <linux/memblock.h>
 #include <asm/kvm_pgtable.h>

+/* Maximum number of VMs that can co-exist under pKVM. */
+#define KVM_MAX_PVMS 255
+
 #define HYP_MEMBLOCK_REGIONS 128

+int pkvm_init_host_vm(struct kvm *kvm);
+int pkvm_create_hyp_vm(struct kvm *kvm);
+void pkvm_destroy_hyp_vm(struct kvm *kvm);
+
 extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
 extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);

+static inline unsigned long
+hyp_vmemmap_memblock_size(struct memblock_region *reg, size_t vmemmap_entry_size)
+{
+	unsigned long nr_pages = reg->size >> PAGE_SHIFT;
+	unsigned long start, end;
+
+	start = (reg->base >> PAGE_SHIFT) * vmemmap_entry_size;
+	end = start + nr_pages * vmemmap_entry_size;
+	start = ALIGN_DOWN(start, PAGE_SIZE);
+	end = ALIGN(end, PAGE_SIZE);
+
+	return end - start;
+}
+
+static inline unsigned long hyp_vmemmap_pages(size_t vmemmap_entry_size)
+{
+	unsigned long res = 0, i;
+
+	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
+		res += hyp_vmemmap_memblock_size(&kvm_nvhe_sym(hyp_memory)[i],
+						 vmemmap_entry_size);
+	}
+
+	return res >> PAGE_SHIFT;
+}
+
+static inline unsigned long hyp_vm_table_pages(void)
+{
+	return PAGE_ALIGN(KVM_MAX_PVMS * sizeof(void *)) >> PAGE_SHIFT;
+}
+
 static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
 {
 	unsigned long total = 0, i;
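To make the vmemmap sizing concrete, a worked example under assumed numbers (not taken from this diff):

	/*
	 * Illustrative: a 1 GiB memblock with 4 KiB pages contains 262144
	 * pages. With a vmemmap_entry_size of, say, 4 bytes per page, the
	 * [start, end) byte range spans 1 MiB; after rounding start down
	 * and end up to page granularity, hyp_vmemmap_memblock_size()
	 * returns roughly 1 MiB, and hyp_vmemmap_pages() accumulates about
	 * 256 pages for this region.
	 */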
@@ -71,12 +71,6 @@ KVM_NVHE_ALIAS(nvhe_hyp_panic_handler);
 /* Vectors installed by hyp-init on reset HVC. */
 KVM_NVHE_ALIAS(__hyp_stub_vectors);

-/* Kernel symbol used by icache_is_vpipt(). */
-KVM_NVHE_ALIAS(__icache_flags);
-
-/* VMID bits set by the KVM VMID allocator */
-KVM_NVHE_ALIAS(kvm_arm_vmid_bits);
-
 /* Static keys which are set if a vGIC trap should be handled in hyp. */
 KVM_NVHE_ALIAS(vgic_v2_cpuif_trap);
 KVM_NVHE_ALIAS(vgic_v3_cpuif_trap);

@@ -92,9 +86,6 @@ KVM_NVHE_ALIAS(gic_nonsecure_priorities);
 KVM_NVHE_ALIAS(__start___kvm_ex_table);
 KVM_NVHE_ALIAS(__stop___kvm_ex_table);

-/* Array containing bases of nVHE per-CPU memory regions. */
-KVM_NVHE_ALIAS(kvm_arm_hyp_percpu_base);
-
 /* PMU available static key */
 #ifdef CONFIG_HW_PERF_EVENTS
 KVM_NVHE_ALIAS(kvm_arm_pmu_available);

@@ -111,12 +102,6 @@ KVM_NVHE_ALIAS_HYP(__memcpy, __pi_memcpy);
 KVM_NVHE_ALIAS_HYP(__memset, __pi_memset);
 #endif

-/* Kernel memory sections */
-KVM_NVHE_ALIAS(__start_rodata);
-KVM_NVHE_ALIAS(__end_rodata);
-KVM_NVHE_ALIAS(__bss_start);
-KVM_NVHE_ALIAS(__bss_stop);
-
 /* Hyp memory sections */
 KVM_NVHE_ALIAS(__hyp_idmap_text_start);
 KVM_NVHE_ALIAS(__hyp_idmap_text_end);

@@ -37,6 +37,7 @@
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmu.h>
+#include <asm/kvm_pkvm.h>
 #include <asm/kvm_emulate.h>
 #include <asm/sections.h>

@@ -50,7 +51,6 @@ DEFINE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);
 DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);

 DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
-unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);

 static bool vgic_present;

@@ -138,24 +138,24 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
 	int ret;

-	ret = kvm_arm_setup_stage2(kvm, type);
-	if (ret)
-		return ret;
-
-	ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu);
-	if (ret)
-		return ret;
-
 	ret = kvm_share_hyp(kvm, kvm + 1);
 	if (ret)
-		goto out_free_stage2_pgd;
+		return ret;
+
+	ret = pkvm_init_host_vm(kvm);
+	if (ret)
+		goto err_unshare_kvm;

 	if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL)) {
 		ret = -ENOMEM;
-		goto out_free_stage2_pgd;
+		goto err_unshare_kvm;
 	}
 	cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask);

+	ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu, type);
+	if (ret)
+		goto err_free_cpumask;
+
 	kvm_vgic_early_init(kvm);

 	/* The maximum number of VCPUs is limited by the host's GIC model */

@@ -164,9 +164,12 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	set_default_spectre(kvm);
 	kvm_arm_init_hypercalls(kvm);

-	return ret;
-out_free_stage2_pgd:
-	kvm_free_stage2_pgd(&kvm->arch.mmu);
+	return 0;
+
+err_free_cpumask:
+	free_cpumask_var(kvm->arch.supported_cpus);
+err_unshare_kvm:
+	kvm_unshare_hyp(kvm, kvm + 1);
 	return ret;
 }

@@ -187,6 +190,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)

 	kvm_vgic_destroy(kvm);

+	if (is_protected_kvm_enabled())
+		pkvm_destroy_hyp_vm(kvm);
+
 	kvm_destroy_vcpus(kvm);

 	kvm_unshare_hyp(kvm, kvm + 1);

@@ -569,6 +575,12 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 	if (ret)
 		return ret;

+	if (is_protected_kvm_enabled()) {
+		ret = pkvm_create_hyp_vm(kvm);
+		if (ret)
+			return ret;
+	}
+
 	if (!irqchip_in_kernel(kvm)) {
 		/*
 		 * Tell the rest of the code that there are userspace irqchip

@@ -1833,13 +1845,13 @@ static void teardown_hyp_mode(void)
 	free_hyp_pgds();
 	for_each_possible_cpu(cpu) {
 		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
-		free_pages(kvm_arm_hyp_percpu_base[cpu], nvhe_percpu_order());
+		free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
 	}
 }

 static int do_pkvm_init(u32 hyp_va_bits)
 {
-	void *per_cpu_base = kvm_ksym_ref(kvm_arm_hyp_percpu_base);
+	void *per_cpu_base = kvm_ksym_ref(kvm_nvhe_sym(kvm_arm_hyp_percpu_base));
 	int ret;

 	preempt_disable();

@@ -1859,11 +1871,8 @@
 	return ret;
 }

-static int kvm_hyp_init_protection(u32 hyp_va_bits)
+static void kvm_hyp_init_symbols(void)
 {
-	void *addr = phys_to_virt(hyp_mem_base);
-	int ret;
-
 	kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
 	kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
 	kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);

@@ -1872,6 +1881,14 @@ static int kvm_hyp_init_protection(u32 hyp_va_bits)
 	kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 	kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
 	kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
+	kvm_nvhe_sym(__icache_flags) = __icache_flags;
+	kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;
+}
+
+static int kvm_hyp_init_protection(u32 hyp_va_bits)
+{
+	void *addr = phys_to_virt(hyp_mem_base);
+	int ret;

 	ret = create_hyp_mappings(addr, addr + hyp_mem_size, PAGE_HYP);
 	if (ret)

@@ -1939,7 +1956,7 @@ static int init_hyp_mode(void)

 		page_addr = page_address(page);
 		memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size());
-		kvm_arm_hyp_percpu_base[cpu] = (unsigned long)page_addr;
+		kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu] = (unsigned long)page_addr;
 	}

 	/*

@@ -2032,7 +2049,7 @@ static int init_hyp_mode(void)
 	}

 	for_each_possible_cpu(cpu) {
-		char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu];
+		char *percpu_begin = (char *)kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];
 		char *percpu_end = percpu_begin + nvhe_percpu_size();

 		/* Map Hyp percpu pages */

@@ -2046,6 +2063,8 @@ static int init_hyp_mode(void)
 		cpu_prepare_hyp_mode(cpu, hyp_va_bits);
 	}

+	kvm_hyp_init_symbols();
+
 	if (is_protected_kvm_enabled()) {
 		init_cpu_logical_map();

@@ -2053,9 +2072,7 @@ static int init_hyp_mode(void)
 			err = -ENODEV;
 			goto out_err;
 		}
-	}

-	if (is_protected_kvm_enabled()) {
 		err = kvm_hyp_init_protection(hyp_va_bits);
 		if (err) {
 			kvm_err("Failed to init hyp memory protection\n");
@@ -2,9 +2,12 @@

 #include <linux/kbuild.h>
 #include <nvhe/memory.h>
+#include <nvhe/pkvm.h>

 int main(void)
 {
 	DEFINE(STRUCT_HYP_PAGE_SIZE, sizeof(struct hyp_page));
+	DEFINE(PKVM_HYP_VM_SIZE, sizeof(struct pkvm_hyp_vm));
+	DEFINE(PKVM_HYP_VCPU_SIZE, sizeof(struct pkvm_hyp_vcpu));
 	return 0;
 }

@@ -8,8 +8,10 @@
 #define __KVM_NVHE_MEM_PROTECT__
 #include <linux/kvm_host.h>
 #include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
 #include <asm/kvm_pgtable.h>
 #include <asm/virt.h>
+#include <nvhe/pkvm.h>
 #include <nvhe/spinlock.h>

 /*

@@ -43,30 +45,45 @@ static inline enum pkvm_page_state pkvm_getstate(enum kvm_pgtable_prot prot)
 	return prot & PKVM_PAGE_STATE_PROT_MASK;
 }

-struct host_kvm {
+struct host_mmu {
 	struct kvm_arch arch;
 	struct kvm_pgtable pgt;
 	struct kvm_pgtable_mm_ops mm_ops;
 	hyp_spinlock_t lock;
 };
-extern struct host_kvm host_kvm;
+extern struct host_mmu host_mmu;

-extern const u8 pkvm_hyp_id;
+/* This corresponds to page-table locking order */
+enum pkvm_component_id {
+	PKVM_ID_HOST,
+	PKVM_ID_HYP,
+};
+
+extern unsigned long hyp_nr_cpus;

 int __pkvm_prot_finalize(void);
 int __pkvm_host_share_hyp(u64 pfn);
 int __pkvm_host_unshare_hyp(u64 pfn);
+int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
+int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);

 bool addr_is_memory(phys_addr_t phys);
 int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
 int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id);
 int kvm_host_prepare_stage2(void *pgt_pool_base);
+int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd);
 void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);

+int hyp_pin_shared_mem(void *from, void *to);
+void hyp_unpin_shared_mem(void *from, void *to);
+void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc);
+int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
+		    struct kvm_hyp_memcache *host_mc);
+
 static __always_inline void __load_host_stage2(void)
 {
 	if (static_branch_likely(&kvm_protected_mode_initialized))
-		__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
+		__load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);
 	else
 		write_sysreg(0, vttbr_el2);
 }

@@ -38,6 +38,10 @@ static inline phys_addr_t hyp_virt_to_phys(void *addr)
 #define hyp_page_to_virt(page)	__hyp_va(hyp_page_to_phys(page))
 #define hyp_page_to_pool(page)	(((struct hyp_page *)page)->pool)

+/*
+ * Refcounting for 'struct hyp_page'.
+ * hyp_pool::lock must be held if atomic access to the refcount is required.
+ */
 static inline int hyp_page_count(void *addr)
 {
 	struct hyp_page *p = hyp_virt_to_page(addr);

@@ -45,4 +49,27 @@ static inline int hyp_page_count(void *addr)
 	return p->refcount;
 }

+static inline void hyp_page_ref_inc(struct hyp_page *p)
+{
+	BUG_ON(p->refcount == USHRT_MAX);
+	p->refcount++;
+}
+
+static inline void hyp_page_ref_dec(struct hyp_page *p)
+{
+	BUG_ON(!p->refcount);
+	p->refcount--;
+}
+
+static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
+{
+	hyp_page_ref_dec(p);
+	return (p->refcount == 0);
+}
+
+static inline void hyp_set_page_refcounted(struct hyp_page *p)
+{
+	BUG_ON(p->refcount);
+	p->refcount = 1;
+}
 #endif /* __KVM_HYP_MEMORY_H */

@@ -13,9 +13,13 @@
 extern struct kvm_pgtable pkvm_pgtable;
 extern hyp_spinlock_t pkvm_pgd_lock;

+int hyp_create_pcpu_fixmap(void);
+void *hyp_fixmap_map(phys_addr_t phys);
+void hyp_fixmap_unmap(void);
+
 int hyp_create_idmap(u32 hyp_va_bits);
 int hyp_map_vectors(void);
-int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back);
+int hyp_back_vmemmap(phys_addr_t back);
 int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot);
 int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
 int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot);

@@ -24,16 +28,4 @@ int __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
 				  unsigned long *haddr);
 int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr);

-static inline void hyp_vmemmap_range(phys_addr_t phys, unsigned long size,
-				     unsigned long *start, unsigned long *end)
-{
-	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct hyp_page *p = hyp_phys_to_page(phys);
-
-	*start = (unsigned long)p;
-	*end = *start + nr_pages * sizeof(struct hyp_page);
-	*start = ALIGN_DOWN(*start, PAGE_SIZE);
-	*end = ALIGN(*end, PAGE_SIZE);
-}
-
 #endif /* __KVM_HYP_MM_H */
arch/arm64/kvm/hyp/include/nvhe/pkvm.h (new file, 68 lines)
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Google LLC
+ * Author: Fuad Tabba <tabba@google.com>
+ */
+
+#ifndef __ARM64_KVM_NVHE_PKVM_H__
+#define __ARM64_KVM_NVHE_PKVM_H__
+
+#include <asm/kvm_pkvm.h>
+
+#include <nvhe/gfp.h>
+#include <nvhe/spinlock.h>
+
+/*
+ * Holds the relevant data for maintaining the vcpu state completely at hyp.
+ */
+struct pkvm_hyp_vcpu {
+	struct kvm_vcpu vcpu;
+
+	/* Backpointer to the host's (untrusted) vCPU instance. */
+	struct kvm_vcpu *host_vcpu;
+};
+
+/*
+ * Holds the relevant data for running a protected vm.
+ */
+struct pkvm_hyp_vm {
+	struct kvm kvm;
+
+	/* Backpointer to the host's (untrusted) KVM instance. */
+	struct kvm *host_kvm;
+
+	/* The guest's stage-2 page-table managed by the hypervisor. */
+	struct kvm_pgtable pgt;
+	struct kvm_pgtable_mm_ops mm_ops;
+	struct hyp_pool pool;
+	hyp_spinlock_t lock;
+
+	/*
+	 * The number of vcpus initialized and ready to run.
+	 * Modifying this is protected by 'vm_table_lock'.
+	 */
+	unsigned int nr_vcpus;
+
+	/* Array of the hyp vCPU structures for this VM. */
+	struct pkvm_hyp_vcpu *vcpus[];
+};
+
+static inline struct pkvm_hyp_vm *
+pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+	return container_of(hyp_vcpu->vcpu.kvm, struct pkvm_hyp_vm, kvm);
+}
+
+void pkvm_hyp_vm_table_init(void *tbl);
+
+int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
+		   unsigned long pgd_hva);
+int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
+		     unsigned long vcpu_hva);
+int __pkvm_teardown_vm(pkvm_handle_t handle);
+
+struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
+					 unsigned int vcpu_idx);
+void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu);
+
+#endif /* __ARM64_KVM_NVHE_PKVM_H__ */
@@ -28,9 +28,17 @@ typedef union hyp_spinlock {
 	};
 } hyp_spinlock_t;

+#define __HYP_SPIN_LOCK_INITIALIZER \
+	{ .__val = 0 }
+
+#define __HYP_SPIN_LOCK_UNLOCKED \
+	((hyp_spinlock_t) __HYP_SPIN_LOCK_INITIALIZER)
+
+#define DEFINE_HYP_SPINLOCK(x)	hyp_spinlock_t x = __HYP_SPIN_LOCK_UNLOCKED
+
 #define hyp_spin_lock_init(l)						\
 do {									\
-	*(l) = (hyp_spinlock_t){ .__val = 0 };				\
+	*(l) = __HYP_SPIN_LOCK_UNLOCKED;				\
 } while (0)

 static inline void hyp_spin_lock(hyp_spinlock_t *lock)
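The static initializer lets hyp code declare locks at file scope, valid before any runtime init has run. A minimal usage sketch (the lock name and critical section are illustrative):

	/* File-scope hyp lock, usable without calling hyp_spin_lock_init(). */
	static DEFINE_HYP_SPINLOCK(vm_table_lock);

	static void vm_table_op(void)
	{
		hyp_spin_lock(&vm_table_lock);
		/* ... manipulate data protected by the lock ... */
		hyp_spin_unlock(&vm_table_lock);
	}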
@@ -12,3 +12,14 @@ SYM_FUNC_START(__pi_dcache_clean_inval_poc)
 	ret
 SYM_FUNC_END(__pi_dcache_clean_inval_poc)
 SYM_FUNC_ALIAS(dcache_clean_inval_poc, __pi_dcache_clean_inval_poc)
+
+SYM_FUNC_START(__pi_icache_inval_pou)
+alternative_if ARM64_HAS_CACHE_DIC
+	isb
+	ret
+alternative_else_nop_endif
+
+	invalidate_icache_by_line x0, x1, x2, x3
+	ret
+SYM_FUNC_END(__pi_icache_inval_pou)
+SYM_FUNC_ALIAS(icache_inval_pou, __pi_icache_inval_pou)
@@ -15,17 +15,93 @@

 #include <nvhe/mem_protect.h>
 #include <nvhe/mm.h>
+#include <nvhe/pkvm.h>
 #include <nvhe/trap_handler.h>

 DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);

 void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);

+static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+	struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
+
+	hyp_vcpu->vcpu.arch.ctxt = host_vcpu->arch.ctxt;
+
+	hyp_vcpu->vcpu.arch.sve_state = kern_hyp_va(host_vcpu->arch.sve_state);
+	hyp_vcpu->vcpu.arch.sve_max_vl = host_vcpu->arch.sve_max_vl;
+
+	hyp_vcpu->vcpu.arch.hw_mmu = host_vcpu->arch.hw_mmu;
+
+	hyp_vcpu->vcpu.arch.hcr_el2 = host_vcpu->arch.hcr_el2;
+	hyp_vcpu->vcpu.arch.mdcr_el2 = host_vcpu->arch.mdcr_el2;
+	hyp_vcpu->vcpu.arch.cptr_el2 = host_vcpu->arch.cptr_el2;
+
+	hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags;
+	hyp_vcpu->vcpu.arch.fp_state = host_vcpu->arch.fp_state;
+
+	hyp_vcpu->vcpu.arch.debug_ptr = kern_hyp_va(host_vcpu->arch.debug_ptr);
+	hyp_vcpu->vcpu.arch.host_fpsimd_state = host_vcpu->arch.host_fpsimd_state;
+
+	hyp_vcpu->vcpu.arch.vsesr_el2 = host_vcpu->arch.vsesr_el2;
+
+	hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3 = host_vcpu->arch.vgic_cpu.vgic_v3;
+}
+
+static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+	struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
+	struct vgic_v3_cpu_if *hyp_cpu_if = &hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3;
+	struct vgic_v3_cpu_if *host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3;
+	unsigned int i;
+
+	host_vcpu->arch.ctxt = hyp_vcpu->vcpu.arch.ctxt;
+
+	host_vcpu->arch.hcr_el2 = hyp_vcpu->vcpu.arch.hcr_el2;
+	host_vcpu->arch.cptr_el2 = hyp_vcpu->vcpu.arch.cptr_el2;
+
+	host_vcpu->arch.fault = hyp_vcpu->vcpu.arch.fault;
+
+	host_vcpu->arch.iflags = hyp_vcpu->vcpu.arch.iflags;
+	host_vcpu->arch.fp_state = hyp_vcpu->vcpu.arch.fp_state;
+
+	host_cpu_if->vgic_hcr = hyp_cpu_if->vgic_hcr;
+	for (i = 0; i < hyp_cpu_if->used_lrs; ++i)
+		host_cpu_if->vgic_lr[i] = hyp_cpu_if->vgic_lr[i];
+}
+
 static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
 {
-	DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
+	DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 1);
+	int ret;

-	cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
+	host_vcpu = kern_hyp_va(host_vcpu);
+
+	if (unlikely(is_protected_kvm_enabled())) {
+		struct pkvm_hyp_vcpu *hyp_vcpu;
+		struct kvm *host_kvm;
+
+		host_kvm = kern_hyp_va(host_vcpu->kvm);
+		hyp_vcpu = pkvm_load_hyp_vcpu(host_kvm->arch.pkvm.handle,
+					      host_vcpu->vcpu_idx);
+		if (!hyp_vcpu) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		flush_hyp_vcpu(hyp_vcpu);
+
+		ret = __kvm_vcpu_run(&hyp_vcpu->vcpu);
+
+		sync_hyp_vcpu(hyp_vcpu);
+		pkvm_put_hyp_vcpu(hyp_vcpu);
+	} else {
+		/* The host is fully trusted, run its vCPU directly. */
+		ret = __kvm_vcpu_run(host_vcpu);
+	}
+
+out:
+	cpu_reg(host_ctxt, 1) = ret;
 }

 static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
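From EL1's perspective nothing changes here: the host still issues the run hypercall with its own vCPU pointer, and only EL2 decides whether to substitute the hyp vCPU. A sketch of the caller's side:

	/*
	 * Host side (sketch): EL1 always passes its own vCPU. When pKVM is
	 * enabled, the handler above loads the hyp vCPU for this VM handle
	 * and vcpu_idx, flushes the host state into it, runs it, and syncs
	 * back only the fields the host is allowed to see.
	 */
	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);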
@@ -191,6 +267,33 @@ static void handle___pkvm_vcpu_init_traps(struct kvm_cpu_context *host_ctxt)
 	__pkvm_vcpu_init_traps(kern_hyp_va(vcpu));
 }

+static void handle___pkvm_init_vm(struct kvm_cpu_context *host_ctxt)
+{
+	DECLARE_REG(struct kvm *, host_kvm, host_ctxt, 1);
+	DECLARE_REG(unsigned long, vm_hva, host_ctxt, 2);
+	DECLARE_REG(unsigned long, pgd_hva, host_ctxt, 3);
+
+	host_kvm = kern_hyp_va(host_kvm);
+	cpu_reg(host_ctxt, 1) = __pkvm_init_vm(host_kvm, vm_hva, pgd_hva);
+}
+
+static void handle___pkvm_init_vcpu(struct kvm_cpu_context *host_ctxt)
+{
+	DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
+	DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 2);
+	DECLARE_REG(unsigned long, vcpu_hva, host_ctxt, 3);
+
+	host_vcpu = kern_hyp_va(host_vcpu);
+	cpu_reg(host_ctxt, 1) = __pkvm_init_vcpu(handle, host_vcpu, vcpu_hva);
+}
+
+static void handle___pkvm_teardown_vm(struct kvm_cpu_context *host_ctxt)
+{
+	DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
+
+	cpu_reg(host_ctxt, 1) = __pkvm_teardown_vm(handle);
+}
+
 typedef void (*hcall_t)(struct kvm_cpu_context *);

 #define HANDLE_FUNC(x)	[__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x

@@ -220,6 +323,9 @@ static const hcall_t host_hcall[] = {
 	HANDLE_FUNC(__vgic_v3_save_aprs),
 	HANDLE_FUNC(__vgic_v3_restore_aprs),
 	HANDLE_FUNC(__pkvm_vcpu_init_traps),
+	HANDLE_FUNC(__pkvm_init_vm),
+	HANDLE_FUNC(__pkvm_init_vcpu),
+	HANDLE_FUNC(__pkvm_teardown_vm),
 };

 static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
@@ -23,6 +23,8 @@ u64 cpu_logical_map(unsigned int cpu)
 	return hyp_cpu_logical_map[cpu];
 }

+unsigned long __ro_after_init kvm_arm_hyp_percpu_base[NR_CPUS];
+
 unsigned long __hyp_per_cpu_offset(unsigned int cpu)
 {
 	unsigned long *cpu_base_array;
@@ -21,21 +21,33 @@

 #define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP)

-extern unsigned long hyp_nr_cpus;
-struct host_kvm host_kvm;
+struct host_mmu host_mmu;

 static struct hyp_pool host_s2_pool;

-const u8 pkvm_hyp_id = 1;
+static DEFINE_PER_CPU(struct pkvm_hyp_vm *, __current_vm);
+#define current_vm (*this_cpu_ptr(&__current_vm))
+
+static void guest_lock_component(struct pkvm_hyp_vm *vm)
+{
+	hyp_spin_lock(&vm->lock);
+	current_vm = vm;
+}
+
+static void guest_unlock_component(struct pkvm_hyp_vm *vm)
+{
+	current_vm = NULL;
+	hyp_spin_unlock(&vm->lock);
+}

 static void host_lock_component(void)
 {
-	hyp_spin_lock(&host_kvm.lock);
+	hyp_spin_lock(&host_mmu.lock);
 }

 static void host_unlock_component(void)
 {
-	hyp_spin_unlock(&host_kvm.lock);
+	hyp_spin_unlock(&host_mmu.lock);
 }

 static void hyp_lock_component(void)

@@ -81,7 +93,7 @@ static void host_s2_put_page(void *addr)

 static void host_s2_free_removed_table(void *addr, u32 level)
 {
-	kvm_pgtable_stage2_free_removed(&host_kvm.mm_ops, addr, level);
+	kvm_pgtable_stage2_free_removed(&host_mmu.mm_ops, addr, level);
 }

 static int prepare_s2_pool(void *pgt_pool_base)

@@ -95,7 +107,7 @@ static int prepare_s2_pool(void *pgt_pool_base)
 	if (ret)
 		return ret;

-	host_kvm.mm_ops = (struct kvm_pgtable_mm_ops) {
+	host_mmu.mm_ops = (struct kvm_pgtable_mm_ops) {
 		.zalloc_pages_exact = host_s2_zalloc_pages_exact,
 		.zalloc_page = host_s2_zalloc_page,
 		.free_removed_table = host_s2_free_removed_table,

@@ -117,7 +129,7 @@ static void prepare_host_vtcr(void)
 	parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
 	phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);

-	host_kvm.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
+	host_mmu.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
 					  id_aa64mmfr1_el1_sys_val, phys_shift);
 }
@@ -125,45 +137,170 @@ static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot)

 int kvm_host_prepare_stage2(void *pgt_pool_base)
 {
-	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
+	struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
 	int ret;

 	prepare_host_vtcr();
-	hyp_spin_lock_init(&host_kvm.lock);
-	mmu->arch = &host_kvm.arch;
+	hyp_spin_lock_init(&host_mmu.lock);
+	mmu->arch = &host_mmu.arch;

 	ret = prepare_s2_pool(pgt_pool_base);
 	if (ret)
 		return ret;

-	ret = __kvm_pgtable_stage2_init(&host_kvm.pgt, mmu,
-					&host_kvm.mm_ops, KVM_HOST_S2_FLAGS,
+	ret = __kvm_pgtable_stage2_init(&host_mmu.pgt, mmu,
+					&host_mmu.mm_ops, KVM_HOST_S2_FLAGS,
 					host_stage2_force_pte_cb);
 	if (ret)
 		return ret;

-	mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
-	mmu->pgt = &host_kvm.pgt;
+	mmu->pgd_phys = __hyp_pa(host_mmu.pgt.pgd);
+	mmu->pgt = &host_mmu.pgt;
 	atomic64_set(&mmu->vmid.id, 0);

 	return 0;
 }

+static bool guest_stage2_force_pte_cb(u64 addr, u64 end,
+				      enum kvm_pgtable_prot prot)
+{
+	return true;
+}
+
+static void *guest_s2_zalloc_pages_exact(size_t size)
+{
+	void *addr = hyp_alloc_pages(&current_vm->pool, get_order(size));
+
+	WARN_ON(size != (PAGE_SIZE << get_order(size)));
+	hyp_split_page(hyp_virt_to_page(addr));
+
+	return addr;
+}
+
+static void guest_s2_free_pages_exact(void *addr, unsigned long size)
+{
+	u8 order = get_order(size);
+	unsigned int i;
+
+	for (i = 0; i < (1 << order); i++)
+		hyp_put_page(&current_vm->pool, addr + (i * PAGE_SIZE));
+}
+
+static void *guest_s2_zalloc_page(void *mc)
+{
+	struct hyp_page *p;
+	void *addr;
+
+	addr = hyp_alloc_pages(&current_vm->pool, 0);
+	if (addr)
+		return addr;
+
+	addr = pop_hyp_memcache(mc, hyp_phys_to_virt);
+	if (!addr)
+		return addr;
+
+	memset(addr, 0, PAGE_SIZE);
+	p = hyp_virt_to_page(addr);
+	memset(p, 0, sizeof(*p));
+	p->refcount = 1;
+
+	return addr;
+}
+
+static void guest_s2_get_page(void *addr)
+{
+	hyp_get_page(&current_vm->pool, addr);
+}
+
+static void guest_s2_put_page(void *addr)
+{
+	hyp_put_page(&current_vm->pool, addr);
+}
+
+static void clean_dcache_guest_page(void *va, size_t size)
+{
+	__clean_dcache_guest_page(hyp_fixmap_map(__hyp_pa(va)), size);
+	hyp_fixmap_unmap();
+}
+
+static void invalidate_icache_guest_page(void *va, size_t size)
+{
+	__invalidate_icache_guest_page(hyp_fixmap_map(__hyp_pa(va)), size);
+	hyp_fixmap_unmap();
+}
+
+int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
+{
+	struct kvm_s2_mmu *mmu = &vm->kvm.arch.mmu;
+	unsigned long nr_pages;
+	int ret;
+
+	nr_pages = kvm_pgtable_stage2_pgd_size(vm->kvm.arch.vtcr) >> PAGE_SHIFT;
+	ret = hyp_pool_init(&vm->pool, hyp_virt_to_pfn(pgd), nr_pages, 0);
+	if (ret)
+		return ret;
+
+	hyp_spin_lock_init(&vm->lock);
+	vm->mm_ops = (struct kvm_pgtable_mm_ops) {
+		.zalloc_pages_exact = guest_s2_zalloc_pages_exact,
+		.free_pages_exact = guest_s2_free_pages_exact,
+		.zalloc_page = guest_s2_zalloc_page,
+		.phys_to_virt = hyp_phys_to_virt,
+		.virt_to_phys = hyp_virt_to_phys,
+		.page_count = hyp_page_count,
+		.get_page = guest_s2_get_page,
+		.put_page = guest_s2_put_page,
+		.dcache_clean_inval_poc = clean_dcache_guest_page,
+		.icache_inval_pou = invalidate_icache_guest_page,
+	};
+
+	guest_lock_component(vm);
+	ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0,
+					guest_stage2_force_pte_cb);
+	guest_unlock_component(vm);
+	if (ret)
+		return ret;
+
+	vm->kvm.arch.mmu.pgd_phys = __hyp_pa(vm->pgt.pgd);
+
+	return 0;
+}
+
+void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc)
+{
+	void *addr;
+
+	/* Dump all pgtable pages in the hyp_pool */
+	guest_lock_component(vm);
+	kvm_pgtable_stage2_destroy(&vm->pgt);
+	vm->kvm.arch.mmu.pgd_phys = 0ULL;
+	guest_unlock_component(vm);
+
+	/* Drain the hyp_pool into the memcache */
+	addr = hyp_alloc_pages(&vm->pool, 0);
+	while (addr) {
+		memset(hyp_virt_to_page(addr), 0, sizeof(struct hyp_page));
+		push_hyp_memcache(mc, addr, hyp_virt_to_phys);
+		WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(addr), 1));
+		addr = hyp_alloc_pages(&vm->pool, 0);
+	}
+}
+
 int __pkvm_prot_finalize(void)
 {
-	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
+	struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
 	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);

 	if (params->hcr_el2 & HCR_VM)
 		return -EPERM;

 	params->vttbr = kvm_get_vttbr(mmu);
-	params->vtcr = host_kvm.arch.vtcr;
+	params->vtcr = host_mmu.arch.vtcr;
 	params->hcr_el2 |= HCR_VM;
 	kvm_flush_dcache_to_poc(params, sizeof(*params));

 	write_sysreg(params->hcr_el2, hcr_el2);
-	__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
+	__load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);

 	/*
 	 * Make sure to have an ISB before the TLB maintenance below but only
@@ -181,7 +318,7 @@ int __pkvm_prot_finalize(void)

 static int host_stage2_unmap_dev_all(void)
 {
-	struct kvm_pgtable *pgt = &host_kvm.pgt;
+	struct kvm_pgtable *pgt = &host_mmu.pgt;
 	struct memblock_region *reg;
 	u64 addr = 0;
 	int i, ret;

@@ -201,7 +338,7 @@ struct kvm_mem_range {
 	u64 end;
 };

-static bool find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
+static struct memblock_region *find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
 {
 	int cur, left = 0, right = hyp_memblock_nr;
 	struct memblock_region *reg;

@@ -224,18 +361,28 @@ static bool find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
 		} else {
 			range->start = reg->base;
 			range->end = end;
-			return true;
+			return reg;
 		}
 	}

-	return false;
+	return NULL;
 }

 bool addr_is_memory(phys_addr_t phys)
 {
 	struct kvm_mem_range range;

-	return find_mem_range(phys, &range);
+	return !!find_mem_range(phys, &range);
 }

+static bool addr_is_allowed_memory(phys_addr_t phys)
+{
+	struct memblock_region *reg;
+	struct kvm_mem_range range;
+
+	reg = find_mem_range(phys, &range);
+
+	return reg && !(reg->flags & MEMBLOCK_NOMAP);
+}
+
 static bool is_in_mem_range(u64 addr, struct kvm_mem_range *range)

@@ -256,7 +403,7 @@ static bool range_is_memory(u64 start, u64 end)
 static inline int __host_stage2_idmap(u64 start, u64 end,
 				      enum kvm_pgtable_prot prot)
 {
-	return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
+	return kvm_pgtable_stage2_map(&host_mmu.pgt, start, end - start, start,
 				      prot, &host_s2_pool, 0);
 }

@@ -269,7 +416,7 @@ static inline int __host_stage2_idmap(u64 start, u64 end,
 #define host_stage2_try(fn, ...)					\
 	({								\
 		int __ret;						\
-		hyp_assert_lock_held(&host_kvm.lock);			\
+		hyp_assert_lock_held(&host_mmu.lock);			\
 		__ret = fn(__VA_ARGS__);				\
 		if (__ret == -ENOMEM) {					\
 			__ret = host_stage2_unmap_dev_all();		\

@@ -292,8 +439,8 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
 	u32 level;
 	int ret;

-	hyp_assert_lock_held(&host_kvm.lock);
-	ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, &level);
+	hyp_assert_lock_held(&host_mmu.lock);
+	ret = kvm_pgtable_get_leaf(&host_mmu.pgt, addr, &pte, &level);
 	if (ret)
 		return ret;

@@ -325,7 +472,7 @@ int host_stage2_idmap_locked(phys_addr_t addr, u64 size,

 int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
 {
-	return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_kvm.pgt,
+	return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt,
 			       addr, size, &host_s2_pool, owner_id);
 }

@@ -354,7 +501,7 @@ static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot)
 static int host_stage2_idmap(u64 addr)
 {
 	struct kvm_mem_range range;
-	bool is_memory = find_mem_range(addr, &range);
+	bool is_memory = !!find_mem_range(addr, &range);
 	enum kvm_pgtable_prot prot;
 	int ret;

@@ -386,12 +533,6 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
 	BUG_ON(ret && ret != -EAGAIN);
 }

-/* This corresponds to locking order */
-enum pkvm_component_id {
-	PKVM_ID_HOST,
-	PKVM_ID_HYP,
-};
-
 struct pkvm_mem_transition {
 	u64 nr_pages;
@@ -405,6 +546,9 @@ struct pkvm_mem_transition {
 			/* Address in the completer's address space */
 			u64 completer_addr;
 		} host;
+		struct {
+			u64 completer_addr;
+		} hyp;
 	};
 } initiator;

@@ -418,6 +562,10 @@ struct pkvm_mem_share {
 	const enum kvm_pgtable_prot		completer_prot;
 };

+struct pkvm_mem_donation {
+	const struct pkvm_mem_transition	tx;
+};
+
 struct check_walk_data {
 	enum pkvm_page_state	desired;
 	enum pkvm_page_state	(*get_page_state)(kvm_pte_t pte);

@@ -428,7 +576,7 @@ static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx,
 {
 	struct check_walk_data *d = ctx->arg;

-	if (kvm_pte_valid(ctx->old) && !addr_is_memory(kvm_pte_to_phys(ctx->old)))
+	if (kvm_pte_valid(ctx->old) && !addr_is_allowed_memory(kvm_pte_to_phys(ctx->old)))
 		return -EINVAL;

 	return d->get_page_state(ctx->old) == d->desired ? 0 : -EPERM;

@@ -462,8 +610,8 @@ static int __host_check_page_state_range(u64 addr, u64 size,
 		.get_page_state	= host_get_page_state,
 	};

-	hyp_assert_lock_held(&host_kvm.lock);
-	return check_page_state_range(&host_kvm.pgt, addr, size, &d);
+	hyp_assert_lock_held(&host_mmu.lock);
+	return check_page_state_range(&host_mmu.pgt, addr, size, &d);
 }

 static int __host_set_page_state_range(u64 addr, u64 size,

@@ -514,6 +662,46 @@ static int host_initiate_unshare(u64 *completer_addr,
 	return __host_set_page_state_range(addr, size, PKVM_PAGE_OWNED);
 }

+static int host_initiate_donation(u64 *completer_addr,
+				  const struct pkvm_mem_transition *tx)
+{
+	u8 owner_id = tx->completer.id;
+	u64 size = tx->nr_pages * PAGE_SIZE;
+
+	*completer_addr = tx->initiator.host.completer_addr;
+	return host_stage2_set_owner_locked(tx->initiator.addr, size, owner_id);
+}
+
+static bool __host_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
+{
+	return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) ||
+		 tx->initiator.id != PKVM_ID_HYP);
+}
+
+static int __host_ack_transition(u64 addr, const struct pkvm_mem_transition *tx,
+				 enum pkvm_page_state state)
+{
+	u64 size = tx->nr_pages * PAGE_SIZE;
+
+	if (__host_ack_skip_pgtable_check(tx))
+		return 0;
+
+	return __host_check_page_state_range(addr, size, state);
+}
+
+static int host_ack_donation(u64 addr, const struct pkvm_mem_transition *tx)
+{
+	return __host_ack_transition(addr, tx, PKVM_NOPAGE);
+}
+
+static int host_complete_donation(u64 addr, const struct pkvm_mem_transition *tx)
+{
+	u64 size = tx->nr_pages * PAGE_SIZE;
+	u8 host_id = tx->completer.id;
+
+	return host_stage2_set_owner_locked(addr, size, host_id);
+}
+
 static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte)
 {
 	if (!kvm_pte_valid(pte))

@@ -534,6 +722,27 @@ static int __hyp_check_page_state_range(u64 addr, u64 size,
 	return check_page_state_range(&pkvm_pgtable, addr, size, &d);
 }

+static int hyp_request_donation(u64 *completer_addr,
+				const struct pkvm_mem_transition *tx)
+{
+	u64 size = tx->nr_pages * PAGE_SIZE;
+	u64 addr = tx->initiator.addr;
+
+	*completer_addr = tx->initiator.hyp.completer_addr;
+	return __hyp_check_page_state_range(addr, size, PKVM_PAGE_OWNED);
+}
+
+static int hyp_initiate_donation(u64 *completer_addr,
+				 const struct pkvm_mem_transition *tx)
+{
+	u64 size = tx->nr_pages * PAGE_SIZE;
+	int ret;
+
+	*completer_addr = tx->initiator.hyp.completer_addr;
+	ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, tx->initiator.addr, size);
+	return (ret != size) ? -EFAULT : 0;
+}
+
 static bool __hyp_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
 {
 	return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) ||

@@ -558,6 +767,9 @@ static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
 {
 	u64 size = tx->nr_pages * PAGE_SIZE;

+	if (tx->initiator.id == PKVM_ID_HOST && hyp_page_count((void *)addr))
+		return -EBUSY;
+
 	if (__hyp_ack_skip_pgtable_check(tx))
 		return 0;

@@ -565,6 +777,16 @@ static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
 					    PKVM_PAGE_SHARED_BORROWED);
 }

+static int hyp_ack_donation(u64 addr, const struct pkvm_mem_transition *tx)
+{
+	u64 size = tx->nr_pages * PAGE_SIZE;
+
+	if (__hyp_ack_skip_pgtable_check(tx))
+		return 0;
+
+	return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE);
+}
+
 static int hyp_complete_share(u64 addr, const struct pkvm_mem_transition *tx,
 			      enum kvm_pgtable_prot perms)
 {

@@ -583,6 +805,15 @@ static int hyp_complete_unshare(u64 addr, const struct pkvm_mem_transition *tx)
 	return (ret != size) ? -EFAULT : 0;
 }

+static int hyp_complete_donation(u64 addr,
+				 const struct pkvm_mem_transition *tx)
+{
+	void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);
+	enum kvm_pgtable_prot prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED);
+
+	return pkvm_create_mappings_locked(start, end, prot);
+}
+
 static int check_share(struct pkvm_mem_share *share)
 {
 	const struct pkvm_mem_transition *tx = &share->tx;
@@ -735,6 +966,94 @@ static int do_unshare(struct pkvm_mem_share *share)
 	return WARN_ON(__do_unshare(share));
 }

+static int check_donation(struct pkvm_mem_donation *donation)
+{
+	const struct pkvm_mem_transition *tx = &donation->tx;
+	u64 completer_addr;
+	int ret;
+
+	switch (tx->initiator.id) {
+	case PKVM_ID_HOST:
+		ret = host_request_owned_transition(&completer_addr, tx);
+		break;
+	case PKVM_ID_HYP:
+		ret = hyp_request_donation(&completer_addr, tx);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	if (ret)
+		return ret;
+
+	switch (tx->completer.id) {
+	case PKVM_ID_HOST:
+		ret = host_ack_donation(completer_addr, tx);
+		break;
+	case PKVM_ID_HYP:
+		ret = hyp_ack_donation(completer_addr, tx);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int __do_donate(struct pkvm_mem_donation *donation)
+{
+	const struct pkvm_mem_transition *tx = &donation->tx;
+	u64 completer_addr;
+	int ret;
+
+	switch (tx->initiator.id) {
+	case PKVM_ID_HOST:
+		ret = host_initiate_donation(&completer_addr, tx);
+		break;
+	case PKVM_ID_HYP:
+		ret = hyp_initiate_donation(&completer_addr, tx);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	if (ret)
+		return ret;
+
+	switch (tx->completer.id) {
+	case PKVM_ID_HOST:
+		ret = host_complete_donation(completer_addr, tx);
+		break;
+	case PKVM_ID_HYP:
+		ret = hyp_complete_donation(completer_addr, tx);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/*
+ * do_donate():
+ *
+ * The page owner transfers ownership to another component, losing access
+ * as a consequence.
+ *
+ * Initiator: OWNED	=> NOPAGE
+ * Completer: NOPAGE	=> OWNED
+ */
+static int do_donate(struct pkvm_mem_donation *donation)
+{
+	int ret;
+
+	ret = check_donation(donation);
+	if (ret)
+		return ret;
+
+	return WARN_ON(__do_donate(donation));
+}
+
 int __pkvm_host_share_hyp(u64 pfn)
 {
 	int ret;
@@ -800,3 +1119,112 @@ int __pkvm_host_unshare_hyp(u64 pfn)

 	return ret;
 }
+
+int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
+{
+	int ret;
+	u64 host_addr = hyp_pfn_to_phys(pfn);
+	u64 hyp_addr = (u64)__hyp_va(host_addr);
+	struct pkvm_mem_donation donation = {
+		.tx	= {
+			.nr_pages	= nr_pages,
+			.initiator	= {
+				.id	= PKVM_ID_HOST,
+				.addr	= host_addr,
+				.host	= {
+					.completer_addr = hyp_addr,
+				},
+			},
+			.completer	= {
+				.id	= PKVM_ID_HYP,
+			},
+		},
+	};
+
+	host_lock_component();
+	hyp_lock_component();
+
+	ret = do_donate(&donation);
+
+	hyp_unlock_component();
+	host_unlock_component();
+
+	return ret;
+}
+
+int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
+{
+	int ret;
+	u64 host_addr = hyp_pfn_to_phys(pfn);
+	u64 hyp_addr = (u64)__hyp_va(host_addr);
+	struct pkvm_mem_donation donation = {
+		.tx	= {
+			.nr_pages	= nr_pages,
+			.initiator	= {
+				.id	= PKVM_ID_HYP,
+				.addr	= hyp_addr,
+				.hyp	= {
+					.completer_addr = host_addr,
+				},
+			},
+			.completer	= {
+				.id	= PKVM_ID_HOST,
+			},
+		},
+	};
+
+	host_lock_component();
+	hyp_lock_component();
+
+	ret = do_donate(&donation);
+
+	hyp_unlock_component();
+	host_unlock_component();
+
+	return ret;
+}
+
+int hyp_pin_shared_mem(void *from, void *to)
+{
+	u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
+	u64 end = PAGE_ALIGN((u64)to);
+	u64 size = end - start;
+	int ret;
+
+	host_lock_component();
+	hyp_lock_component();
+
+	ret = __host_check_page_state_range(__hyp_pa(start), size,
+					    PKVM_PAGE_SHARED_OWNED);
+	if (ret)
+		goto unlock;
+
+	ret = __hyp_check_page_state_range(start, size,
+					   PKVM_PAGE_SHARED_BORROWED);
+	if (ret)
+		goto unlock;
+
+	for (cur = start; cur < end; cur += PAGE_SIZE)
+		hyp_page_ref_inc(hyp_virt_to_page(cur));
+
+unlock:
+	hyp_unlock_component();
+	host_unlock_component();
+
+	return ret;
+}
+
+void hyp_unpin_shared_mem(void *from, void *to)
+{
+	u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
+	u64 end = PAGE_ALIGN((u64)to);
+
+	host_lock_component();
+	hyp_lock_component();
+
+	for (cur = start; cur < end; cur += PAGE_SIZE)
+		hyp_page_ref_dec(hyp_virt_to_page(cur));
+
+	hyp_unlock_component();
+	host_unlock_component();
+}
@@ -14,6 +14,7 @@
 #include <nvhe/early_alloc.h>
 #include <nvhe/gfp.h>
 #include <nvhe/memory.h>
+#include <nvhe/mem_protect.h>
 #include <nvhe/mm.h>
 #include <nvhe/spinlock.h>

@@ -25,6 +26,12 @@ unsigned int hyp_memblock_nr;

 static u64 __io_map_base;

+struct hyp_fixmap_slot {
+	u64 addr;
+	kvm_pte_t *ptep;
+};
+static DEFINE_PER_CPU(struct hyp_fixmap_slot, fixmap_slots);
+
 static int __pkvm_create_mappings(unsigned long start, unsigned long size,
 				  unsigned long phys, enum kvm_pgtable_prot prot)
 {
@@ -129,13 +136,36 @@ int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
 	return ret;
 }

-int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back)
+int hyp_back_vmemmap(phys_addr_t back)
 {
-	unsigned long start, end;
+	unsigned long i, start, size, end = 0;
+	int ret;

-	hyp_vmemmap_range(phys, size, &start, &end);
+	for (i = 0; i < hyp_memblock_nr; i++) {
+		start = hyp_memory[i].base;
+		start = ALIGN_DOWN((u64)hyp_phys_to_page(start), PAGE_SIZE);
+		/*
+		 * The begining of the hyp_vmemmap region for the current
+		 * memblock may already be backed by the page backing the end
+		 * the previous region, so avoid mapping it twice.
+		 */
+		start = max(start, end);

-	return __pkvm_create_mappings(start, end - start, back, PAGE_HYP);
+		end = hyp_memory[i].base + hyp_memory[i].size;
+		end = PAGE_ALIGN((u64)hyp_phys_to_page(end));
+		if (start >= end)
+			continue;
+
+		size = end - start;
+		ret = __pkvm_create_mappings(start, size, back, PAGE_HYP);
+		if (ret)
+			return ret;
+
+		memset(hyp_phys_to_virt(back), 0, size);
+		back += size;
+	}
+
+	return 0;
 }

 static void *__hyp_bp_vect_base;
@ -189,6 +219,102 @@ int hyp_map_vectors(void)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void *hyp_fixmap_map(phys_addr_t phys)
|
||||
{
|
||||
struct hyp_fixmap_slot *slot = this_cpu_ptr(&fixmap_slots);
|
||||
kvm_pte_t pte, *ptep = slot->ptep;
|
||||
|
||||
pte = *ptep;
|
||||
pte &= ~kvm_phys_to_pte(KVM_PHYS_INVALID);
|
||||
pte |= kvm_phys_to_pte(phys) | KVM_PTE_VALID;
|
||||
WRITE_ONCE(*ptep, pte);
|
||||
dsb(ishst);
|
||||
|
||||
return (void *)slot->addr;
|
||||
}
|
||||
|
||||
static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
|
||||
{
|
||||
kvm_pte_t *ptep = slot->ptep;
|
||||
u64 addr = slot->addr;
|
||||
|
||||
WRITE_ONCE(*ptep, *ptep & ~KVM_PTE_VALID);
|
||||
|
||||
/*
|
||||
* Irritatingly, the architecture requires that we use inner-shareable
|
||||
* broadcast TLB invalidation here in case another CPU speculates
|
||||
* through our fixmap and decides to create an "amalagamation of the
|
||||
* values held in the TLB" due to the apparent lack of a
|
||||
* break-before-make sequence.
|
||||
*
|
||||
* https://lore.kernel.org/kvm/20221017115209.2099-1-will@kernel.org/T/#mf10dfbaf1eaef9274c581b81c53758918c1d0f03
|
||||
*/
|
||||
dsb(ishst);
|
||||
__tlbi_level(vale2is, __TLBI_VADDR(addr, 0), (KVM_PGTABLE_MAX_LEVELS - 1));
|
||||
dsb(ish);
|
||||
isb();
|
||||
}
|
||||
|
||||
void hyp_fixmap_unmap(void)
|
||||
{
|
||||
fixmap_clear_slot(this_cpu_ptr(&fixmap_slots));
|
||||
}
|
||||
|
||||
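A hedged usage sketch of the per-CPU fixmap (hypothetical caller, not a hunk from this series): it gives the current CPU a temporary window onto one physical page at a time, e.g. to scrub a page for which the hypervisor keeps no linear mapping.

static void scrub_page(phys_addr_t phys)
{
	void *va = hyp_fixmap_map(phys);	/* window in, current CPU only */

	memset(va, 0, PAGE_SIZE);
	hyp_fixmap_unmap();			/* window out, plus the TLBI above */
}

Since each CPU owns exactly one slot, the map/unmap pair must stay on a single CPU and cannot nest.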
static int __create_fixmap_slot_cb(const struct kvm_pgtable_visit_ctx *ctx,
				   enum kvm_pgtable_walk_flags visit)
{
	struct hyp_fixmap_slot *slot = per_cpu_ptr(&fixmap_slots, (u64)ctx->arg);

	if (!kvm_pte_valid(ctx->old) || ctx->level != KVM_PGTABLE_MAX_LEVELS - 1)
		return -EINVAL;

	slot->addr = ctx->addr;
	slot->ptep = ctx->ptep;

	/*
	 * Clear the PTE, but keep the page-table page refcount elevated to
	 * prevent it from ever being freed. This lets us manipulate the PTEs
	 * by hand safely without ever needing to allocate memory.
	 */
	fixmap_clear_slot(slot);

	return 0;
}

static int create_fixmap_slot(u64 addr, u64 cpu)
{
	struct kvm_pgtable_walker walker = {
		.cb	= __create_fixmap_slot_cb,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= (void *)cpu,
	};

	return kvm_pgtable_walk(&pkvm_pgtable, addr, PAGE_SIZE, &walker);
}

int hyp_create_pcpu_fixmap(void)
{
	unsigned long addr, i;
	int ret;

	for (i = 0; i < hyp_nr_cpus; i++) {
		ret = pkvm_alloc_private_va_range(PAGE_SIZE, &addr);
		if (ret)
			return ret;

		ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PAGE_SIZE,
					  __hyp_pa(__hyp_bss_start), PAGE_HYP);
		if (ret)
			return ret;

		ret = create_fixmap_slot(addr, i);
		if (ret)
			return ret;
	}

	return 0;
}
int hyp_create_idmap(u32 hyp_va_bits)
{
	unsigned long start, end;
@ -213,3 +339,36 @@ int hyp_create_idmap(u32 hyp_va_bits)

	return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
}

static void *admit_host_page(void *arg)
{
	struct kvm_hyp_memcache *host_mc = arg;

	if (!host_mc->nr_pages)
		return NULL;

	/*
	 * The host still owns the pages in its memcache, so we need to go
	 * through a full host-to-hyp donation cycle to change it. Fortunately,
	 * __pkvm_host_donate_hyp() takes care of races for us, so if it
	 * succeeds we're good to go.
	 */
	if (__pkvm_host_donate_hyp(hyp_phys_to_pfn(host_mc->head), 1))
		return NULL;

	return pop_hyp_memcache(host_mc, hyp_phys_to_virt);
}

/* Refill our local memcache by popping pages from the one provided by the host. */
int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
		    struct kvm_hyp_memcache *host_mc)
{
	struct kvm_hyp_memcache tmp = *host_mc;
	int ret;

	ret =  __topup_hyp_memcache(mc, min_pages, admit_host_page,
				    hyp_virt_to_phys, &tmp);
	*host_mc = tmp;

	return ret;
}
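The memcache being refilled here is a LIFO of free pages threaded through the pages themselves: each page's first word holds the physical address of the next one, which is why pop/push take a phys-to-virt (or virt-to-phys) hook. A standalone model (illustrative userspace code with identity "phys" addresses):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

struct memcache {
	uintptr_t head;		/* "phys" address of the top page, 0 if empty */
	unsigned long nr_pages;
};

static void push_page(struct memcache *mc, void *page)
{
	*(uintptr_t *)page = mc->head;	/* link the old head into the page */
	mc->head = (uintptr_t)page;
	mc->nr_pages++;
}

static void *pop_page(struct memcache *mc)
{
	void *page;

	if (!mc->nr_pages)
		return NULL;
	page = (void *)mc->head;
	mc->head = *(uintptr_t *)page;	/* unlink */
	mc->nr_pages--;
	return page;
}

int main(void)
{
	struct memcache mc = { 0, 0 };
	void *p = aligned_alloc(4096, 4096);

	push_page(&mc, p);
	assert(pop_page(&mc) == p && mc.nr_pages == 0);
	free(p);
	return 0;
}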
@ -93,11 +93,16 @@ static inline struct hyp_page *node_to_page(struct list_head *node)
static void __hyp_attach_page(struct hyp_pool *pool,
			      struct hyp_page *p)
{
	phys_addr_t phys = hyp_page_to_phys(p);
	unsigned short order = p->order;
	struct hyp_page *buddy;

	memset(hyp_page_to_virt(p), 0, PAGE_SIZE << p->order);

	/* Skip coalescing for 'external' pages being freed into the pool. */
	if (phys < pool->range_start || phys >= pool->range_end)
		goto insert;

	/*
	 * Only the first struct hyp_page of a high-order page (otherwise known
	 * as the 'head') should have p->order set. The non-head pages should
@ -116,6 +121,7 @@ static void __hyp_attach_page(struct hyp_pool *pool,
		p = min(p, buddy);
	}

insert:
	/* Mark the new head, and insert it */
	p->order = order;
	page_add_to_list(p, &pool->free_area[order]);
@ -144,25 +150,6 @@ static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool,
	return p;
}

static inline void hyp_page_ref_inc(struct hyp_page *p)
{
	BUG_ON(p->refcount == USHRT_MAX);
	p->refcount++;
}

static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
{
	BUG_ON(!p->refcount);
	p->refcount--;
	return (p->refcount == 0);
}

static inline void hyp_set_page_refcounted(struct hyp_page *p)
{
	BUG_ON(p->refcount);
	p->refcount = 1;
}

static void __hyp_put_page(struct hyp_pool *pool, struct hyp_page *p)
{
	if (hyp_page_ref_dec_and_test(p))
@ -249,10 +236,8 @@ int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,

	/* Init the vmemmap portion */
	p = hyp_phys_to_page(phys);
	for (i = 0; i < nr_pages; i++) {
		p[i].order = 0;
	for (i = 0; i < nr_pages; i++)
		hyp_set_page_refcounted(&p[i]);
	}

	/* Attach the unused pages to the buddy tree */
	for (i = reserved_pages; i < nr_pages; i++)
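For reference, the coalescing above relies on the standard buddy identity: the order-o buddy of a block differs from it only in bit PAGE_SHIFT + o of its physical address. A standalone check of that arithmetic (illustrative, not kernel code):

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12

static uint64_t buddy_of(uint64_t phys, unsigned int order)
{
	return phys ^ (1ULL << (PAGE_SHIFT + order));
}

int main(void)
{
	assert(buddy_of(0x1000, 0) == 0x0000);	/* order-0 pair */
	assert(buddy_of(0x4000, 1) == 0x6000);	/* order-1 pair */
	return 0;
}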
@ -7,8 +7,17 @@
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <nvhe/fixed_config.h>
#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

/* Used by icache_is_vpipt(). */
unsigned long __icache_flags;

/* Used by kvm_get_vttbr(). */
unsigned int kvm_arm_vmid_bits;

/*
 * Set trap register values based on features in ID_AA64PFR0.
 */
@ -183,3 +192,430 @@ void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
	pvm_init_traps_aa64mmfr0(vcpu);
	pvm_init_traps_aa64mmfr1(vcpu);
}

/*
 * Start the VM table handle at the offset defined instead of at 0.
 * Mainly for sanity checking and debugging.
 */
#define HANDLE_OFFSET 0x1000

static unsigned int vm_handle_to_idx(pkvm_handle_t handle)
{
	return handle - HANDLE_OFFSET;
}

static pkvm_handle_t idx_to_vm_handle(unsigned int idx)
{
	return idx + HANDLE_OFFSET;
}
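A worked instance of the conversion pair above: slot 0 hands out handle 0x1000, and a bogus handle below the offset wraps to a huge index that the 'idx >= KVM_MAX_PVMS' bound check in get_vm_by_handle() below rejects. Standalone model (illustrative bound):

#include <assert.h>
#include <stdint.h>

#define HANDLE_OFFSET 0x1000
#define MAX_PVMS 255	/* illustrative stand-in for KVM_MAX_PVMS */

int main(void)
{
	assert((uint32_t)(0 + HANDLE_OFFSET) == 0x1000);	/* slot 0 */
	assert((uint32_t)(0x1001 - HANDLE_OFFSET) == 1);	/* slot 1 */
	/* handle 0 underflows to 0xfffff000 and fails the bound check: */
	assert((uint32_t)(0 - HANDLE_OFFSET) >= MAX_PVMS);
	return 0;
}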
/*
 * Spinlock for protecting state related to the VM table. Protects writes
 * to 'vm_table' and 'nr_table_entries' as well as reads and writes to
 * 'last_hyp_vcpu_lookup'.
 */
static DEFINE_HYP_SPINLOCK(vm_table_lock);

/*
 * The table of VM entries for protected VMs in hyp.
 * Allocated at hyp initialization and setup.
 */
static struct pkvm_hyp_vm **vm_table;

void pkvm_hyp_vm_table_init(void *tbl)
{
	WARN_ON(vm_table);
	vm_table = tbl;
}

/*
 * Return the hyp vm structure corresponding to the handle.
 */
static struct pkvm_hyp_vm *get_vm_by_handle(pkvm_handle_t handle)
{
	unsigned int idx = vm_handle_to_idx(handle);

	if (unlikely(idx >= KVM_MAX_PVMS))
		return NULL;

	return vm_table[idx];
}

struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
					 unsigned int vcpu_idx)
{
	struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
	struct pkvm_hyp_vm *hyp_vm;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
		goto unlock;

	hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
	hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
unlock:
	hyp_spin_unlock(&vm_table_lock);
	return hyp_vcpu;
}

void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);

	hyp_spin_lock(&vm_table_lock);
	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
	hyp_spin_unlock(&vm_table_lock);
}
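A hedged sketch of the expected caller pattern (hypothetical code, but this is the shape the vcpu-run path takes): the load/put pair brackets any use of the hyp vCPU, and the page refcount taken on the hyp VM keeps __pkvm_teardown_vm() from tearing the VM down under a running vCPU.

	struct pkvm_hyp_vcpu *hyp_vcpu;

	hyp_vcpu = pkvm_load_hyp_vcpu(handle, vcpu_idx);
	if (!hyp_vcpu)
		return -EINVAL;

	/* ... run or manipulate the protected vCPU ... */

	pkvm_put_hyp_vcpu(hyp_vcpu);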
static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
{
	if (host_vcpu)
		hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1);
}

static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
			     unsigned int nr_vcpus)
{
	int i;

	for (i = 0; i < nr_vcpus; i++)
		unpin_host_vcpu(hyp_vcpus[i]->host_vcpu);
}

static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
			     unsigned int nr_vcpus)
{
	hyp_vm->host_kvm = host_kvm;
	hyp_vm->kvm.created_vcpus = nr_vcpus;
	hyp_vm->kvm.arch.vtcr = host_mmu.arch.vtcr;
}

static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
			      struct pkvm_hyp_vm *hyp_vm,
			      struct kvm_vcpu *host_vcpu,
			      unsigned int vcpu_idx)
{
	int ret = 0;

	if (hyp_pin_shared_mem(host_vcpu, host_vcpu + 1))
		return -EBUSY;

	if (host_vcpu->vcpu_idx != vcpu_idx) {
		ret = -EINVAL;
		goto done;
	}

	hyp_vcpu->host_vcpu = host_vcpu;

	hyp_vcpu->vcpu.kvm = &hyp_vm->kvm;
	hyp_vcpu->vcpu.vcpu_id = READ_ONCE(host_vcpu->vcpu_id);
	hyp_vcpu->vcpu.vcpu_idx = vcpu_idx;

	hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
	hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
done:
	if (ret)
		unpin_host_vcpu(host_vcpu);
	return ret;
}
static int find_free_vm_table_entry(struct kvm *host_kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_PVMS; ++i) {
		if (!vm_table[i])
			return i;
	}

	return -ENOMEM;
}

/*
 * Allocate a VM table entry and insert a pointer to the new vm.
 *
 * Return a unique handle to the protected VM on success,
 * negative error code on failure.
 */
static pkvm_handle_t insert_vm_table_entry(struct kvm *host_kvm,
					   struct pkvm_hyp_vm *hyp_vm)
{
	struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu;
	int idx;

	hyp_assert_lock_held(&vm_table_lock);

	/*
	 * Initializing protected state might have failed, yet a malicious
	 * host could trigger this function. Thus, ensure that 'vm_table'
	 * exists.
	 */
	if (unlikely(!vm_table))
		return -EINVAL;

	idx = find_free_vm_table_entry(host_kvm);
	if (idx < 0)
		return idx;

	hyp_vm->kvm.arch.pkvm.handle = idx_to_vm_handle(idx);

	/* VMID 0 is reserved for the host */
	atomic64_set(&mmu->vmid.id, idx + 1);

	mmu->arch = &hyp_vm->kvm.arch;
	mmu->pgt = &hyp_vm->pgt;

	vm_table[idx] = hyp_vm;
	return hyp_vm->kvm.arch.pkvm.handle;
}

/*
 * Deallocate and remove the VM table entry corresponding to the handle.
 */
static void remove_vm_table_entry(pkvm_handle_t handle)
{
	hyp_assert_lock_held(&vm_table_lock);
	vm_table[vm_handle_to_idx(handle)] = NULL;
}
static size_t pkvm_get_hyp_vm_size(unsigned int nr_vcpus)
{
	return size_add(sizeof(struct pkvm_hyp_vm),
		size_mul(sizeof(struct pkvm_hyp_vcpu *), nr_vcpus));
}

static void *map_donated_memory_noclear(unsigned long host_va, size_t size)
{
	void *va = (void *)kern_hyp_va(host_va);

	if (!PAGE_ALIGNED(va))
		return NULL;

	if (__pkvm_host_donate_hyp(hyp_virt_to_pfn(va),
				   PAGE_ALIGN(size) >> PAGE_SHIFT))
		return NULL;

	return va;
}

static void *map_donated_memory(unsigned long host_va, size_t size)
{
	void *va = map_donated_memory_noclear(host_va, size);

	if (va)
		memset(va, 0, size);

	return va;
}

static void __unmap_donated_memory(void *va, size_t size)
{
	WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(va),
				       PAGE_ALIGN(size) >> PAGE_SHIFT));
}

static void unmap_donated_memory(void *va, size_t size)
{
	if (!va)
		return;

	memset(va, 0, size);
	__unmap_donated_memory(va, size);
}

static void unmap_donated_memory_noclear(void *va, size_t size)
{
	if (!va)
		return;

	__unmap_donated_memory(va, size);
}
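Taken together, these helpers give donated memory a strict lifecycle: donated in and zeroed so no host data leaks into EL2 state, then zeroed again and donated back so no hypervisor state leaks out. A hedged sketch of the pattern (not a hunk from the series):

	/* host -> hyp donation, then memset(0) on the way in: */
	void *va = map_donated_memory(host_va, size);
	if (!va)
		return -ENOMEM;

	/* ... use 'va' as private EL2 state ... */

	/* memset(0) on the way out, then hyp -> host donation: */
	unmap_donated_memory(va, size);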
/*
 * Initialize the hypervisor copy of the protected VM state using the
 * memory donated by the host.
 *
 * Unmaps the donated memory from the host at stage 2.
 *
 * host_kvm: A pointer to the host's struct kvm.
 * vm_hva: The host va of the area being donated for the VM state.
 *	   Must be page aligned.
 * pgd_hva: The host va of the area being donated for the stage-2 PGD for
 *	    the VM. Must be page aligned. Its size is implied by the VM's
 *	    VTCR.
 *
 * Return a unique handle to the protected VM on success,
 * negative error code on failure.
 */
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
		   unsigned long pgd_hva)
{
	struct pkvm_hyp_vm *hyp_vm = NULL;
	size_t vm_size, pgd_size;
	unsigned int nr_vcpus;
	void *pgd = NULL;
	int ret;

	ret = hyp_pin_shared_mem(host_kvm, host_kvm + 1);
	if (ret)
		return ret;

	nr_vcpus = READ_ONCE(host_kvm->created_vcpus);
	if (nr_vcpus < 1) {
		ret = -EINVAL;
		goto err_unpin_kvm;
	}

	vm_size = pkvm_get_hyp_vm_size(nr_vcpus);
	pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.vtcr);

	ret = -ENOMEM;

	hyp_vm = map_donated_memory(vm_hva, vm_size);
	if (!hyp_vm)
		goto err_remove_mappings;

	pgd = map_donated_memory_noclear(pgd_hva, pgd_size);
	if (!pgd)
		goto err_remove_mappings;

	init_pkvm_hyp_vm(host_kvm, hyp_vm, nr_vcpus);

	hyp_spin_lock(&vm_table_lock);
	ret = insert_vm_table_entry(host_kvm, hyp_vm);
	if (ret < 0)
		goto err_unlock;

	ret = kvm_guest_prepare_stage2(hyp_vm, pgd);
	if (ret)
		goto err_remove_vm_table_entry;
	hyp_spin_unlock(&vm_table_lock);

	return hyp_vm->kvm.arch.pkvm.handle;

err_remove_vm_table_entry:
	remove_vm_table_entry(hyp_vm->kvm.arch.pkvm.handle);
err_unlock:
	hyp_spin_unlock(&vm_table_lock);
err_remove_mappings:
	unmap_donated_memory(hyp_vm, vm_size);
	unmap_donated_memory(pgd, pgd_size);
err_unpin_kvm:
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return ret;
}
/*
 * Initialize the hypervisor copy of the protected vCPU state using the
 * memory donated by the host.
 *
 * handle: The handle for the protected vm.
 * host_vcpu: A pointer to the corresponding host vcpu.
 * vcpu_hva: The host va of the area being donated for the vcpu state.
 *	     Must be page aligned. The size of the area must be equal to
 *	     the page-aligned size of 'struct pkvm_hyp_vcpu'.
 * Return 0 on success, negative error code on failure.
 */
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
		     unsigned long vcpu_hva)
{
	struct pkvm_hyp_vcpu *hyp_vcpu;
	struct pkvm_hyp_vm *hyp_vm;
	unsigned int idx;
	int ret;

	hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
	if (!hyp_vcpu)
		return -ENOMEM;

	hyp_spin_lock(&vm_table_lock);

	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		ret = -ENOENT;
		goto unlock;
	}

	idx = hyp_vm->nr_vcpus;
	if (idx >= hyp_vm->kvm.created_vcpus) {
		ret = -EINVAL;
		goto unlock;
	}

	ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu, idx);
	if (ret)
		goto unlock;

	hyp_vm->vcpus[idx] = hyp_vcpu;
	hyp_vm->nr_vcpus++;
unlock:
	hyp_spin_unlock(&vm_table_lock);

	if (ret)
		unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));

	return ret;
}
static void
teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
{
	size = PAGE_ALIGN(size);
	memset(addr, 0, size);

	for (void *start = addr; start < addr + size; start += PAGE_SIZE)
		push_hyp_memcache(mc, start, hyp_virt_to_phys);

	unmap_donated_memory_noclear(addr, size);
}

int __pkvm_teardown_vm(pkvm_handle_t handle)
{
	struct kvm_hyp_memcache *mc;
	struct pkvm_hyp_vm *hyp_vm;
	struct kvm *host_kvm;
	unsigned int idx;
	size_t vm_size;
	int err;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		err = -ENOENT;
		goto err_unlock;
	}

	if (WARN_ON(hyp_page_count(hyp_vm))) {
		err = -EBUSY;
		goto err_unlock;
	}

	host_kvm = hyp_vm->host_kvm;

	/* Ensure the VMID is clean before it can be reallocated */
	__kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
	remove_vm_table_entry(handle);
	hyp_spin_unlock(&vm_table_lock);

	/* Reclaim guest pages (including page-table pages) */
	mc = &host_kvm->arch.pkvm.teardown_mc;
	reclaim_guest_pages(hyp_vm, mc);
	unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);

	/* Push the metadata pages to the teardown memcache */
	for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];

		teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
	}

	vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
	teardown_donated_memory(mc, hyp_vm, vm_size);
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return 0;

err_unlock:
	hyp_spin_unlock(&vm_table_lock);
	return err;
}
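The host-side counterpart (see pkvm_destroy_hyp_vm() later in this diff) drains the teardown memcache only once the hypercall has run, so every page is scrubbed and back under host ownership before it is freed. A simplified restatement of that ordering:

	ret = kvm_call_hyp_nvhe(__pkvm_teardown_vm, handle);
	if (!ret)
		free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);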
@ -16,6 +16,7 @@
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

unsigned long hyp_nr_cpus;
@ -24,6 +25,7 @@ unsigned long hyp_nr_cpus;
			 (unsigned long)__per_cpu_start)

static void *vmemmap_base;
static void *vm_table_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
@ -31,16 +33,20 @@ static struct hyp_pool hpool;

static int divide_memory_pool(void *virt, unsigned long size)
{
	unsigned long vstart, vend, nr_pages;
	unsigned long nr_pages;

	hyp_early_alloc_init(virt, size);

	hyp_vmemmap_range(__hyp_pa(virt), size, &vstart, &vend);
	nr_pages = (vend - vstart) >> PAGE_SHIFT;
	nr_pages = hyp_vmemmap_pages(sizeof(struct hyp_page));
	vmemmap_base = hyp_early_alloc_contig(nr_pages);
	if (!vmemmap_base)
		return -ENOMEM;

	nr_pages = hyp_vm_table_pages();
	vm_table_base = hyp_early_alloc_contig(nr_pages);
	if (!vm_table_base)
		return -ENOMEM;

	nr_pages = hyp_s1_pgtable_pages();
	hyp_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!hyp_pgt_base)
@ -78,7 +84,7 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
	if (ret)
		return ret;

	ret = hyp_back_vmemmap(phys, size, hyp_virt_to_phys(vmemmap_base));
	ret = hyp_back_vmemmap(hyp_virt_to_phys(vmemmap_base));
	if (ret)
		return ret;

@ -138,20 +144,17 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
	}

	/*
	 * Map the host's .bss and .rodata sections RO in the hypervisor, but
	 * transfer the ownership from the host to the hypervisor itself to
	 * make sure it can't be donated or shared with another entity.
	 * Map the host sections RO in the hypervisor, but transfer the
	 * ownership from the host to the hypervisor itself to make sure they
	 * can't be donated or shared with another entity.
	 *
	 * The ownership transition requires matching changes in the host
	 * stage-2. This will be done later (see finalize_host_mappings()) once
	 * the hyp_vmemmap is addressable.
	 */
	prot = pkvm_mkstate(PAGE_HYP_RO, PKVM_PAGE_SHARED_OWNED);
	ret = pkvm_create_mappings(__start_rodata, __end_rodata, prot);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_bss_end, __bss_stop, prot);
	ret = pkvm_create_mappings(&kvm_vgic_global_state,
				   &kvm_vgic_global_state + 1, prot);
	if (ret)
		return ret;

@ -186,10 +189,9 @@ static void hpool_put_page(void *addr)
	hyp_put_page(&hpool, addr);
}

static int finalize_host_mappings_walker(const struct kvm_pgtable_visit_ctx *ctx,
					 enum kvm_pgtable_walk_flags visit)
static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
				     enum kvm_pgtable_walk_flags visit)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
	enum kvm_pgtable_prot prot;
	enum pkvm_page_state state;
	phys_addr_t phys;
@ -197,15 +199,6 @@ static int finalize_host_mappings_walker(const struct kvm_pgtable_visit_ctx *ctx
	if (!kvm_pte_valid(ctx->old))
		return 0;

	/*
	 * Fix-up the refcount for the page-table pages as the early allocator
	 * was unable to access the hyp_vmemmap and so the buddy allocator has
	 * initialised the refcount to '1'.
	 */
	mm_ops->get_page(ctx->ptep);
	if (visit != KVM_PGTABLE_WALK_LEAF)
		return 0;

	if (ctx->level != (KVM_PGTABLE_MAX_LEVELS - 1))
		return -EINVAL;

@ -220,7 +213,7 @@ static int finalize_host_mappings_walker(const struct kvm_pgtable_visit_ctx *ctx
	state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(ctx->old));
	switch (state) {
	case PKVM_PAGE_OWNED:
		return host_stage2_set_owner_locked(phys, PAGE_SIZE, pkvm_hyp_id);
		return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
	case PKVM_PAGE_SHARED_OWNED:
		prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_BORROWED);
		break;
@ -234,11 +227,25 @@ static int finalize_host_mappings_walker(const struct kvm_pgtable_visit_ctx *ctx
	return host_stage2_idmap_locked(phys, PAGE_SIZE, prot);
}

static int finalize_host_mappings(void)
static int fix_hyp_pgtable_refcnt_walker(const struct kvm_pgtable_visit_ctx *ctx,
					 enum kvm_pgtable_walk_flags visit)
{
	/*
	 * Fix-up the refcount for the page-table pages as the early allocator
	 * was unable to access the hyp_vmemmap and so the buddy allocator has
	 * initialised the refcount to '1'.
	 */
	if (kvm_pte_valid(ctx->old))
		ctx->mm_ops->get_page(ctx->ptep);

	return 0;
}

static int fix_host_ownership(void)
{
	struct kvm_pgtable_walker walker = {
		.cb	= finalize_host_mappings_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
		.cb	= fix_host_ownership_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};
	int i, ret;

@ -254,6 +261,18 @@ static int finalize_host_mappings(void)
	return 0;
}

static int fix_hyp_pgtable_refcnt(void)
{
	struct kvm_pgtable_walker walker = {
		.cb	= fix_hyp_pgtable_refcnt_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
		.arg	= pkvm_pgtable.mm_ops,
	};

	return kvm_pgtable_walk(&pkvm_pgtable, 0, BIT(pkvm_pgtable.ia_bits),
				&walker);
}

void __noreturn __pkvm_init_finalise(void)
{
	struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data);
@ -283,10 +302,19 @@ void __noreturn __pkvm_init_finalise(void)
	};
	pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;

	ret = finalize_host_mappings();
	ret = fix_host_ownership();
	if (ret)
		goto out;

	ret = fix_hyp_pgtable_refcnt();
	if (ret)
		goto out;

	ret = hyp_create_pcpu_fixmap();
	if (ret)
		goto out;

	pkvm_hyp_vm_table_init(vm_table_base);
out:
	/*
	 * We tail-called to here from handle___pkvm_init() and will not return,
@ -62,8 +62,6 @@ struct kvm_pgtable_walk_data {
	u64 end;
};

#define KVM_PHYS_INVALID (-1ULL)

static bool kvm_phys_is_valid(u64 phys)
{
	return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_EL1_PARANGE_MAX));
@ -122,16 +120,6 @@ static bool kvm_pte_table(kvm_pte_t pte, u32 level)
	return FIELD_GET(KVM_PTE_TYPE, pte) == KVM_PTE_TYPE_TABLE;
}

static kvm_pte_t kvm_phys_to_pte(u64 pa)
{
	kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK;

	if (PAGE_SHIFT == 16)
		pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);

	return pte;
}

static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
{
	return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
@ -1217,6 +1205,15 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
	return 0;
}

size_t kvm_pgtable_stage2_pgd_size(u64 vtcr)
{
	u32 ia_bits = VTCR_EL2_IPA(vtcr);
	u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
	u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;

	return kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
}

static int stage2_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
			      enum kvm_pgtable_walk_flags visit)
{
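A worked instance of kvm_pgtable_stage2_pgd_size() for a 4KiB granule: a start-level-1 table resolves 39 bits of IPA, so a 40-bit IPA space needs 2^(40-39) = 2 concatenated top-level tables, i.e. an 8KiB PGD. Standalone arithmetic (assumes the standard ARMv8 4K translation geometry with four levels; illustrative only):

#include <assert.h>

int main(void)
{
	unsigned int ia_bits = 40, start_level = 1, page_shift = 12;
	unsigned int bits_per_level = page_shift - 3;	/* 512 entries/table */
	/* IPA bits resolved from start_level down to the page offset: */
	unsigned int resolved = (4 - start_level) * bits_per_level + page_shift;
	unsigned int nr_tables = 1U << (ia_bits - resolved);

	assert(resolved == 39 && nr_tables == 2);	/* PGD = 2 pages */
	return 0;
}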
@ -695,15 +695,42 @@ static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
 * kvm_init_stage2_mmu - Initialise a S2 MMU structure
 * @kvm:	The pointer to the KVM structure
 * @mmu:	The pointer to the s2 MMU structure
 * @type:	The machine type of the virtual machine
 *
 * Allocates only the stage-2 HW PGD level table(s).
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type)
{
	u32 kvm_ipa_limit = get_kvm_ipa_limit();
	int cpu, err;
	struct kvm_pgtable *pgt;
	u64 mmfr0, mmfr1;
	u32 phys_shift;

	if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
		return -EINVAL;

	phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
	if (is_protected_kvm_enabled()) {
		phys_shift = kvm_ipa_limit;
	} else if (phys_shift) {
		if (phys_shift > kvm_ipa_limit ||
		    phys_shift < ARM64_MIN_PARANGE_BITS)
			return -EINVAL;
	} else {
		phys_shift = KVM_PHYS_SHIFT;
		if (phys_shift > kvm_ipa_limit) {
			pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
				     current->comm);
			return -EINVAL;
		}
	}

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);

	if (mmu->pgt != NULL) {
		kvm_err("kvm_arch already initialized?\n");
@ -827,6 +854,32 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
	}
}

static void hyp_mc_free_fn(void *addr, void *unused)
{
	free_page((unsigned long)addr);
}

static void *hyp_mc_alloc_fn(void *unused)
{
	return (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
}

void free_hyp_memcache(struct kvm_hyp_memcache *mc)
{
	if (is_protected_kvm_enabled())
		__free_hyp_memcache(mc, hyp_mc_free_fn,
				    kvm_host_va, NULL);
}

int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages)
{
	if (!is_protected_kvm_enabled())
		return 0;

	return __topup_hyp_memcache(mc, min_pages, hyp_mc_alloc_fn,
				    kvm_host_pa, NULL);
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
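A hedged sketch of how the host side would pair these helpers around a hypercall that may consume pages at EL2 (hypothetical call site; 'mc' and the page floor are illustrative):

	struct kvm_hyp_memcache mc = { 0 };	/* hypothetical host-side cache */
	int ret;

	ret = topup_hyp_memcache(&mc, 4);	/* ensure at least 4 pages, arbitrary */
	if (ret)
		return ret;

	/* ... hand 'mc' to EL2, which drains it via refill_memcache() ... */

	free_hyp_memcache(&mc);			/* return anything EL2 did not keep */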
@ -6,6 +6,7 @@

#include <linux/kvm_host.h>
#include <linux/memblock.h>
#include <linux/mutex.h>
#include <linux/sort.h>

#include <asm/kvm_pkvm.h>
@ -53,7 +54,7 @@ static int __init register_memblock_regions(void)

void __init kvm_hyp_reserve(void)
{
	u64 nr_pages, prev, hyp_mem_pages = 0;
	u64 hyp_mem_pages = 0;
	int ret;

	if (!is_hyp_mode_available() || is_kernel_in_hyp_mode())
@ -71,21 +72,8 @@ void __init kvm_hyp_reserve(void)

	hyp_mem_pages += hyp_s1_pgtable_pages();
	hyp_mem_pages += host_s2_pgtable_pages();

	/*
	 * The hyp_vmemmap needs to be backed by pages, but these pages
	 * themselves need to be present in the vmemmap, so compute the number
	 * of pages needed by looking for a fixed point.
	 */
	nr_pages = 0;
	do {
		prev = nr_pages;
		nr_pages = hyp_mem_pages + prev;
		nr_pages = DIV_ROUND_UP(nr_pages * STRUCT_HYP_PAGE_SIZE,
					PAGE_SIZE);
		nr_pages += __hyp_pgtable_max_pages(nr_pages);
	} while (nr_pages != prev);
	hyp_mem_pages += nr_pages;
	hyp_mem_pages += hyp_vm_table_pages();
	hyp_mem_pages += hyp_vmemmap_pages(STRUCT_HYP_PAGE_SIZE);

	/*
	 * Try to allocate a PMD-aligned region to reduce TLB pressure once
@ -107,3 +95,121 @@ void __init kvm_hyp_reserve(void)
	kvm_info("Reserved %lld MiB at 0x%llx\n", hyp_mem_size >> 20,
		 hyp_mem_base);
}
|
||||
* Allocates and donates memory for hypervisor VM structs at EL2.
|
||||
*
|
||||
* Allocates space for the VM state, which includes the hyp vm as well as
|
||||
* the hyp vcpus.
|
||||
*
|
||||
* Stores an opaque handler in the kvm struct for future reference.
|
||||
*
|
||||
* Return 0 on success, negative error code on failure.
|
||||
*/
|
||||
static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
|
||||
{
|
||||
size_t pgd_sz, hyp_vm_sz, hyp_vcpu_sz;
|
||||
struct kvm_vcpu *host_vcpu;
|
||||
pkvm_handle_t handle;
|
||||
void *pgd, *hyp_vm;
|
||||
unsigned long idx;
|
||||
int ret;
|
||||
|
||||
if (host_kvm->created_vcpus < 1)
|
||||
return -EINVAL;
|
||||
|
||||
pgd_sz = kvm_pgtable_stage2_pgd_size(host_kvm->arch.vtcr);
|
||||
|
||||
/*
|
||||
* The PGD pages will be reclaimed using a hyp_memcache which implies
|
||||
* page granularity. So, use alloc_pages_exact() to get individual
|
||||
* refcounts.
|
||||
*/
|
||||
pgd = alloc_pages_exact(pgd_sz, GFP_KERNEL_ACCOUNT);
|
||||
if (!pgd)
|
||||
return -ENOMEM;
|
||||
|
||||
/* Allocate memory to donate to hyp for vm and vcpu pointers. */
|
||||
hyp_vm_sz = PAGE_ALIGN(size_add(PKVM_HYP_VM_SIZE,
|
||||
size_mul(sizeof(void *),
|
||||
host_kvm->created_vcpus)));
|
||||
hyp_vm = alloc_pages_exact(hyp_vm_sz, GFP_KERNEL_ACCOUNT);
|
||||
if (!hyp_vm) {
|
||||
ret = -ENOMEM;
|
||||
goto free_pgd;
|
||||
}
|
||||
|
||||
/* Donate the VM memory to hyp and let hyp initialize it. */
|
||||
ret = kvm_call_hyp_nvhe(__pkvm_init_vm, host_kvm, hyp_vm, pgd);
|
||||
if (ret < 0)
|
||||
goto free_vm;
|
||||
|
||||
handle = ret;
|
||||
|
||||
host_kvm->arch.pkvm.handle = handle;
|
||||
|
||||
/* Donate memory for the vcpus at hyp and initialize it. */
|
||||
hyp_vcpu_sz = PAGE_ALIGN(PKVM_HYP_VCPU_SIZE);
|
||||
kvm_for_each_vcpu(idx, host_vcpu, host_kvm) {
|
||||
void *hyp_vcpu;
|
||||
|
||||
/* Indexing of the vcpus to be sequential starting at 0. */
|
||||
if (WARN_ON(host_vcpu->vcpu_idx != idx)) {
|
||||
ret = -EINVAL;
|
||||
goto destroy_vm;
|
||||
}
|
||||
|
||||
hyp_vcpu = alloc_pages_exact(hyp_vcpu_sz, GFP_KERNEL_ACCOUNT);
|
||||
if (!hyp_vcpu) {
|
||||
ret = -ENOMEM;
|
||||
goto destroy_vm;
|
||||
}
|
||||
|
||||
ret = kvm_call_hyp_nvhe(__pkvm_init_vcpu, handle, host_vcpu,
|
||||
hyp_vcpu);
|
||||
if (ret) {
|
||||
free_pages_exact(hyp_vcpu, hyp_vcpu_sz);
|
||||
goto destroy_vm;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
destroy_vm:
|
||||
pkvm_destroy_hyp_vm(host_kvm);
|
||||
return ret;
|
||||
free_vm:
|
||||
free_pages_exact(hyp_vm, hyp_vm_sz);
|
||||
free_pgd:
|
||||
free_pages_exact(pgd, pgd_sz);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int pkvm_create_hyp_vm(struct kvm *host_kvm)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&host_kvm->lock);
|
||||
if (!host_kvm->arch.pkvm.handle)
|
||||
ret = __pkvm_create_hyp_vm(host_kvm);
|
||||
mutex_unlock(&host_kvm->lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
|
||||
{
|
||||
if (host_kvm->arch.pkvm.handle) {
|
||||
WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
|
||||
host_kvm->arch.pkvm.handle));
|
||||
}
|
||||
|
||||
host_kvm->arch.pkvm.handle = 0;
|
||||
free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
|
||||
}
|
||||
|
||||
int pkvm_init_host_vm(struct kvm *host_kvm)
|
||||
{
|
||||
mutex_init(&host_kvm->lock);
|
||||
return 0;
|
||||
}
|
||||
|
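A hedged sketch of where these entry points slot in (the series wires pkvm_create_hyp_vm() into the first vCPU run and pkvm_destroy_hyp_vm() into VM destruction; the exact call sites are outside this hunk):

	/* Before the first vCPU run; idempotent thanks to the handle check. */
	if (is_protected_kvm_enabled()) {
		ret = pkvm_create_hyp_vm(kvm);
		if (ret)
			return ret;
	}

	/* On VM destruction: */
	if (is_protected_kvm_enabled())
		pkvm_destroy_hyp_vm(kvm);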
@ -395,32 +395,3 @@ int kvm_set_ipa_limit(void)

	return 0;
}

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
{
	u64 mmfr0, mmfr1;
	u32 phys_shift;

	if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
		return -EINVAL;

	phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
	if (phys_shift) {
		if (phys_shift > kvm_ipa_limit ||
		    phys_shift < ARM64_MIN_PARANGE_BITS)
			return -EINVAL;
	} else {
		phys_shift = KVM_PHYS_SHIFT;
		if (phys_shift > kvm_ipa_limit) {
			pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
				     current->comm);
			return -EINVAL;
		}
	}

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);

	return 0;
}