9d0c063a4d
With the pKVM hypervisor at EL2 now offering hypercalls to the host for creating and destroying VM and vCPU structures, plumb these in to the existing arm64 KVM backend to ensure that the hypervisor data structures are allocated and initialised on first vCPU run for a pKVM guest.

In the host, 'struct kvm_protected_vm' is introduced to hold the handle of the pKVM VM instance as well as to track references to the memory donated to the hypervisor so that it can be freed back to the host allocator following VM teardown.

The stage-2 page-table, hypervisor VM and vCPU structures are allocated separately so as to avoid the need for a large physically-contiguous allocation in the host at run-time.

Tested-by: Vincent Donnefort <vdonnefort@google.com>
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221110190259.26861-14-will@kernel.org
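As a rough illustration of the lifecycle described above: only pkvm_init_host_vm(), pkvm_create_hyp_vm() and pkvm_destroy_hyp_vm() come from the header below; the wrapper function and its call sites are hypothetical and the in-tree code may order and guard these calls differently.

/*
 * Hypothetical sketch of the lifecycle described in the commit message;
 * only the three pkvm_*() hooks are real, the wrapper is illustrative.
 */
static int pkvm_example_lifecycle(struct kvm *kvm)
{
	int ret;

	/* VM creation: set up host-side state (handle, donated-memory tracking). */
	ret = pkvm_init_host_vm(kvm);
	if (ret)
		return ret;

	/*
	 * First vCPU run: allocate the hypervisor VM, vCPU and stage-2
	 * page-table structures and register them at EL2 via hypercall.
	 */
	ret = pkvm_create_hyp_vm(kvm);
	if (ret)
		return ret;

	/* VM teardown: destroy the EL2 state and reclaim the donated memory. */
	pkvm_destroy_hyp_vm(kvm);
	return 0;
}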
110 lines · 2.5 KiB · C
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 - Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#ifndef __ARM64_KVM_PKVM_H__
#define __ARM64_KVM_PKVM_H__

#include <linux/memblock.h>
#include <asm/kvm_pgtable.h>

/* Maximum number of VMs that can co-exist under pKVM. */
#define KVM_MAX_PVMS 255

#define HYP_MEMBLOCK_REGIONS 128

int pkvm_init_host_vm(struct kvm *kvm);
int pkvm_create_hyp_vm(struct kvm *kvm);
void pkvm_destroy_hyp_vm(struct kvm *kvm);

extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);

static inline unsigned long
hyp_vmemmap_memblock_size(struct memblock_region *reg, size_t vmemmap_entry_size)
{
	unsigned long nr_pages = reg->size >> PAGE_SHIFT;
	unsigned long start, end;

	start = (reg->base >> PAGE_SHIFT) * vmemmap_entry_size;
	end = start + nr_pages * vmemmap_entry_size;
	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = ALIGN(end, PAGE_SIZE);

	return end - start;
}

static inline unsigned long hyp_vmemmap_pages(size_t vmemmap_entry_size)
{
	unsigned long res = 0, i;

	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		res += hyp_vmemmap_memblock_size(&kvm_nvhe_sym(hyp_memory)[i],
						 vmemmap_entry_size);
	}

	return res >> PAGE_SHIFT;
}

static inline unsigned long hyp_vm_table_pages(void)
{
	return PAGE_ALIGN(KVM_MAX_PVMS * sizeof(void *)) >> PAGE_SHIFT;
}

static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0, i;

	/* Provision the worst case scenario */
	for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) {
		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
		total += nr_pages;
	}

	return total;
}

static inline unsigned long __hyp_pgtable_total_pages(void)
{
	unsigned long res = 0, i;

	/* Cover all of memory with page-granularity */
	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
		res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
	}

	return res;
}

static inline unsigned long hyp_s1_pgtable_pages(void)
{
	unsigned long res;

	res = __hyp_pgtable_total_pages();

	/* Allow 1 GiB for private mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

static inline unsigned long host_s2_pgtable_pages(void)
{
	unsigned long res;

	/*
	 * Include an extra 16 pages to safely upper-bound the worst case of
	 * concatenated pgds.
	 */
	res = __hyp_pgtable_total_pages() + 16;

	/* Allow 1 GiB for MMIO mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

#endif /* __ARM64_KVM_PKVM_H__ */
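Usage note (illustrative, not part of the patch): the sizing helpers above could be combined along the following lines to estimate the host's hypervisor carveout in pages; the wrapper name is hypothetical and the actual reservation code in the kernel may differ.

/* Hypothetical helper: sums the per-component page estimates from the header. */
static inline unsigned long example_hyp_carveout_pages(size_t vmemmap_entry_size)
{
	unsigned long pages = 0;

	pages += hyp_s1_pgtable_pages();		/* EL2 stage-1 page-tables */
	pages += host_s2_pgtable_pages();		/* host stage-2 page-tables */
	pages += hyp_vm_table_pages();			/* table of up to KVM_MAX_PVMS VM pointers */
	pages += hyp_vmemmap_pages(vmemmap_entry_size);	/* hypervisor vmemmap */

	return pages;
}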