KVM: selftests: Add wrapper to allocate page table page
Add a helper to allocate a page for use in constructing the guest's page tables. All architectures have identical address and memslot requirements (which appear to be arbitrary anyway). No functional change intended. Signed-off-by: Sean Christopherson <seanjc@google.com> Message-Id: <20210622200529.3650424-15-seanjc@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
444d084b46
commit
cce0c23dd9
@ -30,6 +30,7 @@ typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
|
||||
|
||||
/* Minimum allocated guest virtual and physical addresses */
|
||||
#define KVM_UTIL_MIN_VADDR 0x2000
|
||||
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
|
||||
|
||||
#define DEFAULT_GUEST_PHY_PAGES 512
|
||||
#define DEFAULT_GUEST_STACK_VADDR_MIN 0xab6000
|
||||
@ -262,6 +263,7 @@ vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
|
||||
uint32_t memslot);
|
||||
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
|
||||
vm_paddr_t paddr_min, uint32_t memslot);
|
||||
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
|
||||
|
||||
/*
|
||||
* Create a VM with reasonable defaults
|
||||
|
@ -11,7 +11,6 @@
|
||||
#include "../kvm_util_internal.h"
|
||||
#include "processor.h"
|
||||
|
||||
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
|
||||
#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN 0xac0000
|
||||
|
||||
static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
|
||||
@ -104,25 +103,19 @@ static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
|
||||
paddr, vm->max_gfn, vm->page_size);
|
||||
|
||||
ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
|
||||
if (!*ptep) {
|
||||
*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
|
||||
*ptep |= 3;
|
||||
}
|
||||
if (!*ptep)
|
||||
*ptep = vm_alloc_page_table(vm) | 3;
|
||||
|
||||
switch (vm->pgtable_levels) {
|
||||
case 4:
|
||||
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
|
||||
if (!*ptep) {
|
||||
*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
|
||||
*ptep |= 3;
|
||||
}
|
||||
if (!*ptep)
|
||||
*ptep = vm_alloc_page_table(vm) | 3;
|
||||
/* fall through */
|
||||
case 3:
|
||||
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
|
||||
if (!*ptep) {
|
||||
*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
|
||||
*ptep |= 3;
|
||||
}
|
||||
if (!*ptep)
|
||||
*ptep = vm_alloc_page_table(vm) | 3;
|
||||
/* fall through */
|
||||
case 2:
|
||||
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
|
||||
|
@ -2209,6 +2209,14 @@ vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
|
||||
return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
|
||||
}
|
||||
|
||||
/* Arbitrary minimum physical address used for virtual translation tables. */
|
||||
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
|
||||
|
||||
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
|
||||
{
|
||||
return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Address Guest Virtual to Host Virtual
|
||||
*
|
||||
|
@ -9,8 +9,6 @@
|
||||
#include "kvm_util.h"
|
||||
#include "../kvm_util_internal.h"
|
||||
|
||||
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
|
||||
|
||||
#define PAGES_PER_REGION 4
|
||||
|
||||
void virt_pgd_alloc(struct kvm_vm *vm)
|
||||
|
@ -17,9 +17,6 @@
|
||||
#define DEFAULT_CODE_SELECTOR 0x8
|
||||
#define DEFAULT_DATA_SELECTOR 0x10
|
||||
|
||||
/* Minimum physical address used for virtual translation tables. */
|
||||
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
|
||||
|
||||
vm_vaddr_t exception_handlers;
|
||||
|
||||
/* Virtual translation table structure declarations */
|
||||
@ -214,9 +211,7 @@ void virt_pgd_alloc(struct kvm_vm *vm)
|
||||
|
||||
/* If needed, create page map l4 table. */
|
||||
if (!vm->pgd_created) {
|
||||
vm_paddr_t paddr = vm_phy_page_alloc(vm,
|
||||
KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
|
||||
vm->pgd = paddr;
|
||||
vm->pgd = vm_alloc_page_table(vm);
|
||||
vm->pgd_created = true;
|
||||
}
|
||||
}
|
||||
@ -254,9 +249,7 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
|
||||
/* Allocate page directory pointer table if not present. */
|
||||
pml4e = addr_gpa2hva(vm, vm->pgd);
|
||||
if (!pml4e[index[3]].present) {
|
||||
pml4e[index[3]].address = vm_phy_page_alloc(vm,
|
||||
KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0)
|
||||
>> vm->page_shift;
|
||||
pml4e[index[3]].address = vm_alloc_page_table(vm) >> vm->page_shift;
|
||||
pml4e[index[3]].writable = true;
|
||||
pml4e[index[3]].present = true;
|
||||
}
|
||||
@ -265,9 +258,7 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
|
||||
struct pageDirectoryPointerEntry *pdpe;
|
||||
pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
|
||||
if (!pdpe[index[2]].present) {
|
||||
pdpe[index[2]].address = vm_phy_page_alloc(vm,
|
||||
KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0)
|
||||
>> vm->page_shift;
|
||||
pdpe[index[2]].address = vm_alloc_page_table(vm) >> vm->page_shift;
|
||||
pdpe[index[2]].writable = true;
|
||||
pdpe[index[2]].present = true;
|
||||
}
|
||||
@ -276,9 +267,7 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
|
||||
struct pageDirectoryEntry *pde;
|
||||
pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
|
||||
if (!pde[index[1]].present) {
|
||||
pde[index[1]].address = vm_phy_page_alloc(vm,
|
||||
KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0)
|
||||
>> vm->page_shift;
|
||||
pde[index[1]].address = vm_alloc_page_table(vm) >> vm->page_shift;
|
||||
pde[index[1]].writable = true;
|
||||
pde[index[1]].present = true;
|
||||
}
|
||||
|
@ -426,9 +426,7 @@ void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
|
||||
/* Allocate page directory pointer table if not present. */
|
||||
pml4e = vmx->eptp_hva;
|
||||
if (!pml4e[index[3]].readable) {
|
||||
pml4e[index[3]].address = vm_phy_page_alloc(vm,
|
||||
KVM_EPT_PAGE_TABLE_MIN_PADDR, 0)
|
||||
>> vm->page_shift;
|
||||
pml4e[index[3]].address = vm_alloc_page_table(vm) >> vm->page_shift;
|
||||
pml4e[index[3]].writable = true;
|
||||
pml4e[index[3]].readable = true;
|
||||
pml4e[index[3]].executable = true;
|
||||
@ -438,9 +436,7 @@ void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
|
||||
struct eptPageTableEntry *pdpe;
|
||||
pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
|
||||
if (!pdpe[index[2]].readable) {
|
||||
pdpe[index[2]].address = vm_phy_page_alloc(vm,
|
||||
KVM_EPT_PAGE_TABLE_MIN_PADDR, 0)
|
||||
>> vm->page_shift;
|
||||
pdpe[index[2]].address = vm_alloc_page_table(vm) >> vm->page_shift;
|
||||
pdpe[index[2]].writable = true;
|
||||
pdpe[index[2]].readable = true;
|
||||
pdpe[index[2]].executable = true;
|
||||
@ -450,9 +446,7 @@ void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
|
||||
struct eptPageTableEntry *pde;
|
||||
pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
|
||||
if (!pde[index[1]].readable) {
|
||||
pde[index[1]].address = vm_phy_page_alloc(vm,
|
||||
KVM_EPT_PAGE_TABLE_MIN_PADDR, 0)
|
||||
>> vm->page_shift;
|
||||
pde[index[1]].address = vm_alloc_page_table(vm) >> vm->page_shift;
|
||||
pde[index[1]].writable = true;
|
||||
pde[index[1]].readable = true;
|
||||
pde[index[1]].executable = true;
|
||||
|
Loading…
x
Reference in New Issue
Block a user