KVM: selftests: Rename vm_create() => vm_create_barebones(), drop param
Rename vm_create() to vm_create_barebones() and drop the @phys_pages param. Pass '0' for the number of pages even though some callers pass 'DEFAULT_GUEST_PHY_PAGES', as the intent behind creating truly barebones VMs is purely to create a VM, i.e. there aren't vCPUs, there's no guest code loaded, etc..., and so there is nothing that will ever need or consume guest memory. Freeing up the name vm_create() will allow using the name for an inner helper to the other VM creators, which need a "full" VM. Opportunistically rewrite the function comment for addr_gpa2alias() to focus on what the _function_ does, not what its _sole caller_ does. Signed-off-by: Sean Christopherson <seanjc@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
eb0adbc03a
commit
95fb046071
@ -416,7 +416,7 @@ static void run_test(struct vcpu_config *c)
|
||||
|
||||
check_supported(c);
|
||||
|
||||
vm = vm_create(DEFAULT_GUEST_PHY_PAGES);
|
||||
vm = vm_create_barebones();
|
||||
prepare_vcpu_init(c, &init);
|
||||
vm_vcpu_add(vm, 0);
|
||||
aarch64_vcpu_setup(vm, 0, &init);
|
||||
|
@ -24,7 +24,7 @@ static int add_init_2vcpus(struct kvm_vcpu_init *init1,
|
||||
struct kvm_vm *vm;
|
||||
int ret;
|
||||
|
||||
vm = vm_create(DEFAULT_GUEST_PHY_PAGES);
|
||||
vm = vm_create_barebones();
|
||||
|
||||
vm_vcpu_add(vm, 0);
|
||||
ret = __vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1);
|
||||
@ -49,7 +49,7 @@ static int add_2vcpus_init_2vcpus(struct kvm_vcpu_init *init1,
|
||||
struct kvm_vm *vm;
|
||||
int ret;
|
||||
|
||||
vm = vm_create(DEFAULT_GUEST_PHY_PAGES);
|
||||
vm = vm_create_barebones();
|
||||
|
||||
vm_vcpu_add(vm, 0);
|
||||
vm_vcpu_add(vm, 1);
|
||||
@ -86,7 +86,7 @@ int main(void)
|
||||
}
|
||||
|
||||
/* Get the preferred target type and copy that to init2 for later use */
|
||||
vm = vm_create(DEFAULT_GUEST_PHY_PAGES);
|
||||
vm = vm_create_barebones();
|
||||
vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init1);
|
||||
kvm_vm_free(vm);
|
||||
init2 = init1;
|
||||
|
@ -248,7 +248,6 @@ void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
|
||||
const char *vm_guest_mode_string(uint32_t i);
|
||||
|
||||
struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint64_t phy_pages);
|
||||
struct kvm_vm *vm_create(uint64_t phy_pages);
|
||||
void kvm_vm_free(struct kvm_vm *vmp);
|
||||
void kvm_vm_restart(struct kvm_vm *vmp);
|
||||
void kvm_vm_release(struct kvm_vm *vmp);
|
||||
@ -596,6 +595,11 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
|
||||
vm_paddr_t paddr_min, uint32_t memslot);
|
||||
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
|
||||
|
||||
static inline struct kvm_vm *vm_create_barebones(void)
|
||||
{
|
||||
return __vm_create(VM_MODE_DEFAULT, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Create a VM with reasonable defaults
|
||||
*
|
||||
|
@ -221,7 +221,7 @@ int main(int argc, char *argv[])
|
||||
vms = malloc(sizeof(vms[0]) * max_vm);
|
||||
TEST_ASSERT(vms, "Allocate memory for storing VM pointers");
|
||||
for (i = 0; i < max_vm; ++i) {
|
||||
vms[i] = vm_create(DEFAULT_GUEST_PHY_PAGES);
|
||||
vms[i] = vm_create_barebones();
|
||||
for (j = 0; j < max_vcpu; ++j)
|
||||
vm_vcpu_add(vms[i], j);
|
||||
}
|
||||
|
@ -28,7 +28,7 @@ void test_vcpu_creation(int first_vcpu_id, int num_vcpus)
|
||||
pr_info("Testing creating %d vCPUs, with IDs %d...%d.\n",
|
||||
num_vcpus, first_vcpu_id, first_vcpu_id + num_vcpus - 1);
|
||||
|
||||
vm = vm_create(DEFAULT_GUEST_PHY_PAGES);
|
||||
vm = vm_create_barebones();
|
||||
|
||||
for (i = first_vcpu_id; i < first_vcpu_id + num_vcpus; i++)
|
||||
/* This asserts that the vCPU was created. */
|
||||
|
@ -258,26 +258,6 @@ struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint64_t phy_pages)
|
||||
return vm;
|
||||
}
|
||||
|
||||
/*
|
||||
* VM Create
|
||||
*
|
||||
* Input Args:
|
||||
* phy_pages - Physical memory pages
|
||||
*
|
||||
* Output Args: None
|
||||
*
|
||||
* Return:
|
||||
* Pointer to opaque structure that describes the created VM.
|
||||
*
|
||||
* Creates a VM with the default physical/virtual address widths and page size.
|
||||
* When phy_pages is non-zero, a memory region of phy_pages physical pages
|
||||
* is created and mapped starting at guest physical address 0.
|
||||
*/
|
||||
struct kvm_vm *vm_create(uint64_t phy_pages)
|
||||
{
|
||||
return __vm_create(VM_MODE_DEFAULT, phy_pages);
|
||||
}
|
||||
|
||||
struct kvm_vm *vm_create_without_vcpus(enum vm_guest_mode mode, uint64_t pages)
|
||||
{
|
||||
struct kvm_vm *vm;
|
||||
@ -1421,11 +1401,10 @@ vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
|
||||
* (without failing the test) if the guest memory is not shared (so
|
||||
* no alias exists).
|
||||
*
|
||||
* When vm_create() and related functions are called with a shared memory
|
||||
* src_type, we also create a writable, shared alias mapping of the
|
||||
* underlying guest memory. This allows the host to manipulate guest memory
|
||||
* without mapping that memory in the guest's address space. And, for
|
||||
* userfaultfd-based demand paging, we can do so without triggering userfaults.
|
||||
* Create a writable, shared virtual=>physical alias for the specific GPA.
|
||||
* The primary use case is to allow the host selftest to manipulate guest
|
||||
* memory without mapping said memory in the guest's address space. And, for
|
||||
* userfaultfd-based demand paging, to do so without triggering userfaults.
|
||||
*/
|
||||
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa)
|
||||
{
|
||||
|
@ -314,7 +314,7 @@ static void test_zero_memory_regions(void)
|
||||
|
||||
pr_info("Testing KVM_RUN with zero added memory regions\n");
|
||||
|
||||
vm = vm_create(0);
|
||||
vm = vm_create_barebones();
|
||||
vm_vcpu_add(vm, VCPU_ID);
|
||||
|
||||
vm_ioctl(vm, KVM_SET_NR_MMU_PAGES, (void *)64ul);
|
||||
@ -353,7 +353,7 @@ static void test_add_max_memory_regions(void)
|
||||
"KVM_CAP_NR_MEMSLOTS should be greater than 0");
|
||||
pr_info("Allowed number of memory slots: %i\n", max_mem_slots);
|
||||
|
||||
vm = vm_create(0);
|
||||
vm = vm_create_barebones();
|
||||
|
||||
/* Check it can be added memory slots up to the maximum allowed */
|
||||
pr_info("Adding slots 0..%i, each memory region with %dK size\n",
|
||||
|
@ -16,7 +16,7 @@ int main(int argc, char *argv[])
|
||||
struct kvm_vm *vm;
|
||||
int ret;
|
||||
|
||||
vm = vm_create(0);
|
||||
vm = vm_create_barebones();
|
||||
|
||||
/* Get KVM_CAP_MAX_VCPU_ID cap supported in KVM */
|
||||
ret = vm_check_cap(vm, KVM_CAP_MAX_VCPU_ID);
|
||||
|
@ -95,7 +95,7 @@ int main(int argc, char *argv[])
|
||||
* use it to verify all supported CR4 bits can be set prior to defining
|
||||
* the vCPU model, i.e. without doing KVM_SET_CPUID2.
|
||||
*/
|
||||
vm = vm_create(DEFAULT_GUEST_PHY_PAGES);
|
||||
vm = vm_create_barebones();
|
||||
vm_vcpu_add(vm, VCPU_ID);
|
||||
|
||||
vcpu_sregs_get(vm, VCPU_ID, &sregs);
|
||||
|
@ -53,7 +53,7 @@ static struct kvm_vm *sev_vm_create(bool es)
|
||||
struct kvm_sev_launch_start start = { 0 };
|
||||
int i;
|
||||
|
||||
vm = vm_create(0);
|
||||
vm = vm_create_barebones();
|
||||
sev_ioctl(vm->fd, es ? KVM_SEV_ES_INIT : KVM_SEV_INIT, NULL);
|
||||
for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
|
||||
vm_vcpu_add(vm, i);
|
||||
@ -70,7 +70,7 @@ static struct kvm_vm *aux_vm_create(bool with_vcpus)
|
||||
struct kvm_vm *vm;
|
||||
int i;
|
||||
|
||||
vm = vm_create(0);
|
||||
vm = vm_create_barebones();
|
||||
if (!with_vcpus)
|
||||
return vm;
|
||||
|
||||
@ -168,7 +168,7 @@ static void test_sev_migrate_parameters(void)
|
||||
*sev_es_vm_no_vmsa;
|
||||
int ret;
|
||||
|
||||
vm_no_vcpu = vm_create(0);
|
||||
vm_no_vcpu = vm_create_barebones();
|
||||
vm_no_sev = aux_vm_create(true);
|
||||
ret = __sev_migrate_from(vm_no_vcpu, vm_no_sev);
|
||||
TEST_ASSERT(ret == -1 && errno == EINVAL,
|
||||
@ -180,7 +180,7 @@ static void test_sev_migrate_parameters(void)
|
||||
|
||||
sev_vm = sev_vm_create(/* es= */ false);
|
||||
sev_es_vm = sev_vm_create(/* es= */ true);
|
||||
sev_es_vm_no_vmsa = vm_create(0);
|
||||
sev_es_vm_no_vmsa = vm_create_barebones();
|
||||
sev_ioctl(sev_es_vm_no_vmsa->fd, KVM_SEV_ES_INIT, NULL);
|
||||
vm_vcpu_add(sev_es_vm_no_vmsa, 1);
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user