Merge tag 'kvm-x86-selftests-6.3' of https://github.com/kvm-x86/linux into HEAD

KVM selftests changes for 6.3:

 - Cache the CPU vendor (AMD vs. Intel) and use the info to emit the correct
   hypercall instruction instead of relying on KVM to patch in VMMCALL

 - A variety of one-off cleanups and fixes
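Concretely, the hypercall helper now selects VMCALL vs. VMMCALL at runtime from a vendor flag cached during setup, instead of always executing VMCALL and relying on KVM to rewrite it into VMMCALL after the resulting #UD. A minimal sketch of the idea, mirroring the kvm_hypercall() change in the diff below (the extra argument registers are trimmed here for brevity):

    /* Cached once on the host and synced into the guest. */
    static bool host_cpu_is_amd;

    static inline uint64_t hypercall(uint64_t nr)
    {
        uint64_t r;

        /* Branch to the vendor's native instruction; no #UD, no patching. */
        asm volatile("test %[use_vmmcall], %[use_vmmcall]\n\t"
                     "jnz 1f\n\t"
                     "vmcall\n\t"       /* Intel: VMX */
                     "jmp 2f\n\t"
                     "1: vmmcall\n\t"   /* AMD: SVM */
                     "2:"
                     : "=a"(r)
                     : "a"(nr), [use_vmmcall] "r" (host_cpu_is_amd));
        return r;
    }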
commit e84183f68e
Author: Paolo Bonzini
Date:   2023-02-15 08:34:32 -05:00

12 changed files with 63 additions and 62 deletions

File 1 of 12:

@@ -19,6 +19,9 @@
 #include "../kvm_util.h"
 
+extern bool host_cpu_is_intel;
+extern bool host_cpu_is_amd;
+
 #define NMI_VECTOR 0x02
 
 #define X86_EFLAGS_FIXED (1u << 1)
@@ -555,6 +558,28 @@ static inline uint32_t this_cpu_model(void)
 	return x86_model(this_cpu_fms());
 }
 
+static inline bool this_cpu_vendor_string_is(const char *vendor)
+{
+	const uint32_t *chunk = (const uint32_t *)vendor;
+	uint32_t eax, ebx, ecx, edx;
+
+	cpuid(0, &eax, &ebx, &ecx, &edx);
+	return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
+}
+
+static inline bool this_cpu_is_intel(void)
+{
+	return this_cpu_vendor_string_is("GenuineIntel");
+}
+
+/*
+ * Exclude early K5 samples with a vendor string of "AMDisbetter!"
+ */
+static inline bool this_cpu_is_amd(void)
+{
+	return this_cpu_vendor_string_is("AuthenticAMD");
+}
+
 static inline uint32_t __this_cpu_has(uint32_t function, uint32_t index,
 				      uint8_t reg, uint8_t lo, uint8_t hi)
 {
@@ -691,9 +716,6 @@ static inline void cpu_relax(void)
 		     "hlt\n"	\
 	)
 
-bool is_intel_cpu(void);
-bool is_amd_cpu(void);
-
 struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu);
 void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state);
 void kvm_x86_state_cleanup(struct kvm_x86_state *state);
@@ -717,7 +739,7 @@ static inline void vcpu_msrs_set(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs)
 	int r = __vcpu_ioctl(vcpu, KVM_SET_MSRS, msrs);
 
 	TEST_ASSERT(r == msrs->nmsrs,
-		    "KVM_GET_MSRS failed, r: %i (failed on MSR %x)",
+		    "KVM_SET_MSRS failed, r: %i (failed on MSR %x)",
		    r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index);
 }
 
 static inline void vcpu_debugregs_get(struct kvm_vcpu *vcpu,
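For reference: CPUID leaf 0 returns the 12-byte vendor string split across EBX, EDX and ECX, in that order, which is why this_cpu_vendor_string_is() above compares the string as three u32 chunks against ebx, edx and ecx. A standalone illustration (a hypothetical demo program, not part of this series):

    /* Hypothetical demo, not from the patch: print the CPUID vendor string. */
    #include <cpuid.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;
        char vendor[13];

        /* Leaf 0: EAX = max leaf, EBX/EDX/ECX = vendor string. */
        __get_cpuid(0, &eax, &ebx, &ecx, &edx);
        memcpy(&vendor[0], &ebx, 4);    /* e.g. "Genu" */
        memcpy(&vendor[4], &edx, 4);    /* e.g. "ineI" */
        memcpy(&vendor[8], &ecx, 4);    /* e.g. "ntel" */
        vendor[12] = '\0';
        printf("CPU vendor: %s\n", vendor);
        return 0;
    }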

File 2 of 12:

@@ -134,7 +134,7 @@ static void stats_test(int stats_fd)
 			    "Bucket size of stats (%s) is not zero",
 			    pdesc->name);
 		}
-		size_data += pdesc->size * sizeof(*stats_data);
+		size_data = max(size_data, pdesc->offset + pdesc->size * sizeof(*stats_data));
 	}
 
 	/*
@@ -149,14 +149,6 @@
 	TEST_ASSERT(size_data >= header.num_desc * sizeof(*stats_data),
 		    "Data size is not correct");
 
-	/* Check stats offset */
-	for (i = 0; i < header.num_desc; ++i) {
-		pdesc = get_stats_descriptor(stats_desc, i, &header);
-		TEST_ASSERT(pdesc->offset < size_data,
-			    "Invalid offset (%u) for stats: %s",
-			    pdesc->offset, pdesc->name);
-	}
-
 	/* Allocate memory for stats data */
 	stats_data = malloc(size_data);
 	TEST_ASSERT(stats_data, "Allocate memory for stats data");

File 3 of 12:

@@ -90,6 +90,7 @@ static void elfhdr_get(const char *filename, Elf64_Ehdr *hdrp)
 		    "  hdrp->e_shentsize: %x\n"
 		    "  expected: %zx",
 		    hdrp->e_shentsize, sizeof(Elf64_Shdr));
+	close(fd);
 }
 
 /* VM ELF Load
@@ -190,4 +191,5 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
 			    phdr.p_filesz);
 		}
 	}
+	close(fd);
 }

File 4 of 12:

@@ -127,7 +127,7 @@ void guest_modes_cmdline(const char *arg)
 			mode_selected = true;
 		}
 
-		mode = strtoul(optarg, NULL, 10);
+		mode = atoi_non_negative("Guest mode ID", arg);
 		TEST_ASSERT(mode < NUM_VM_MODES, "Guest mode ID %d too big", mode);
 		guest_modes[mode].enabled = true;
 	}

File 5 of 12:

@@ -1942,9 +1942,6 @@ vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
 	return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
 }
 
-/* Arbitrary minimum physical address used for virtual translation tables. */
-#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
-
 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
 {
 	return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR,

File 6 of 12:

@@ -19,6 +19,8 @@
 #define MAX_NR_CPUID_ENTRIES 100
 
 vm_vaddr_t exception_handlers;
+bool host_cpu_is_amd;
+bool host_cpu_is_intel;
 
 static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent)
 {
@@ -113,7 +115,7 @@ static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent)
 
 bool kvm_is_tdp_enabled(void)
 {
-	if (is_intel_cpu())
+	if (host_cpu_is_intel)
 		return get_kvm_intel_param_bool("ept");
 	else
 		return get_kvm_amd_param_bool("npt");
@@ -555,6 +557,8 @@ static void vcpu_setup(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
 void kvm_arch_vm_post_create(struct kvm_vm *vm)
 {
 	vm_create_irqchip(vm);
+	sync_global_to_guest(vm, host_cpu_is_intel);
+	sync_global_to_guest(vm, host_cpu_is_amd);
 }
 
 struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
@@ -1006,28 +1010,6 @@ void kvm_x86_state_cleanup(struct kvm_x86_state *state)
 	free(state);
 }
 
-static bool cpu_vendor_string_is(const char *vendor)
-{
-	const uint32_t *chunk = (const uint32_t *)vendor;
-	uint32_t eax, ebx, ecx, edx;
-
-	cpuid(0, &eax, &ebx, &ecx, &edx);
-	return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
-}
-
-bool is_intel_cpu(void)
-{
-	return cpu_vendor_string_is("GenuineIntel");
-}
-
-/*
- * Exclude early K5 samples with a vendor string of "AMDisbetter!"
- */
-bool is_amd_cpu(void)
-{
-	return cpu_vendor_string_is("AuthenticAMD");
-}
-
 void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
 {
 	if (!kvm_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR)) {
@@ -1162,9 +1144,15 @@ uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
 {
 	uint64_t r;
 
-	asm volatile("vmcall"
+	asm volatile("test %[use_vmmcall], %[use_vmmcall]\n\t"
+		     "jnz 1f\n\t"
+		     "vmcall\n\t"
+		     "jmp 2f\n\t"
+		     "1: vmmcall\n\t"
+		     "2:"
 		     : "=a"(r)
-		     : "a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3));
+		     : "a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3),
+		       [use_vmmcall] "r" (host_cpu_is_amd));
 
 	return r;
 }
@@ -1236,7 +1224,7 @@ unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
 	max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1;
 
 	/* Avoid reserved HyperTransport region on AMD processors. */
-	if (!is_amd_cpu())
+	if (!host_cpu_is_amd)
 		return max_gfn;
 
 	/* On parts with <40 physical address bits, the area is fully hidden */
@@ -1276,3 +1264,9 @@ bool vm_is_unrestricted_guest(struct kvm_vm *vm)
 
 	return get_kvm_intel_param_bool("unrestricted_guest");
 }
+
+void kvm_selftest_arch_init(void)
+{
+	host_cpu_is_intel = this_cpu_is_intel();
+	host_cpu_is_amd = this_cpu_is_amd();
+}

File 7 of 12:

@@ -308,8 +308,6 @@ static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
 	data->hva_slots = malloc(sizeof(*data->hva_slots) * data->nslots);
 	TEST_ASSERT(data->hva_slots, "malloc() fail");
 
-	data->vm = __vm_create_with_one_vcpu(&data->vcpu, mempages, guest_code);
-
 	pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n",
 		  data->nslots, data->pages_per_slot, rempages);
 
@@ -349,6 +347,7 @@
 	virt_map(data->vm, MEM_GPA, MEM_GPA, data->npages);
 
 	sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);
+	sync->guest_page_size = data->vm->page_size;
 	atomic_init(&sync->start_flag, false);
 	atomic_init(&sync->exit_flag, false);
 	atomic_init(&sync->sync_flag, false);
@@ -810,8 +809,6 @@ static bool test_execute(int nslots, uint64_t *maxslots,
 	}
 
 	sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);
-	sync->guest_page_size = data->vm->page_size;
-
 	if (tdata->prepare &&
 	    !tdata->prepare(data, sync, maxslots)) {
 		ret = false;

File 8 of 12:

@@ -48,10 +48,10 @@ static void guest_main(void)
 	const uint8_t *other_hypercall_insn;
 	uint64_t ret;
 
-	if (is_intel_cpu()) {
+	if (host_cpu_is_intel) {
 		native_hypercall_insn = vmx_vmcall;
 		other_hypercall_insn = svm_vmmcall;
-	} else if (is_amd_cpu()) {
+	} else if (host_cpu_is_amd) {
 		native_hypercall_insn = svm_vmmcall;
 		other_hypercall_insn = vmx_vmcall;
 	} else {

File 9 of 12:

@@ -93,7 +93,7 @@ int main(void)
 {
 	int warnings_before, warnings_after;
 
-	TEST_REQUIRE(is_intel_cpu());
+	TEST_REQUIRE(host_cpu_is_intel);
 
 	TEST_REQUIRE(!vm_is_unrestricted_guest(NULL));

File 10 of 12:

@@ -364,7 +364,7 @@ static void test_pmu_config_disable(void (*guest_code)(void))
  */
 static bool use_intel_pmu(void)
 {
-	return is_intel_cpu() &&
+	return host_cpu_is_intel &&
 	       kvm_cpu_property(X86_PROPERTY_PMU_VERSION) &&
 	       kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS) &&
 	       kvm_pmu_has(X86_PMU_FEATURE_BRANCH_INSNS_RETIRED);
@@ -398,7 +398,7 @@ static bool use_amd_pmu(void)
 	uint32_t family = kvm_cpu_family();
 	uint32_t model = kvm_cpu_model();
 
-	return is_amd_cpu() &&
+	return host_cpu_is_amd &&
 	       (is_zen1(family, model) ||
 		is_zen2(family, model) ||
 		is_zen3(family, model));

File 11 of 12:

@@ -111,7 +111,7 @@ int main(int argc, char *argv[])
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
 
-	TEST_REQUIRE(is_intel_cpu());
+	TEST_REQUIRE(host_cpu_is_intel);
 	TEST_REQUIRE(!vm_is_unrestricted_guest(NULL));
 
 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

File 12 of 12:

@@ -19,9 +19,6 @@
 #include <sys/eventfd.h>
 
-/* Defined in include/linux/kvm_types.h */
-#define GPA_INVALID (~(ulong)0)
-
 #define SHINFO_REGION_GVA 0xc0000000ULL
 #define SHINFO_REGION_GPA 0xc0000000ULL
 #define SHINFO_REGION_SLOT 10
@@ -412,21 +409,21 @@ static void *juggle_shinfo_state(void *arg)
 {
 	struct kvm_vm *vm = (struct kvm_vm *)arg;
 
-	struct kvm_xen_hvm_attr cache_init = {
+	struct kvm_xen_hvm_attr cache_activate = {
 		.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
 		.u.shared_info.gfn = SHINFO_REGION_GPA / PAGE_SIZE
 	};
-	struct kvm_xen_hvm_attr cache_destroy = {
+	struct kvm_xen_hvm_attr cache_deactivate = {
 		.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
-		.u.shared_info.gfn = GPA_INVALID
+		.u.shared_info.gfn = KVM_XEN_INVALID_GFN
 	};
 
 	for (;;) {
-		__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_init);
-		__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_destroy);
+		__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate);
+		__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate);
 		pthread_testcancel();
-	};
+	}
 
 	return NULL;
 }