KVM: x86: Selftests changes for 6.6:
- Add testcases to x86's sync_regs_test for detecting KVM TOCTOU bugs

- Add support for printf() in guest code and convert all guest asserts
  to use printf-based reporting

- Clean up the PMU event filter test and add new testcases

- Include x86 selftests in the KVM x86 MAINTAINERS entry

-----BEGIN PGP SIGNATURE-----

iQJGBAABCgAwFiEEMHr+pfEFOIzK+KY1YJEiAU0MEvkFAmTueu4SHHNlYW5qY0Bn
b29nbGUuY29tAAoJEGCRIgFNDBL5wvIQAK8jWhb1Y4CzrJmcZyYYIR6apgtXl4vB
KbhFIFHi5ZeZXlpXA2o/FW8Q9LNmcRLtxoapb09t/eyb0+ODllDPt/aSG7p6Y4p9
rNb1g6Hj77LTaG5gMy7/lbk9ERzf61+MKUuucU7WzjlY8oyd+lm+y2cx2O3+S/89
C5cp2CGnqK2NMbUnzYN8izMrdvtwDvgQvm3H7Ah8yrGXJkcemVggXibuh+2coTfo
p2RKrY+A4Syw/edNe0GVZYoSVJdwPEif8o0gAz5PwC2LTjpf9Iobt89KEx08BkVw
ms0MFbwLS66MoSYIVoZkBdy/Tri5aCKxHGqu7taEWhogjbzrPvktA6PNYihO4zGa
OSjA/oyAPvFJ4cLuBlrVh/xPWVoGX/6Sx3dBP5TI3zyR0FAqZkoAPDivWhflOpTt
q3aoHr6THGRzqHOCYuX7nwzhqBFSSHUF1zy/P7rThSzieSzUiJiANUwBjTeB9Wsr
5Cn+KQ8XOZw1LVcoeI9y97xcHh9HeP3seO+MFie8OH9QK4nUqgqEbF8sp7WF0rB6
6rZ1lht9a2Qx4xdtqSMBkQdgnnaiCZ7jBtEFMK6kSQ67zvorlCwkOue3TrtorJ4H
1XI/DGAzltEfCLMAq+4FkHkkEr84S3gRjaLlI9aHWlVrSk1wxM87R16jgVfJp74R
gTNAzCys2KwM
=dHTQ
-----END PGP SIGNATURE-----

Merge tag 'kvm-x86-selftests-6.6' of https://github.com/kvm-x86/linux into HEAD

KVM: x86: Selftests changes for 6.6:

- Add testcases to x86's sync_regs_test for detecting KVM TOCTOU bugs

- Add support for printf() in guest code and convert all guest asserts
  to use printf-based reporting

- Clean up the PMU event filter test and add new testcases

- Include x86 selftests in the KVM x86 MAINTAINERS entry
commit 1814db83c0
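The headline change is printf()-style reporting from guest code: guest asserts now ship a pre-formatted message to the host instead of raw numeric arguments. A hedged sketch of the converted style (hypothetical test body; the macros are the ones this merge adds to ucall_common.h):

    #include "kvm_util.h"
    #include "ucall_common.h"

    static void guest_code(uint64_t expected)
    {
            uint64_t val = expected;   /* stand-in for a value read from hardware */

            GUEST_PRINTF("checking val = 0x%lx\n", val);

            /* On failure, the formatted string reaches the host in uc.buffer. */
            __GUEST_ASSERT(val == expected,
                           "Expected 0x%lx, got 0x%lx", expected, val);
            GUEST_ASSERT_EQ(val, expected);
            GUEST_DONE();
    }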
@@ -11500,6 +11500,8 @@ F:	arch/x86/include/uapi/asm/svm.h
 F:	arch/x86/include/uapi/asm/vmx.h
 F:	arch/x86/kvm/
 F:	arch/x86/kvm/*/
+F:	tools/testing/selftests/kvm/*/x86_64/
+F:	tools/testing/selftests/kvm/x86_64/
 
 KERNFS
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>

@@ -11798,15 +11798,22 @@ static int sync_regs(struct kvm_vcpu *vcpu)
                __set_regs(vcpu, &vcpu->run->s.regs.regs);
                vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS;
        }
+
        if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) {
-               if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs))
+               struct kvm_sregs sregs = vcpu->run->s.regs.sregs;
+
+               if (__set_sregs(vcpu, &sregs))
                        return -EINVAL;
+
                vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS;
        }
+
        if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) {
-               if (kvm_vcpu_ioctl_x86_set_vcpu_events(
-                               vcpu, &vcpu->run->s.regs.events))
+               struct kvm_vcpu_events events = vcpu->run->s.regs.events;
+
+               if (kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events))
                        return -EINVAL;
+
                vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS;
        }

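The hunk above is the TOCTOU fix itself: vcpu->run is mapped into userspace and stays writable while KVM runs, so validating a field in place and then consuming it straight from the shared buffer lets a racing userspace flip the value between the check and the use. Snapshotting into an on-stack local first makes the check and the use see the same value. A generic, self-contained sketch of the idiom (hypothetical names, not KVM code):

    #include <stdbool.h>
    #include <errno.h>

    struct shared {
            volatile int value;   /* concurrently writable by an untrusted party */
    };

    static bool validate(int v) { return v >= 0; }  /* placeholder check */
    static void use(int v)      { (void)v; }        /* placeholder consumer */

    static int consume(struct shared *s)
    {
            int val = s->value;   /* snapshot exactly once... */

            if (!validate(val))   /* ...validate the snapshot... */
                    return -EINVAL;
            use(val);             /* ...and consume the same snapshot */
            return 0;
    }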
@@ -23,6 +23,7 @@ LIBKVM += lib/guest_modes.c
 LIBKVM += lib/io.c
 LIBKVM += lib/kvm_util.c
 LIBKVM += lib/memstress.c
+LIBKVM += lib/guest_sprintf.c
 LIBKVM += lib/rbtree.c
 LIBKVM += lib/sparsebit.c
 LIBKVM += lib/test_util.c

@@ -122,6 +123,7 @@ TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
 TEST_GEN_PROGS_x86_64 += demand_paging_test
 TEST_GEN_PROGS_x86_64 += dirty_log_test
 TEST_GEN_PROGS_x86_64 += dirty_log_perf_test
+TEST_GEN_PROGS_x86_64 += guest_print_test
 TEST_GEN_PROGS_x86_64 += hardware_disable_test
 TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
 TEST_GEN_PROGS_x86_64 += kvm_page_table_test

@@ -152,6 +154,7 @@ TEST_GEN_PROGS_aarch64 += access_tracking_perf_test
 TEST_GEN_PROGS_aarch64 += demand_paging_test
 TEST_GEN_PROGS_aarch64 += dirty_log_test
 TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
+TEST_GEN_PROGS_aarch64 += guest_print_test
 TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
 TEST_GEN_PROGS_aarch64 += kvm_page_table_test
 TEST_GEN_PROGS_aarch64 += memslot_modification_stress_test

@@ -168,6 +171,7 @@ TEST_GEN_PROGS_s390x += s390x/tprot
 TEST_GEN_PROGS_s390x += s390x/cmma_test
 TEST_GEN_PROGS_s390x += demand_paging_test
 TEST_GEN_PROGS_s390x += dirty_log_test
+TEST_GEN_PROGS_s390x += guest_print_test
 TEST_GEN_PROGS_s390x += kvm_create_max_vcpus
 TEST_GEN_PROGS_s390x += kvm_page_table_test
 TEST_GEN_PROGS_s390x += rseq_test

@@ -176,6 +180,7 @@ TEST_GEN_PROGS_s390x += kvm_binary_stats_test
 
 TEST_GEN_PROGS_riscv += demand_paging_test
 TEST_GEN_PROGS_riscv += dirty_log_test
+TEST_GEN_PROGS_riscv += guest_print_test
 TEST_GEN_PROGS_riscv += kvm_create_max_vcpus
 TEST_GEN_PROGS_riscv += kvm_page_table_test
 TEST_GEN_PROGS_riscv += set_memory_region_test

@@ -204,6 +209,7 @@ endif
 CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
        -Wno-gnu-variable-sized-type-not-at-end -MD\
        -fno-builtin-memcmp -fno-builtin-memcpy -fno-builtin-memset \
+       -fno-builtin-strnlen \
        -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
        -I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \
        -I$(<D) -Iinclude/$(ARCH_DIR) -I ../rseq -I.. $(EXTRA_CFLAGS) \

@@ -98,7 +98,7 @@ static void test_user_raz_wi(struct kvm_vcpu *vcpu)
                uint64_t val;
 
                vcpu_get_reg(vcpu, reg_id, &val);
-               ASSERT_EQ(val, 0);
+               TEST_ASSERT_EQ(val, 0);
 
                /*
                 * Expect the ioctl to succeed with no effect on the register
@@ -107,7 +107,7 @@ static void test_user_raz_wi(struct kvm_vcpu *vcpu)
                vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL);
 
                vcpu_get_reg(vcpu, reg_id, &val);
-               ASSERT_EQ(val, 0);
+               TEST_ASSERT_EQ(val, 0);
        }
 }
 
@@ -127,14 +127,14 @@ static void test_user_raz_invariant(struct kvm_vcpu *vcpu)
                uint64_t val;
 
                vcpu_get_reg(vcpu, reg_id, &val);
-               ASSERT_EQ(val, 0);
+               TEST_ASSERT_EQ(val, 0);
 
                r = __vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL);
                TEST_ASSERT(r < 0 && errno == EINVAL,
                            "unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno);
 
                vcpu_get_reg(vcpu, reg_id, &val);
-               ASSERT_EQ(val, 0);
+               TEST_ASSERT_EQ(val, 0);
        }
 }

@@ -19,7 +19,6 @@
  *
  * Copyright (c) 2021, Google LLC.
  */
-
 #define _GNU_SOURCE
 
 #include <stdlib.h>

@@ -155,11 +154,13 @@ static void guest_validate_irq(unsigned int intid,
        xcnt_diff_us = cycles_to_usec(xcnt - shared_data->xcnt);
 
        /* Make sure we are dealing with the correct timer IRQ */
-       GUEST_ASSERT_2(intid == timer_irq, intid, timer_irq);
+       GUEST_ASSERT_EQ(intid, timer_irq);
 
        /* Basic 'timer condition met' check */
-       GUEST_ASSERT_3(xcnt >= cval, xcnt, cval, xcnt_diff_us);
-       GUEST_ASSERT_1(xctl & CTL_ISTATUS, xctl);
+       __GUEST_ASSERT(xcnt >= cval,
+                      "xcnt = 0x%llx, cval = 0x%llx, xcnt_diff_us = 0x%llx",
+                      xcnt, cval, xcnt_diff_us);
+       __GUEST_ASSERT(xctl & CTL_ISTATUS, "xcnt = 0x%llx", xcnt);
 
        WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1);
 }

@@ -192,8 +193,7 @@ static void guest_run_stage(struct test_vcpu_shared_data *shared_data,
                                        TIMER_TEST_ERR_MARGIN_US);
 
                irq_iter = READ_ONCE(shared_data->nr_iter);
-               GUEST_ASSERT_2(config_iter + 1 == irq_iter,
-                               config_iter + 1, irq_iter);
+               GUEST_ASSERT_EQ(config_iter + 1, irq_iter);
        }
 }

@@ -243,13 +243,9 @@ static void *test_vcpu_run(void *arg)
                break;
        case UCALL_ABORT:
                sync_global_from_guest(vm, *shared_data);
-               REPORT_GUEST_ASSERT_N(uc, "values: %lu, %lu; %lu, vcpu %u; stage; %u; iter: %u",
-                                     GUEST_ASSERT_ARG(uc, 0),
-                                     GUEST_ASSERT_ARG(uc, 1),
-                                     GUEST_ASSERT_ARG(uc, 2),
-                                     vcpu_idx,
-                                     shared_data->guest_stage,
-                                     shared_data->nr_iter);
+               fprintf(stderr, "Guest assert failed, vcpu %u; stage; %u; iter: %u\n",
+                       vcpu_idx, shared_data->guest_stage, shared_data->nr_iter);
+               REPORT_GUEST_ASSERT(uc);
                break;
        default:
                TEST_FAIL("Unexpected guest exit\n");

@@ -365,7 +365,7 @@ static void guest_wp_handler(struct ex_regs *regs)
 
 static void guest_ss_handler(struct ex_regs *regs)
 {
-       GUEST_ASSERT_1(ss_idx < 4, ss_idx);
+       __GUEST_ASSERT(ss_idx < 4, "Expected index < 4, got '%u'", ss_idx);
        ss_addr[ss_idx++] = regs->pc;
        regs->pstate |= SPSR_SS;
 }

@@ -410,8 +410,8 @@ static void guest_code_ss(int test_cnt)
                /* Userspace disables Single Step when the end is nigh. */
                asm volatile("iter_ss_end:\n");
 
-               GUEST_ASSERT(bvr == w_bvr);
-               GUEST_ASSERT(wvr == w_wvr);
+               GUEST_ASSERT_EQ(bvr, w_bvr);
+               GUEST_ASSERT_EQ(wvr, w_wvr);
        }
        GUEST_DONE();
 }

@@ -450,7 +450,7 @@ static void test_guest_debug_exceptions(uint8_t bpn, uint8_t wpn, uint8_t ctx_bp
        vcpu_run(vcpu);
        switch (get_ucall(vcpu, &uc)) {
        case UCALL_ABORT:
-               REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx");
+               REPORT_GUEST_ASSERT(uc);
                break;
        case UCALL_DONE:
                goto done;

@@ -8,7 +8,6 @@
  * hypercalls are properly masked or unmasked to the guest when disabled or
  * enabled from the KVM userspace, respectively.
  */
-
 #include <errno.h>
 #include <linux/arm-smccc.h>
 #include <asm/kvm.h>

@@ -105,15 +104,17 @@ static void guest_test_hvc(const struct test_hvc_info *hc_info)
                switch (stage) {
                case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
                case TEST_STAGE_HVC_IFACE_FALSE_INFO:
-                       GUEST_ASSERT_3(res.a0 == SMCCC_RET_NOT_SUPPORTED,
-                                       res.a0, hc_info->func_id, hc_info->arg1);
+                       __GUEST_ASSERT(res.a0 == SMCCC_RET_NOT_SUPPORTED,
+                                      "a0 = 0x%lx, func_id = 0x%x, arg1 = 0x%llx, stage = %u",
+                                      res.a0, hc_info->func_id, hc_info->arg1, stage);
                        break;
                case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
-                       GUEST_ASSERT_3(res.a0 != SMCCC_RET_NOT_SUPPORTED,
-                                       res.a0, hc_info->func_id, hc_info->arg1);
+                       __GUEST_ASSERT(res.a0 != SMCCC_RET_NOT_SUPPORTED,
+                                      "a0 = 0x%lx, func_id = 0x%x, arg1 = 0x%llx, stage = %u",
+                                      res.a0, hc_info->func_id, hc_info->arg1, stage);
                        break;
                default:
-                       GUEST_ASSERT_1(0, stage);
+                       GUEST_FAIL("Unexpected stage = %u", stage);
                }
        }
 }

@@ -132,7 +133,7 @@ static void guest_code(void)
                guest_test_hvc(false_hvc_info);
                break;
        default:
-               GUEST_ASSERT_1(0, stage);
+               GUEST_FAIL("Unexpected stage = %u", stage);
        }
 
        GUEST_SYNC(stage);

@@ -290,10 +291,7 @@ static void test_run(void)
                        guest_done = true;
                        break;
                case UCALL_ABORT:
-                       REPORT_GUEST_ASSERT_N(uc, "values: 0x%lx, 0x%lx; 0x%lx, stage: %u",
-                                             GUEST_ASSERT_ARG(uc, 0),
-                                             GUEST_ASSERT_ARG(uc, 1),
-                                             GUEST_ASSERT_ARG(uc, 2), stage);
+                       REPORT_GUEST_ASSERT(uc);
                        break;
                default:
                        TEST_FAIL("Unexpected guest exit\n");

@@ -7,7 +7,6 @@
  * hugetlbfs with a hole). It checks that the expected handling method is
  * called (e.g., uffd faults with the right address and write/read flag).
  */
-
 #define _GNU_SOURCE
 #include <linux/bitmap.h>
 #include <fcntl.h>

@@ -293,12 +292,12 @@ static void guest_code(struct test_desc *test)
 
 static void no_dabt_handler(struct ex_regs *regs)
 {
-       GUEST_ASSERT_1(false, read_sysreg(far_el1));
+       GUEST_FAIL("Unexpected dabt, far_el1 = 0x%llx", read_sysreg(far_el1));
 }
 
 static void no_iabt_handler(struct ex_regs *regs)
 {
-       GUEST_ASSERT_1(false, regs->pc);
+       GUEST_FAIL("Unexpected iabt, pc = 0x%lx", regs->pc);
 }
 
 static struct uffd_args {

@@ -318,7 +317,7 @@ static int uffd_generic_handler(int uffd_mode, int uffd, struct uffd_msg *msg,
 
        TEST_ASSERT(uffd_mode == UFFDIO_REGISTER_MODE_MISSING,
                    "The only expected UFFD mode is MISSING");
-       ASSERT_EQ(addr, (uint64_t)args->hva);
+       TEST_ASSERT_EQ(addr, (uint64_t)args->hva);
 
        pr_debug("uffd fault: addr=%p write=%d\n",
                 (void *)addr, !!(flags & UFFD_PAGEFAULT_FLAG_WRITE));

@@ -432,7 +431,7 @@ static void mmio_on_test_gpa_handler(struct kvm_vm *vm, struct kvm_run *run)
        region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
        hva = (void *)region->region.userspace_addr;
 
-       ASSERT_EQ(run->mmio.phys_addr, region->region.guest_phys_addr);
+       TEST_ASSERT_EQ(run->mmio.phys_addr, region->region.guest_phys_addr);
 
        memcpy(hva, run->mmio.data, run->mmio.len);
        events.mmio_exits += 1;

@@ -631,9 +630,9 @@ static void setup_default_handlers(struct test_desc *test)
 
 static void check_event_counts(struct test_desc *test)
 {
-       ASSERT_EQ(test->expected_events.uffd_faults, events.uffd_faults);
-       ASSERT_EQ(test->expected_events.mmio_exits, events.mmio_exits);
-       ASSERT_EQ(test->expected_events.fail_vcpu_runs, events.fail_vcpu_runs);
+       TEST_ASSERT_EQ(test->expected_events.uffd_faults, events.uffd_faults);
+       TEST_ASSERT_EQ(test->expected_events.mmio_exits, events.mmio_exits);
+       TEST_ASSERT_EQ(test->expected_events.fail_vcpu_runs, events.fail_vcpu_runs);
 }
 
 static void print_test_banner(enum vm_guest_mode mode, struct test_params *p)

@@ -679,7 +678,7 @@ static void vcpu_run_loop(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
                }
                break;
        case UCALL_ABORT:
-               REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx");
+               REPORT_GUEST_ASSERT(uc);
                break;
        case UCALL_DONE:
                goto done;

@@ -7,7 +7,6 @@
  * host to inject a specific intid via a GUEST_SYNC call, and then checks that
  * it received it.
  */
-
 #include <asm/kvm.h>
 #include <asm/kvm_para.h>
 #include <sys/eventfd.h>

@@ -781,7 +780,7 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
                run_guest_cmd(vcpu, gic_fd, &inject_args, &args);
                break;
        case UCALL_ABORT:
-               REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx");
+               REPORT_GUEST_ASSERT(uc);
                break;
        case UCALL_DONE:
                goto done;

tools/testing/selftests/kvm/guest_print_test.c (new file, 219 lines)
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A test for GUEST_PRINTF
+ *
+ * Copyright 2022, Google, Inc. and/or its affiliates.
+ */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+
+struct guest_vals {
+       uint64_t a;
+       uint64_t b;
+       uint64_t type;
+};
+
+static struct guest_vals vals;
+
+/* GUEST_PRINTF()/GUEST_ASSERT_FMT() does not support float or double. */
+#define TYPE_LIST                                      \
+TYPE(test_type_i64,  I64,  "%ld",   int64_t)           \
+TYPE(test_type_u64,  U64u, "%lu",   uint64_t)          \
+TYPE(test_type_x64,  U64x, "0x%lx", uint64_t)          \
+TYPE(test_type_X64,  U64X, "0x%lX", uint64_t)          \
+TYPE(test_type_u32,  U32u, "%u",    uint32_t)          \
+TYPE(test_type_x32,  U32x, "0x%x",  uint32_t)          \
+TYPE(test_type_X32,  U32X, "0x%X",  uint32_t)          \
+TYPE(test_type_int,  INT,  "%d",    int)               \
+TYPE(test_type_char, CHAR, "%c",    char)              \
+TYPE(test_type_str,  STR,  "'%s'",  const char *)      \
+TYPE(test_type_ptr,  PTR,  "%p",    uintptr_t)
+
+enum args_type {
+#define TYPE(fn, ext, fmt_t, T) TYPE_##ext,
+       TYPE_LIST
+#undef TYPE
+};
+
+static void run_test(struct kvm_vcpu *vcpu, const char *expected_printf,
+                    const char *expected_assert);
+
+#define BUILD_TYPE_STRINGS_AND_HELPER(fn, ext, fmt_t, T)                    \
+const char *PRINTF_FMT_##ext = "Got params a = " fmt_t " and b = " fmt_t;   \
+const char *ASSERT_FMT_##ext = "Expected " fmt_t ", got " fmt_t " instead"; \
+static void fn(struct kvm_vcpu *vcpu, T a, T b)                             \
+{                                                                           \
+       char expected_printf[UCALL_BUFFER_LEN];                              \
+       char expected_assert[UCALL_BUFFER_LEN];                              \
+                                                                            \
+       snprintf(expected_printf, UCALL_BUFFER_LEN, PRINTF_FMT_##ext, a, b); \
+       snprintf(expected_assert, UCALL_BUFFER_LEN, ASSERT_FMT_##ext, a, b); \
+       vals = (struct guest_vals){ (uint64_t)a, (uint64_t)b, TYPE_##ext };  \
+       sync_global_to_guest(vcpu->vm, vals);                                \
+       run_test(vcpu, expected_printf, expected_assert);                    \
+}
+
+#define TYPE(fn, ext, fmt_t, T) \
+               BUILD_TYPE_STRINGS_AND_HELPER(fn, ext, fmt_t, T)
+       TYPE_LIST
+#undef TYPE
+
+static void guest_code(void)
+{
+       while (1) {
+               switch (vals.type) {
+#define TYPE(fn, ext, fmt_t, T)                                        \
+               case TYPE_##ext:                                        \
+                       GUEST_PRINTF(PRINTF_FMT_##ext, vals.a, vals.b); \
+                       __GUEST_ASSERT(vals.a == vals.b,                \
+                                      ASSERT_FMT_##ext, vals.a, vals.b); \
+                       break;
+               TYPE_LIST
+#undef TYPE
+               default:
+                       GUEST_SYNC(vals.type);
+               }
+
+               GUEST_DONE();
+       }
+}
+
+/*
+ * Unfortunately this gets a little messy because 'assert_msg' doesn't
+ * just contains the matching string, it also contains additional assert
+ * info.  Fortunately the part that matches should be at the very end of
+ * 'assert_msg'.
+ */
+static void ucall_abort(const char *assert_msg, const char *expected_assert_msg)
+{
+       int len_str = strlen(assert_msg);
+       int len_substr = strlen(expected_assert_msg);
+       int offset = len_str - len_substr;
+
+       TEST_ASSERT(len_substr <= len_str,
+                   "Expected '%s' to be a substring of '%s'\n",
+                   assert_msg, expected_assert_msg);
+
+       TEST_ASSERT(strcmp(&assert_msg[offset], expected_assert_msg) == 0,
+                   "Unexpected mismatch. Expected: '%s', got: '%s'",
+                   expected_assert_msg, &assert_msg[offset]);
+}
+
+static void run_test(struct kvm_vcpu *vcpu, const char *expected_printf,
+                    const char *expected_assert)
+{
+       struct kvm_run *run = vcpu->run;
+       struct ucall uc;
+
+       while (1) {
+               vcpu_run(vcpu);
+
+               TEST_ASSERT(run->exit_reason == UCALL_EXIT_REASON,
+                           "Unexpected exit reason: %u (%s),\n",
+                           run->exit_reason, exit_reason_str(run->exit_reason));
+
+               switch (get_ucall(vcpu, &uc)) {
+               case UCALL_SYNC:
+                       TEST_FAIL("Unknown 'args_type' = %lu", uc.args[1]);
+                       break;
+               case UCALL_PRINTF:
+                       TEST_ASSERT(strcmp(uc.buffer, expected_printf) == 0,
+                                   "Unexpected mismatch. Expected: '%s', got: '%s'",
+                                   expected_printf, uc.buffer);
+                       break;
+               case UCALL_ABORT:
+                       ucall_abort(uc.buffer, expected_assert);
+                       break;
+               case UCALL_DONE:
+                       return;
+               default:
+                       TEST_FAIL("Unknown ucall %lu", uc.cmd);
+               }
+       }
+}
+
+static void guest_code_limits(void)
+{
+       char test_str[UCALL_BUFFER_LEN + 10];
+
+       memset(test_str, 'a', sizeof(test_str));
+       test_str[sizeof(test_str) - 1] = 0;
+
+       GUEST_PRINTF("%s", test_str);
+}
+
+static void test_limits(void)
+{
+       struct kvm_vcpu *vcpu;
+       struct kvm_run *run;
+       struct kvm_vm *vm;
+       struct ucall uc;
+
+       vm = vm_create_with_one_vcpu(&vcpu, guest_code_limits);
+       run = vcpu->run;
+       vcpu_run(vcpu);
+
+       TEST_ASSERT(run->exit_reason == UCALL_EXIT_REASON,
+                   "Unexpected exit reason: %u (%s),\n",
+                   run->exit_reason, exit_reason_str(run->exit_reason));
+
+       TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_ABORT,
+                   "Unexpected ucall command: %lu, Expected: %u (UCALL_ABORT)\n",
+                   uc.cmd, UCALL_ABORT);
+
+       kvm_vm_free(vm);
+}
+
+int main(int argc, char *argv[])
+{
+       struct kvm_vcpu *vcpu;
+       struct kvm_vm *vm;
+
+       vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+
+       test_type_i64(vcpu, -1, -1);
+       test_type_i64(vcpu, -1, 1);
+       test_type_i64(vcpu, 0x1234567890abcdef, 0x1234567890abcdef);
+       test_type_i64(vcpu, 0x1234567890abcdef, 0x1234567890abcdee);
+
+       test_type_u64(vcpu, 0x1234567890abcdef, 0x1234567890abcdef);
+       test_type_u64(vcpu, 0x1234567890abcdef, 0x1234567890abcdee);
+       test_type_x64(vcpu, 0x1234567890abcdef, 0x1234567890abcdef);
+       test_type_x64(vcpu, 0x1234567890abcdef, 0x1234567890abcdee);
+       test_type_X64(vcpu, 0x1234567890abcdef, 0x1234567890abcdef);
+       test_type_X64(vcpu, 0x1234567890abcdef, 0x1234567890abcdee);
+
+       test_type_u32(vcpu, 0x90abcdef, 0x90abcdef);
+       test_type_u32(vcpu, 0x90abcdef, 0x90abcdee);
+       test_type_x32(vcpu, 0x90abcdef, 0x90abcdef);
+       test_type_x32(vcpu, 0x90abcdef, 0x90abcdee);
+       test_type_X32(vcpu, 0x90abcdef, 0x90abcdef);
+       test_type_X32(vcpu, 0x90abcdef, 0x90abcdee);
+
+       test_type_int(vcpu, -1, -1);
+       test_type_int(vcpu, -1, 1);
+       test_type_int(vcpu, 1, 1);
+
+       test_type_char(vcpu, 'a', 'a');
+       test_type_char(vcpu, 'a', 'A');
+       test_type_char(vcpu, 'a', 'b');
+
+       test_type_str(vcpu, "foo", "foo");
+       test_type_str(vcpu, "foo", "bar");
+
+       test_type_ptr(vcpu, 0x1234567890abcdef, 0x1234567890abcdef);
+       test_type_ptr(vcpu, 0x1234567890abcdef, 0x1234567890abcdee);
+
+       kvm_vm_free(vm);
+
+       test_limits();
+
+       return 0;
+}

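guest_print_test.c leans heavily on an X-macro: TYPE_LIST is expanded three times with different local definitions of TYPE() to generate the enum, the per-type helpers, and the guest switch cases from a single list, so the three can never drift apart. A stripped-down, runnable sketch of the technique (hypothetical names):

    #include <stdio.h>

    /* One master list; each entry expands to whatever TYPE() means locally. */
    #define FRUIT_LIST          \
            TYPE(APPLE,  "apple")   \
            TYPE(ORANGE, "orange")

    /* Expansion 1: an enum of tags. */
    enum fruit {
    #define TYPE(tag, name) FRUIT_##tag,
            FRUIT_LIST
    #undef TYPE
    };

    /* Expansion 2: a name table kept in sync with the enum by construction. */
    static const char *fruit_names[] = {
    #define TYPE(tag, name) [FRUIT_##tag] = name,
            FRUIT_LIST
    #undef TYPE
    };

    int main(void)
    {
            printf("%s\n", fruit_names[FRUIT_ORANGE]);  /* prints "orange" */
            return 0;
    }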
@@ -41,7 +41,7 @@ static inline uint64_t timer_get_cntct(enum arch_timer timer)
        case PHYSICAL:
                return read_sysreg(cntpct_el0);
        default:
-               GUEST_ASSERT_1(0, timer);
+               GUEST_FAIL("Unexpected timer type = %u", timer);
        }
 
        /* We should not reach here */

@@ -58,7 +58,7 @@ static inline void timer_set_cval(enum arch_timer timer, uint64_t cval)
                write_sysreg(cval, cntp_cval_el0);
                break;
        default:
-               GUEST_ASSERT_1(0, timer);
+               GUEST_FAIL("Unexpected timer type = %u", timer);
        }
 
        isb();

@@ -72,7 +72,7 @@ static inline uint64_t timer_get_cval(enum arch_timer timer)
        case PHYSICAL:
                return read_sysreg(cntp_cval_el0);
        default:
-               GUEST_ASSERT_1(0, timer);
+               GUEST_FAIL("Unexpected timer type = %u", timer);
        }
 
        /* We should not reach here */

@@ -89,7 +89,7 @@ static inline void timer_set_tval(enum arch_timer timer, uint32_t tval)
                write_sysreg(tval, cntp_tval_el0);
                break;
        default:
-               GUEST_ASSERT_1(0, timer);
+               GUEST_FAIL("Unexpected timer type = %u", timer);
        }
 
        isb();

@@ -105,7 +105,7 @@ static inline void timer_set_ctl(enum arch_timer timer, uint32_t ctl)
                write_sysreg(ctl, cntp_ctl_el0);
                break;
        default:
-               GUEST_ASSERT_1(0, timer);
+               GUEST_FAIL("Unexpected timer type = %u", timer);
        }
 
        isb();

@@ -119,7 +119,7 @@ static inline uint32_t timer_get_ctl(enum arch_timer timer)
        case PHYSICAL:
                return read_sysreg(cntp_ctl_el0);
        default:
-               GUEST_ASSERT_1(0, timer);
+               GUEST_FAIL("Unexpected timer type = %u", timer);
        }
 
        /* We should not reach here */

tools/testing/selftests/kvm/include/aarch64/ucall.h (new file, 20 lines)
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_UCALL_H
+#define SELFTEST_KVM_UCALL_H
+
+#include "kvm_util_base.h"
+
+#define UCALL_EXIT_REASON       KVM_EXIT_MMIO
+
+/*
+ * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each
+ * VM), it must not be accessed from host code.
+ */
+extern vm_vaddr_t *ucall_exit_mmio_addr;
+
+static inline void ucall_arch_do_ucall(vm_vaddr_t uc)
+{
+       WRITE_ONCE(*ucall_exit_mmio_addr, uc);
+}
+
+#endif

tools/testing/selftests/kvm/include/riscv/ucall.h (new file, 20 lines)
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_UCALL_H
+#define SELFTEST_KVM_UCALL_H
+
+#include "processor.h"
+
+#define UCALL_EXIT_REASON       KVM_EXIT_RISCV_SBI
+
+static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+{
+}
+
+static inline void ucall_arch_do_ucall(vm_vaddr_t uc)
+{
+       sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT,
+                 KVM_RISCV_SELFTESTS_SBI_UCALL,
+                 uc, 0, 0, 0, 0, 0);
+}
+
+#endif

tools/testing/selftests/kvm/include/s390x/ucall.h (new file, 19 lines)
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_UCALL_H
+#define SELFTEST_KVM_UCALL_H
+
+#include "kvm_util_base.h"
+
+#define UCALL_EXIT_REASON       KVM_EXIT_S390_SIEIC
+
+static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+{
+}
+
+static inline void ucall_arch_do_ucall(vm_vaddr_t uc)
+{
+       /* Exit via DIAGNOSE 0x501 (normally used for breakpoints) */
+       asm volatile ("diag 0,%0,0x501" : : "a"(uc) : "memory");
+}
+
+#endif

@@ -53,14 +53,13 @@ void test_assert(bool exp, const char *exp_str,
 #define TEST_ASSERT(e, fmt, ...) \
        test_assert((e), #e, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
 
-#define ASSERT_EQ(a, b) do { \
-       typeof(a) __a = (a); \
-       typeof(b) __b = (b); \
-       TEST_ASSERT(__a == __b, \
-                   "ASSERT_EQ(%s, %s) failed.\n" \
-                   "\t%s is %#lx\n" \
-                   "\t%s is %#lx", \
-                   #a, #b, #a, (unsigned long) __a, #b, (unsigned long) __b); \
+#define TEST_ASSERT_EQ(a, b)                                           \
+do {                                                                   \
+       typeof(a) __a = (a);                                            \
+       typeof(b) __b = (b);                                            \
+       test_assert(__a == __b, #a " == " #b, __FILE__, __LINE__,       \
+                   "%#lx != %#lx (%s != %s)",                          \
+                   (unsigned long)(__a), (unsigned long)(__b), #a, #b);\
 } while (0)
 
 #define TEST_ASSERT_KVM_EXIT_REASON(vcpu, expected) do { \

@@ -186,4 +185,7 @@ static inline uint32_t atoi_non_negative(const char *name, const char *num_str)
        return num;
 }
 
+int guest_vsnprintf(char *buf, int n, const char *fmt, va_list args);
+int guest_snprintf(char *buf, int n, const char *fmt, ...);
+
 #endif /* SELFTEST_KVM_TEST_UTIL_H */

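TEST_ASSERT_EQ (the renamed ASSERT_EQ) evaluates each operand exactly once into a typeof() local, so arguments with side effects are safe, and a failure report carries both values alongside the stringified expressions. A hedged usage sketch (hypothetical function):

    #include "test_util.h"      /* the selftests-internal header shown above */

    static void check_counter(unsigned long actual)
    {
            /*
             * On mismatch the test fails with output along the lines of
             * "0x2a != 0x7 (actual != 7UL)", per the macro's format string.
             */
            TEST_ASSERT_EQ(actual, 7UL);
    }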
@@ -7,21 +7,25 @@
 #ifndef SELFTEST_KVM_UCALL_COMMON_H
 #define SELFTEST_KVM_UCALL_COMMON_H
 #include "test_util.h"
+#include "ucall.h"
 
 /* Common ucalls */
 enum {
        UCALL_NONE,
        UCALL_SYNC,
        UCALL_ABORT,
+       UCALL_PRINTF,
        UCALL_DONE,
        UCALL_UNHANDLED,
 };
 
 #define UCALL_MAX_ARGS 7
+#define UCALL_BUFFER_LEN 1024
 
 struct ucall {
        uint64_t cmd;
        uint64_t args[UCALL_MAX_ARGS];
+       char buffer[UCALL_BUFFER_LEN];
 
        /* Host virtual address of this struct. */
        struct ucall *hva;

@@ -32,8 +36,12 @@ void ucall_arch_do_ucall(vm_vaddr_t uc);
 void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu);
 
 void ucall(uint64_t cmd, int nargs, ...);
+void ucall_fmt(uint64_t cmd, const char *fmt, ...);
+void ucall_assert(uint64_t cmd, const char *exp, const char *file,
+                 unsigned int line, const char *fmt, ...);
 uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc);
 void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa);
+int ucall_nr_pages_required(uint64_t page_size);
 
 /*
  * Perform userspace call without any associated data. This bare call avoids

@@ -46,8 +54,11 @@ void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa);
 #define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4) \
                                ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4)
 #define GUEST_SYNC(stage)      ucall(UCALL_SYNC, 2, "hello", stage)
+#define GUEST_PRINTF(_fmt, _args...) ucall_fmt(UCALL_PRINTF, _fmt, ##_args)
 #define GUEST_DONE()           ucall(UCALL_DONE, 0)
 
+#define REPORT_GUEST_PRINTF(ucall) pr_info("%s", (ucall).buffer)
+
 enum guest_assert_builtin_args {
        GUEST_ERROR_STRING,
        GUEST_FILE,

@@ -55,70 +66,41 @@ enum guest_assert_builtin_args {
        GUEST_ASSERT_BUILTIN_NARGS
 };
 
-#define __GUEST_ASSERT(_condition, _condstr, _nargs, _args...)         \
-do {                                                                   \
-       if (!(_condition))                                              \
-               ucall(UCALL_ABORT, GUEST_ASSERT_BUILTIN_NARGS + _nargs, \
-                     "Failed guest assert: " _condstr,                 \
-                     __FILE__, __LINE__, ##_args);                     \
+#define ____GUEST_ASSERT(_condition, _exp, _fmt, _args...)             \
+do {                                                                   \
+       if (!(_condition))                                              \
+               ucall_assert(UCALL_ABORT, _exp, __FILE__, __LINE__, _fmt, ##_args); \
 } while (0)
 
-#define GUEST_ASSERT(_condition) \
-       __GUEST_ASSERT(_condition, #_condition, 0, 0)
+#define __GUEST_ASSERT(_condition, _fmt, _args...)                     \
+       ____GUEST_ASSERT(_condition, #_condition, _fmt, ##_args)
 
-#define GUEST_ASSERT_1(_condition, arg1) \
-       __GUEST_ASSERT(_condition, #_condition, 1, (arg1))
+#define GUEST_ASSERT(_condition)                                       \
+       __GUEST_ASSERT(_condition, #_condition)
 
-#define GUEST_ASSERT_2(_condition, arg1, arg2) \
-       __GUEST_ASSERT(_condition, #_condition, 2, (arg1), (arg2))
+#define GUEST_FAIL(_fmt, _args...)                                     \
+       ucall_assert(UCALL_ABORT, "Unconditional guest failure",        \
+                    __FILE__, __LINE__, _fmt, ##_args)
 
-#define GUEST_ASSERT_3(_condition, arg1, arg2, arg3) \
-       __GUEST_ASSERT(_condition, #_condition, 3, (arg1), (arg2), (arg3))
+#define GUEST_ASSERT_EQ(a, b)                                          \
+do {                                                                   \
+       typeof(a) __a = (a);                                            \
+       typeof(b) __b = (b);                                            \
+       ____GUEST_ASSERT(__a == __b, #a " == " #b, "%#lx != %#lx (%s != %s)", \
+                        (unsigned long)(__a), (unsigned long)(__b), #a, #b); \
+} while (0)
 
-#define GUEST_ASSERT_4(_condition, arg1, arg2, arg3, arg4) \
-       __GUEST_ASSERT(_condition, #_condition, 4, (arg1), (arg2), (arg3), (arg4))
+#define GUEST_ASSERT_NE(a, b)                                          \
+do {                                                                   \
+       typeof(a) __a = (a);                                            \
+       typeof(b) __b = (b);                                            \
+       ____GUEST_ASSERT(__a != __b, #a " != " #b, "%#lx == %#lx (%s == %s)", \
+                        (unsigned long)(__a), (unsigned long)(__b), #a, #b); \
+} while (0)
 
-#define GUEST_ASSERT_EQ(a, b) __GUEST_ASSERT((a) == (b), #a " == " #b, 2, a, b)
-
-#define __REPORT_GUEST_ASSERT(_ucall, fmt, _args...)                   \
-       TEST_FAIL("%s at %s:%ld\n" fmt,                                 \
-                 (const char *)(_ucall).args[GUEST_ERROR_STRING],      \
-                 (const char *)(_ucall).args[GUEST_FILE],              \
-                 (_ucall).args[GUEST_LINE],                            \
-                 ##_args)
-
-#define GUEST_ASSERT_ARG(ucall, i) ((ucall).args[GUEST_ASSERT_BUILTIN_NARGS + i])
-
-#define REPORT_GUEST_ASSERT(ucall)             \
-       __REPORT_GUEST_ASSERT((ucall), "")
-
-#define REPORT_GUEST_ASSERT_1(ucall, fmt)      \
-       __REPORT_GUEST_ASSERT((ucall),          \
-                             fmt,              \
-                             GUEST_ASSERT_ARG((ucall), 0))
-
-#define REPORT_GUEST_ASSERT_2(ucall, fmt)      \
-       __REPORT_GUEST_ASSERT((ucall),          \
-                             fmt,              \
-                             GUEST_ASSERT_ARG((ucall), 0),     \
-                             GUEST_ASSERT_ARG((ucall), 1))
-
-#define REPORT_GUEST_ASSERT_3(ucall, fmt)      \
-       __REPORT_GUEST_ASSERT((ucall),          \
-                             fmt,              \
-                             GUEST_ASSERT_ARG((ucall), 0),     \
-                             GUEST_ASSERT_ARG((ucall), 1),     \
-                             GUEST_ASSERT_ARG((ucall), 2))
-
-#define REPORT_GUEST_ASSERT_4(ucall, fmt)      \
-       __REPORT_GUEST_ASSERT((ucall),          \
-                             fmt,              \
-                             GUEST_ASSERT_ARG((ucall), 0),     \
-                             GUEST_ASSERT_ARG((ucall), 1),     \
-                             GUEST_ASSERT_ARG((ucall), 2),     \
-                             GUEST_ASSERT_ARG((ucall), 3))
-
-#define REPORT_GUEST_ASSERT_N(ucall, fmt, args...)     \
-       __REPORT_GUEST_ASSERT((ucall), fmt, ##args)
+#define REPORT_GUEST_ASSERT(ucall)                                     \
+       test_assert(false, (const char *)(ucall).args[GUEST_ERROR_STRING], \
+                   (const char *)(ucall).args[GUEST_FILE],             \
+                   (ucall).args[GUEST_LINE], "%s", (ucall).buffer)
 
 #endif /* SELFTEST_KVM_UCALL_COMMON_H */

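This hunk is the crux of the conversion: the old GUEST_ASSERT_1..4 macros shipped up to four raw uint64_t arguments and relied on host-side REPORT_GUEST_ASSERT_N() callers knowing each assert's format, while the new macros format the message inside the guest and ship the finished string. A hedged before/after sketch using the macros from this header (hypothetical guest function):

    #include "ucall_common.h"   /* selftests-internal header */

    static void guest_check(uint64_t val, uint64_t expected)
    {
            /* Old style: raw args, decoded host-side by REPORT_GUEST_ASSERT_2(). */
            /* GUEST_ASSERT_2(val == expected, val, expected); */

            /* New style: the guest formats the message; the host just prints it. */
            __GUEST_ASSERT(val == expected,
                           "val = 0x%lx, expected = 0x%lx", val, expected);
    }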
@@ -239,7 +239,12 @@ struct kvm_x86_cpu_property {
 #define X86_PROPERTY_MAX_BASIC_LEAF            KVM_X86_CPU_PROPERTY(0, 0, EAX, 0, 31)
 #define X86_PROPERTY_PMU_VERSION               KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 0, 7)
 #define X86_PROPERTY_PMU_NR_GP_COUNTERS                KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 8, 15)
+#define X86_PROPERTY_PMU_GP_COUNTERS_BIT_WIDTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 16, 23)
 #define X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 24, 31)
+#define X86_PROPERTY_PMU_EVENTS_MASK           KVM_X86_CPU_PROPERTY(0xa, 0, EBX, 0, 7)
+#define X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK        KVM_X86_CPU_PROPERTY(0xa, 0, ECX, 0, 31)
+#define X86_PROPERTY_PMU_NR_FIXED_COUNTERS     KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 0, 4)
+#define X86_PROPERTY_PMU_FIXED_COUNTERS_BIT_WIDTH      KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 5, 12)
 
 #define X86_PROPERTY_SUPPORTED_XCR0_LO         KVM_X86_CPU_PROPERTY(0xd, 0, EAX, 0, 31)
 #define X86_PROPERTY_XSTATE_MAX_SIZE_XCR0      KVM_X86_CPU_PROPERTY(0xd, 0, EBX, 0, 31)

tools/testing/selftests/kvm/include/x86_64/ucall.h (new file, 13 lines)
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_UCALL_H
+#define SELFTEST_KVM_UCALL_H
+
+#include "kvm_util_base.h"
+
+#define UCALL_EXIT_REASON       KVM_EXIT_IO
+
+static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+{
+}
+
+#endif

@@ -200,7 +200,7 @@ static void *vcpu_worker(void *data)
                if (READ_ONCE(host_quit))
                        return NULL;
 
-               clock_gettime(CLOCK_MONOTONIC_RAW, &start);
+               clock_gettime(CLOCK_MONOTONIC, &start);
                ret = _vcpu_run(vcpu);
                ts_diff = timespec_elapsed(start);
 
@@ -367,7 +367,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
        /* Test the stage of KVM creating mappings */
        *current_stage = KVM_CREATE_MAPPINGS;
 
-       clock_gettime(CLOCK_MONOTONIC_RAW, &start);
+       clock_gettime(CLOCK_MONOTONIC, &start);
        vcpus_complete_new_stage(*current_stage);
        ts_diff = timespec_elapsed(start);
 
@@ -380,7 +380,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 
        *current_stage = KVM_UPDATE_MAPPINGS;
 
-       clock_gettime(CLOCK_MONOTONIC_RAW, &start);
+       clock_gettime(CLOCK_MONOTONIC, &start);
        vcpus_complete_new_stage(*current_stage);
        ts_diff = timespec_elapsed(start);
 
@@ -392,7 +392,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 
        *current_stage = KVM_ADJUST_MAPPINGS;
 
-       clock_gettime(CLOCK_MONOTONIC_RAW, &start);
+       clock_gettime(CLOCK_MONOTONIC, &start);
        vcpus_complete_new_stage(*current_stage);
        ts_diff = timespec_elapsed(start);

@@ -6,11 +6,7 @@
  */
 #include "kvm_util.h"
 
-/*
- * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each
- * VM), it must not be accessed from host code.
- */
-static vm_vaddr_t *ucall_exit_mmio_addr;
+vm_vaddr_t *ucall_exit_mmio_addr;
 
 void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
 {
@@ -23,11 +19,6 @@ void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
        write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gva);
 }
 
-void ucall_arch_do_ucall(vm_vaddr_t uc)
-{
-       WRITE_ONCE(*ucall_exit_mmio_addr, uc);
-}
-
 void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
 {
        struct kvm_run *run = vcpu->run;

tools/testing/selftests/kvm/lib/guest_sprintf.c (new file, 307 lines)
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "test_util.h"
+#include "kvm_util.h"
+#include "ucall_common.h"
+
+#define APPEND_BUFFER_SAFE(str, end, v) \
+do {                                   \
+       GUEST_ASSERT(str < end);        \
+       *str++ = (v);                   \
+} while (0)
+
+static int isdigit(int ch)
+{
+       return (ch >= '0') && (ch <= '9');
+}
+
+static int skip_atoi(const char **s)
+{
+       int i = 0;
+
+       while (isdigit(**s))
+               i = i * 10 + *((*s)++) - '0';
+       return i;
+}
+
+#define ZEROPAD        1               /* pad with zero */
+#define SIGN   2               /* unsigned/signed long */
+#define PLUS   4               /* show plus */
+#define SPACE  8               /* space if plus */
+#define LEFT   16              /* left justified */
+#define SMALL  32              /* Must be 32 == 0x20 */
+#define SPECIAL        64              /* 0x */
+
+#define __do_div(n, base)                              \
+({                                                     \
+       int __res;                                      \
+                                                       \
+       __res = ((uint64_t) n) % (uint32_t) base;       \
+       n = ((uint64_t) n) / (uint32_t) base;           \
+       __res;                                          \
+})
+
+static char *number(char *str, const char *end, long num, int base, int size,
+                   int precision, int type)
+{
+       /* we are called with base 8, 10 or 16, only, thus don't need "G..." */
+       static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */
+
+       char tmp[66];
+       char c, sign, locase;
+       int i;
+
+       /*
+        * locase = 0 or 0x20. ORing digits or letters with 'locase'
+        * produces same digits or (maybe lowercased) letters
+        */
+       locase = (type & SMALL);
+       if (type & LEFT)
+               type &= ~ZEROPAD;
+       if (base < 2 || base > 16)
+               return NULL;
+       c = (type & ZEROPAD) ? '0' : ' ';
+       sign = 0;
+       if (type & SIGN) {
+               if (num < 0) {
+                       sign = '-';
+                       num = -num;
+                       size--;
+               } else if (type & PLUS) {
+                       sign = '+';
+                       size--;
+               } else if (type & SPACE) {
+                       sign = ' ';
+                       size--;
+               }
+       }
+       if (type & SPECIAL) {
+               if (base == 16)
+                       size -= 2;
+               else if (base == 8)
+                       size--;
+       }
+       i = 0;
+       if (num == 0)
+               tmp[i++] = '0';
+       else
+               while (num != 0)
+                       tmp[i++] = (digits[__do_div(num, base)] | locase);
+       if (i > precision)
+               precision = i;
+       size -= precision;
+       if (!(type & (ZEROPAD + LEFT)))
+               while (size-- > 0)
+                       APPEND_BUFFER_SAFE(str, end, ' ');
+       if (sign)
+               APPEND_BUFFER_SAFE(str, end, sign);
+       if (type & SPECIAL) {
+               if (base == 8)
+                       APPEND_BUFFER_SAFE(str, end, '0');
+               else if (base == 16) {
+                       APPEND_BUFFER_SAFE(str, end, '0');
+                       APPEND_BUFFER_SAFE(str, end, 'x');
+               }
+       }
+       if (!(type & LEFT))
+               while (size-- > 0)
+                       APPEND_BUFFER_SAFE(str, end, c);
+       while (i < precision--)
+               APPEND_BUFFER_SAFE(str, end, '0');
+       while (i-- > 0)
+               APPEND_BUFFER_SAFE(str, end, tmp[i]);
+       while (size-- > 0)
+               APPEND_BUFFER_SAFE(str, end, ' ');
+
+       return str;
+}
+
+int guest_vsnprintf(char *buf, int n, const char *fmt, va_list args)
+{
+       char *str, *end;
+       const char *s;
+       uint64_t num;
+       int i, base;
+       int len;
+
+       int flags;              /* flags to number() */
+
+       int field_width;        /* width of output field */
+       int precision;          /*
+                                * min. # of digits for integers; max
+                                * number of chars for from string
+                                */
+       int qualifier;          /* 'h', 'l', or 'L' for integer fields */
+
+       end = buf + n;
+       GUEST_ASSERT(buf < end);
+       GUEST_ASSERT(n > 0);
+
+       for (str = buf; *fmt; ++fmt) {
+               if (*fmt != '%') {
+                       APPEND_BUFFER_SAFE(str, end, *fmt);
+                       continue;
+               }
+
+               /* process flags */
+               flags = 0;
+repeat:
+               ++fmt;          /* this also skips first '%' */
+               switch (*fmt) {
+               case '-':
+                       flags |= LEFT;
+                       goto repeat;
+               case '+':
+                       flags |= PLUS;
+                       goto repeat;
+               case ' ':
+                       flags |= SPACE;
+                       goto repeat;
+               case '#':
+                       flags |= SPECIAL;
+                       goto repeat;
+               case '0':
+                       flags |= ZEROPAD;
+                       goto repeat;
+               }
+
+               /* get field width */
+               field_width = -1;
+               if (isdigit(*fmt))
+                       field_width = skip_atoi(&fmt);
+               else if (*fmt == '*') {
+                       ++fmt;
+                       /* it's the next argument */
+                       field_width = va_arg(args, int);
+                       if (field_width < 0) {
+                               field_width = -field_width;
+                               flags |= LEFT;
+                       }
+               }
+
+               /* get the precision */
+               precision = -1;
+               if (*fmt == '.') {
+                       ++fmt;
+                       if (isdigit(*fmt))
+                               precision = skip_atoi(&fmt);
+                       else if (*fmt == '*') {
+                               ++fmt;
+                               /* it's the next argument */
+                               precision = va_arg(args, int);
+                       }
+                       if (precision < 0)
+                               precision = 0;
+               }
+
+               /* get the conversion qualifier */
+               qualifier = -1;
+               if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L') {
+                       qualifier = *fmt;
+                       ++fmt;
+               }
+
+               /* default base */
+               base = 10;
+
+               switch (*fmt) {
+               case 'c':
+                       if (!(flags & LEFT))
+                               while (--field_width > 0)
+                                       APPEND_BUFFER_SAFE(str, end, ' ');
+                       APPEND_BUFFER_SAFE(str, end,
+                                          (uint8_t)va_arg(args, int));
+                       while (--field_width > 0)
+                               APPEND_BUFFER_SAFE(str, end, ' ');
+                       continue;
+
+               case 's':
+                       s = va_arg(args, char *);
+                       len = strnlen(s, precision);
+
+                       if (!(flags & LEFT))
+                               while (len < field_width--)
+                                       APPEND_BUFFER_SAFE(str, end, ' ');
+                       for (i = 0; i < len; ++i)
+                               APPEND_BUFFER_SAFE(str, end, *s++);
+                       while (len < field_width--)
+                               APPEND_BUFFER_SAFE(str, end, ' ');
+                       continue;
+
+               case 'p':
+                       if (field_width == -1) {
+                               field_width = 2 * sizeof(void *);
+                               flags |= SPECIAL | SMALL | ZEROPAD;
+                       }
+                       str = number(str, end,
+                                    (uint64_t)va_arg(args, void *), 16,
+                                    field_width, precision, flags);
+                       continue;
+
+               case 'n':
+                       if (qualifier == 'l') {
+                               long *ip = va_arg(args, long *);
+                               *ip = (str - buf);
+                       } else {
+                               int *ip = va_arg(args, int *);
+                               *ip = (str - buf);
+                       }
+                       continue;
+
+               case '%':
+                       APPEND_BUFFER_SAFE(str, end, '%');
+                       continue;
+
+               /* integer number formats - set up the flags and "break" */
+               case 'o':
+                       base = 8;
+                       break;
+
+               case 'x':
+                       flags |= SMALL;
+               case 'X':
+                       base = 16;
+                       break;
+
+               case 'd':
+               case 'i':
+                       flags |= SIGN;
+               case 'u':
+                       break;
+
+               default:
+                       APPEND_BUFFER_SAFE(str, end, '%');
+                       if (*fmt)
+                               APPEND_BUFFER_SAFE(str, end, *fmt);
+                       else
+                               --fmt;
+                       continue;
+               }
+               if (qualifier == 'l')
+                       num = va_arg(args, uint64_t);
+               else if (qualifier == 'h') {
+                       num = (uint16_t)va_arg(args, int);
+                       if (flags & SIGN)
+                               num = (int16_t)num;
+               } else if (flags & SIGN)
+                       num = va_arg(args, int);
+               else
+                       num = va_arg(args, uint32_t);
+               str = number(str, end, num, base, field_width, precision, flags);
+       }
+
+       GUEST_ASSERT(str < end);
+       *str = '\0';
+       return str - buf;
+}
+
+int guest_snprintf(char *buf, int n, const char *fmt, ...)
+{
+       va_list va;
+       int len;
+
+       va_start(va, fmt);
+       len = guest_vsnprintf(buf, n, fmt, va);
+       va_end(va);
+
+       return len;
+}

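guest_sprintf.c gives guest code a freestanding printf core; there is no libc inside the guest, which is also why the Makefile change above adds -fno-builtin-strnlen. A hedged usage sketch (hypothetical helper built on guest_snprintf() and GUEST_PRINTF()):

    #include "test_util.h"      /* declares guest_snprintf() */
    #include "ucall_common.h"

    static void guest_report(uint64_t gpa, int attempts)
    {
            char msg[128];

            /* Format in the guest; the host only sees the finished string. */
            guest_snprintf(msg, sizeof(msg),
                           "mapped gpa 0x%lx after %d attempt(s)", gpa, attempts);
            GUEST_PRINTF("%s\n", msg);
    }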
@@ -312,6 +312,7 @@ static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
                                     uint32_t nr_runnable_vcpus,
                                     uint64_t extra_mem_pages)
 {
+       uint64_t page_size = vm_guest_mode_params[mode].page_size;
        uint64_t nr_pages;
 
        TEST_ASSERT(nr_runnable_vcpus,

@@ -340,6 +341,9 @@ static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
         */
        nr_pages += (nr_pages + extra_mem_pages) / PTES_PER_MIN_PAGE * 2;
 
+       /* Account for the number of pages needed by ucall. */
+       nr_pages += ucall_nr_pages_required(page_size);
+
        return vm_adjust_num_guest_pages(mode, nr_pages);
 }

@@ -994,7 +998,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
        if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
                alignment = max(backing_src_pagesz, alignment);
 
-       ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz));
+       TEST_ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz));
 
        /* Add enough memory to align up if necessary */
        if (alignment > 1)

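The new accounting is plain ceiling division: the shared ucall_header (one struct ucall per possible vCPU, each now carrying a 1 KiB buffer) is rounded up to whole guest pages. A runnable sketch of the arithmetic with assumed, illustrative sizes (the real values come from sizeof(struct ucall_header) and the VM's page size):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t align_up(uint64_t x, uint64_t a)
    {
            return (x + a - 1) & ~(a - 1);  /* 'a' must be a power of two */
    }

    int main(void)
    {
            uint64_t header_size = 512 * 1088;  /* assumed: vCPUs * sizeof(struct ucall) */
            uint64_t page_size = 4096;

            /* Mirrors ucall_nr_pages_required(): ceil(header_size / page_size). */
            printf("%llu pages\n", (unsigned long long)
                   (align_up(header_size, page_size) / page_size));
            return 0;
    }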
@@ -10,10 +10,6 @@
 #include "kvm_util.h"
 #include "processor.h"
 
-void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
-{
-}
-
 struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
                        unsigned long arg1, unsigned long arg2,
                        unsigned long arg3, unsigned long arg4,

@@ -40,13 +36,6 @@ struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
        return ret;
 }
 
-void ucall_arch_do_ucall(vm_vaddr_t uc)
-{
-       sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT,
-                 KVM_RISCV_SELFTESTS_SBI_UCALL,
-                 uc, 0, 0, 0, 0, 0);
-}
-
 void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
 {
        struct kvm_run *run = vcpu->run;

@@ -6,16 +6,6 @@
  */
 #include "kvm_util.h"
 
-void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
-{
-}
-
-void ucall_arch_do_ucall(vm_vaddr_t uc)
-{
-       /* Exit via DIAGNOSE 0x501 (normally used for breakpoints) */
-       asm volatile ("diag 0,%0,0x501" : : "a"(uc) : "memory");
-}
-
 void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
 {
        struct kvm_run *run = vcpu->run;

@@ -634,7 +634,6 @@ static void node_reduce(struct sparsebit *s, struct node *nodep)
                        tmp = node_prev(s, nodep);
 
                        node_rm(s, nodep);
-                       nodep = NULL;
 
                        nodep = tmp;
                        reduction_performed = true;

@@ -37,3 +37,12 @@ void *memset(void *s, int c, size_t count)
                *xs++ = c;
        return s;
 }
+
+size_t strnlen(const char *s, size_t count)
+{
+       const char *sc;
+
+       for (sc = s; count-- && *sc != '\0'; ++sc)
+               /* nothing */;
+       return sc - s;
+}

@@ -11,6 +11,11 @@ struct ucall_header {
        struct ucall ucalls[KVM_MAX_VCPUS];
 };
 
+int ucall_nr_pages_required(uint64_t page_size)
+{
+       return align_up(sizeof(struct ucall_header), page_size) / page_size;
+}
+
 /*
  * ucall_pool holds per-VM values (global data is duplicated by each VM), it
  * must not be accessed from host code.

@@ -70,6 +75,45 @@ static void ucall_free(struct ucall *uc)
        clear_bit(uc - ucall_pool->ucalls, ucall_pool->in_use);
 }
 
+void ucall_assert(uint64_t cmd, const char *exp, const char *file,
+                 unsigned int line, const char *fmt, ...)
+{
+       struct ucall *uc;
+       va_list va;
+
+       uc = ucall_alloc();
+       uc->cmd = cmd;
+
+       WRITE_ONCE(uc->args[GUEST_ERROR_STRING], (uint64_t)(exp));
+       WRITE_ONCE(uc->args[GUEST_FILE], (uint64_t)(file));
+       WRITE_ONCE(uc->args[GUEST_LINE], line);
+
+       va_start(va, fmt);
+       guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va);
+       va_end(va);
+
+       ucall_arch_do_ucall((vm_vaddr_t)uc->hva);
+
+       ucall_free(uc);
+}
+
+void ucall_fmt(uint64_t cmd, const char *fmt, ...)
+{
+       struct ucall *uc;
+       va_list va;
+
+       uc = ucall_alloc();
+       uc->cmd = cmd;
+
+       va_start(va, fmt);
+       guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va);
+       va_end(va);
+
+       ucall_arch_do_ucall((vm_vaddr_t)uc->hva);
+
+       ucall_free(uc);
+}
+
 void ucall(uint64_t cmd, int nargs, ...)
 {
        struct ucall *uc;

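Host-side, everything produced by ucall_fmt()/ucall_assert() is consumed through get_ucall(); UCALL_PRINTF and UCALL_ABORT both carry the finished string in uc.buffer. A hedged sketch of a typical vCPU run loop (mirrors the pattern guest_print_test.c uses above):

    #include "kvm_util.h"
    #include "ucall_common.h"

    static void run_until_done(struct kvm_vcpu *vcpu)
    {
            struct ucall uc;

            for (;;) {
                    vcpu_run(vcpu);

                    switch (get_ucall(vcpu, &uc)) {
                    case UCALL_PRINTF:
                            REPORT_GUEST_PRINTF(uc);  /* pr_info("%s", uc.buffer) */
                            break;
                    case UCALL_ABORT:
                            REPORT_GUEST_ASSERT(uc);  /* fails the test with uc.buffer */
                            break;
                    case UCALL_DONE:
                            return;
                    default:
                            TEST_FAIL("Unknown ucall %lu", uc.cmd);
                    }
            }
    }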
@@ -1074,11 +1074,6 @@ static bool kvm_fixup_exception(struct ex_regs *regs)
        return true;
 }
 
-void kvm_exit_unexpected_vector(uint32_t value)
-{
-       ucall(UCALL_UNHANDLED, 1, value);
-}
-
 void route_exception(struct ex_regs *regs)
 {
        typedef void(*handler)(struct ex_regs *);

@@ -1092,7 +1087,10 @@ void route_exception(struct ex_regs *regs)
        if (kvm_fixup_exception(regs))
                return;
 
-       kvm_exit_unexpected_vector(regs->vector);
+       ucall_assert(UCALL_UNHANDLED,
+                    "Unhandled exception in guest", __FILE__, __LINE__,
+                    "Unhandled exception '0x%lx' at guest RIP '0x%lx'",
+                    regs->vector, regs->rip);
 }
 
 void vm_init_descriptor_tables(struct kvm_vm *vm)

@@ -1135,12 +1133,8 @@ void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
 {
        struct ucall uc;
 
-       if (get_ucall(vcpu, &uc) == UCALL_UNHANDLED) {
-               uint64_t vector = uc.args[0];
-
-               TEST_FAIL("Unexpected vectored event in guest (vector:0x%lx)",
-                         vector);
-       }
+       if (get_ucall(vcpu, &uc) == UCALL_UNHANDLED)
+               REPORT_GUEST_ASSERT(uc);
 }
 
 const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,

@@ -8,14 +8,38 @@
 
 #define UCALL_PIO_PORT ((uint16_t)0x1000)
 
-void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
-{
-}
-
 void ucall_arch_do_ucall(vm_vaddr_t uc)
 {
-       asm volatile("in %[port], %%al"
-               : : [port] "d" (UCALL_PIO_PORT), "D" (uc) : "rax", "memory");
+       /*
+        * FIXME: Revert this hack (the entire commit that added it) once nVMX
+        * preserves L2 GPRs across a nested VM-Exit. If a ucall from L2, e.g.
+        * to do a GUEST_SYNC(), lands the vCPU in L1, any and all GPRs can be
+        * clobbered by L1. Save and restore non-volatile GPRs (clobbering RBP
+        * in particular is problematic) along with RDX and RDI (which are
+        * inputs), and clobber volatile GPRs. *sigh*
+        */
+#define HORRIFIC_L2_UCALL_CLOBBER_HACK \
+       "rcx", "rsi", "r8", "r9", "r10", "r11"
+
+       asm volatile("push %%rbp\n\t"
+                    "push %%r15\n\t"
+                    "push %%r14\n\t"
+                    "push %%r13\n\t"
+                    "push %%r12\n\t"
+                    "push %%rbx\n\t"
+                    "push %%rdx\n\t"
+                    "push %%rdi\n\t"
+                    "in %[port], %%al\n\t"
+                    "pop %%rdi\n\t"
+                    "pop %%rdx\n\t"
+                    "pop %%rbx\n\t"
+                    "pop %%r12\n\t"
+                    "pop %%r13\n\t"
+                    "pop %%r14\n\t"
+                    "pop %%r15\n\t"
+                    "pop %%rbp\n\t"
+                    : : [port] "d" (UCALL_PIO_PORT), "D" (uc) : "rax", "memory",
+                    HORRIFIC_L2_UCALL_CLOBBER_HACK);
 }
 
 void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)

@@ -55,7 +55,7 @@ static void rendezvous_with_boss(void)
 static void run_vcpu(struct kvm_vcpu *vcpu)
 {
        vcpu_run(vcpu);
-       ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
+       TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
 }
 
 static void *vcpu_worker(void *data)

@@ -157,7 +157,7 @@ static void *vcpu_worker(void *__data)
                        goto done;
                break;
        case UCALL_ABORT:
-               REPORT_GUEST_ASSERT_1(uc, "val = %lu");
+               REPORT_GUEST_ASSERT(uc);
                break;
        case UCALL_DONE:
                goto done;

@@ -560,7 +560,7 @@ static void guest_code_test_memslot_rw(void)
             ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += page_size) {
                uint64_t val = *(uint64_t *)ptr;
 
-               GUEST_ASSERT_1(val == MEM_TEST_VAL_2, val);
+               GUEST_ASSERT_EQ(val, MEM_TEST_VAL_2);
                *(uint64_t *)ptr = 0;
        }

@ -237,8 +237,8 @@ static void test_get_cmma_basic(void)
|
||||
|
||||
/* GET_CMMA_BITS without CMMA enabled should fail */
|
||||
rc = vm_get_cmma_bits(vm, 0, &errno_out);
|
||||
ASSERT_EQ(rc, -1);
|
||||
ASSERT_EQ(errno_out, ENXIO);
|
||||
TEST_ASSERT_EQ(rc, -1);
|
||||
TEST_ASSERT_EQ(errno_out, ENXIO);
|
||||
|
||||
enable_cmma(vm);
|
||||
vcpu = vm_vcpu_add(vm, 1, guest_do_one_essa);
|
||||
@ -247,31 +247,31 @@ static void test_get_cmma_basic(void)
|
||||
|
||||
/* GET_CMMA_BITS without migration mode and without peeking should fail */
|
||||
rc = vm_get_cmma_bits(vm, 0, &errno_out);
|
||||
ASSERT_EQ(rc, -1);
|
||||
ASSERT_EQ(errno_out, EINVAL);
|
||||
TEST_ASSERT_EQ(rc, -1);
|
||||
TEST_ASSERT_EQ(errno_out, EINVAL);

/* GET_CMMA_BITS without migration mode and with peeking should work */
rc = vm_get_cmma_bits(vm, KVM_S390_CMMA_PEEK, &errno_out);
ASSERT_EQ(rc, 0);
ASSERT_EQ(errno_out, 0);
TEST_ASSERT_EQ(rc, 0);
TEST_ASSERT_EQ(errno_out, 0);

enable_dirty_tracking(vm);
enable_migration_mode(vm);

/* GET_CMMA_BITS with invalid flags */
rc = vm_get_cmma_bits(vm, 0xfeedc0fe, &errno_out);
ASSERT_EQ(rc, -1);
ASSERT_EQ(errno_out, EINVAL);
TEST_ASSERT_EQ(rc, -1);
TEST_ASSERT_EQ(errno_out, EINVAL);

kvm_vm_free(vm);
}

static void assert_exit_was_hypercall(struct kvm_vcpu *vcpu)
{
ASSERT_EQ(vcpu->run->exit_reason, 13);
ASSERT_EQ(vcpu->run->s390_sieic.icptcode, 4);
ASSERT_EQ(vcpu->run->s390_sieic.ipa, 0x8300);
ASSERT_EQ(vcpu->run->s390_sieic.ipb, 0x5010000);
TEST_ASSERT_EQ(vcpu->run->exit_reason, 13);
TEST_ASSERT_EQ(vcpu->run->s390_sieic.icptcode, 4);
TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipa, 0x8300);
TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipb, 0x5010000);
}

static void test_migration_mode(void)

@ -283,8 +283,8 @@ static void test_migration_mode(void)

/* enabling migration mode on a VM without memory should fail */
rc = __enable_migration_mode(vm);
ASSERT_EQ(rc, -1);
ASSERT_EQ(errno, EINVAL);
TEST_ASSERT_EQ(rc, -1);
TEST_ASSERT_EQ(errno, EINVAL);
TEST_ASSERT(!is_migration_mode_on(vm), "migration mode should still be off");
errno = 0;

@ -304,8 +304,8 @@ static void test_migration_mode(void)

/* migration mode when memslots have dirty tracking off should fail */
rc = __enable_migration_mode(vm);
ASSERT_EQ(rc, -1);
ASSERT_EQ(errno, EINVAL);
TEST_ASSERT_EQ(rc, -1);
TEST_ASSERT_EQ(errno, EINVAL);
TEST_ASSERT(!is_migration_mode_on(vm), "migration mode should still be off");
errno = 0;

@ -314,7 +314,7 @@ static void test_migration_mode(void)

/* enabling migration mode should work now */
rc = __enable_migration_mode(vm);
ASSERT_EQ(rc, 0);
TEST_ASSERT_EQ(rc, 0);
TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
errno = 0;

@ -350,7 +350,7 @@ static void test_migration_mode(void)
*/
vm_mem_region_set_flags(vm, TEST_DATA_TWO_MEMSLOT, KVM_MEM_LOG_DIRTY_PAGES);
rc = __enable_migration_mode(vm);
ASSERT_EQ(rc, 0);
TEST_ASSERT_EQ(rc, 0);
TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
errno = 0;

@ -394,9 +394,9 @@ static void assert_all_slots_cmma_dirty(struct kvm_vm *vm)
};
memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
ASSERT_EQ(args.count, MAIN_PAGE_COUNT);
ASSERT_EQ(args.remaining, TEST_DATA_PAGE_COUNT);
ASSERT_EQ(args.start_gfn, 0);
TEST_ASSERT_EQ(args.count, MAIN_PAGE_COUNT);
TEST_ASSERT_EQ(args.remaining, TEST_DATA_PAGE_COUNT);
TEST_ASSERT_EQ(args.start_gfn, 0);

/* ...and then - after a hole - the TEST_DATA memslot should follow */
args = (struct kvm_s390_cmma_log){

@ -407,9 +407,9 @@ static void assert_all_slots_cmma_dirty(struct kvm_vm *vm)
};
memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
ASSERT_EQ(args.count, TEST_DATA_PAGE_COUNT);
ASSERT_EQ(args.start_gfn, TEST_DATA_START_GFN);
ASSERT_EQ(args.remaining, 0);
TEST_ASSERT_EQ(args.count, TEST_DATA_PAGE_COUNT);
TEST_ASSERT_EQ(args.start_gfn, TEST_DATA_START_GFN);
TEST_ASSERT_EQ(args.remaining, 0);

/* ...and nothing else should be there */
args = (struct kvm_s390_cmma_log){

@ -420,9 +420,9 @@ static void assert_all_slots_cmma_dirty(struct kvm_vm *vm)
};
memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
ASSERT_EQ(args.count, 0);
ASSERT_EQ(args.start_gfn, 0);
ASSERT_EQ(args.remaining, 0);
TEST_ASSERT_EQ(args.count, 0);
TEST_ASSERT_EQ(args.start_gfn, 0);
TEST_ASSERT_EQ(args.remaining, 0);
}

/**

@ -498,11 +498,11 @@ static void assert_cmma_dirty(u64 first_dirty_gfn,
u64 dirty_gfn_count,
const struct kvm_s390_cmma_log *res)
{
ASSERT_EQ(res->start_gfn, first_dirty_gfn);
ASSERT_EQ(res->count, dirty_gfn_count);
TEST_ASSERT_EQ(res->start_gfn, first_dirty_gfn);
TEST_ASSERT_EQ(res->count, dirty_gfn_count);
for (size_t i = 0; i < dirty_gfn_count; i++)
ASSERT_EQ(cmma_value_buf[0], 0x0); /* stable state */
ASSERT_EQ(cmma_value_buf[dirty_gfn_count], 0xff); /* not touched */
TEST_ASSERT_EQ(cmma_value_buf[0], 0x0); /* stable state */
TEST_ASSERT_EQ(cmma_value_buf[dirty_gfn_count], 0xff); /* not touched */
}

static void test_get_skip_holes(void)
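
Note on the conversions above and below: this series renames the selftests' ASSERT_EQ() to TEST_ASSERT_EQ() to avoid clashing with other frameworks' macros. A rough sketch of the renamed helper, assuming it keeps ASSERT_EQ()'s message formatting (the exact definition lives in the selftests' test_util.h and may differ in detail):

#define TEST_ASSERT_EQ(a, b)						\
do {									\
	typeof(a) __a = (a);						\
	typeof(b) __b = (b);						\
	TEST_ASSERT(__a == __b,						\
		    "ASSERT_EQ(%s, %s) failed.\n\t%s is %#lx\n\t%s is %#lx", \
		    #a, #b, #a, (unsigned long)__a, #b, (unsigned long)__b); \
} while (0)
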
@ -4,7 +4,6 @@
*
* Copyright (C) 2019, Red Hat, Inc.
*/

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

@ -279,10 +278,10 @@ enum stage {
vcpu_run(__vcpu); \
get_ucall(__vcpu, &uc); \
if (uc.cmd == UCALL_ABORT) { \
REPORT_GUEST_ASSERT_2(uc, "hints: %lu, %lu"); \
REPORT_GUEST_ASSERT(uc); \
} \
ASSERT_EQ(uc.cmd, UCALL_SYNC); \
ASSERT_EQ(uc.args[1], __stage); \
TEST_ASSERT_EQ(uc.cmd, UCALL_SYNC); \
TEST_ASSERT_EQ(uc.args[1], __stage); \
}) \

static void prepare_mem12(void)

@ -469,7 +468,7 @@ static __uint128_t cut_to_size(int size, __uint128_t val)
case 16:
return val;
}
GUEST_ASSERT_1(false, "Invalid size");
GUEST_FAIL("Invalid size = %u", size);
return 0;
}

@ -598,7 +597,7 @@ static bool _cmpxchg(int size, void *target, __uint128_t *old_addr, __uint128_t
return ret;
}
}
GUEST_ASSERT_1(false, "Invalid size");
GUEST_FAIL("Invalid size = %u", size);
return 0;
}

@ -808,7 +807,7 @@ static void test_termination(void)
HOST_SYNC(t.vcpu, STAGE_IDLED);
MOP(t.vm, ABSOLUTE, READ, &teid, sizeof(teid), GADDR(prefix + 168));
/* Bits 56, 60, 61 form a code, 0 being the only one allowing for termination */
ASSERT_EQ(teid & teid_mask, 0);
TEST_ASSERT_EQ(teid & teid_mask, 0);

kvm_vm_free(t.kvm_vm);
}
@ -4,7 +4,6 @@
*
* Copyright IBM Corp. 2021
*/

#include <sys/mman.h>
#include "test_util.h"
#include "kvm_util.h"

@ -156,7 +155,9 @@ static enum stage perform_next_stage(int *i, bool mapped_0)
!mapped_0;
if (!skip) {
result = test_protection(tests[*i].addr, tests[*i].key);
GUEST_ASSERT_2(result == tests[*i].expected, *i, result);
__GUEST_ASSERT(result == tests[*i].expected,
"Wanted %u, got %u, for i = %u",
tests[*i].expected, result, *i);
}
}
return stage;

@ -190,9 +191,9 @@ static void guest_code(void)
vcpu_run(__vcpu); \
get_ucall(__vcpu, &uc); \
if (uc.cmd == UCALL_ABORT) \
REPORT_GUEST_ASSERT_2(uc, "hints: %lu, %lu"); \
ASSERT_EQ(uc.cmd, UCALL_SYNC); \
ASSERT_EQ(uc.args[1], __stage); \
REPORT_GUEST_ASSERT(uc); \
TEST_ASSERT_EQ(uc.cmd, UCALL_SYNC); \
TEST_ASSERT_EQ(uc.args[1], __stage); \
})

#define HOST_SYNC(vcpu, stage) \
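
The GUEST_FAIL()/__GUEST_ASSERT() calls replacing the numbered GUEST_ASSERT_N() variants ride on the new guest printf() support. A minimal sketch of the plumbing, assuming the ucall_assert() helper this series adds to ucall_common.h (exact signatures may differ):

#define __GUEST_ASSERT(_condition, _fmt, _args...)				\
do {										\
	if (!(_condition))							\
		ucall_assert(UCALL_ABORT, #_condition, __FILE__, __LINE__,	\
			     _fmt, ##_args);					\
} while (0)

#define GUEST_ASSERT(_condition)	\
	__GUEST_ASSERT(_condition, "Failed guest assert: %s", #_condition)

#define GUEST_FAIL(_fmt, _args...)	__GUEST_ASSERT(false, _fmt, ##_args)

The host side then reports the formatted string verbatim via REPORT_GUEST_ASSERT(uc), instead of forcing each call site to know how many raw arguments the guest stashed in the ucall.
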
@ -88,7 +88,7 @@ static void *vcpu_worker(void *data)
}

if (run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT)
REPORT_GUEST_ASSERT_1(uc, "val = %lu");
REPORT_GUEST_ASSERT(uc);

return NULL;
}

@ -156,19 +156,22 @@ static void guest_code_move_memory_region(void)
* window where the memslot is invalid is usually quite small.
*/
val = guest_spin_on_val(0);
GUEST_ASSERT_1(val == 1 || val == MMIO_VAL, val);
__GUEST_ASSERT(val == 1 || val == MMIO_VAL,
"Expected '1' or MMIO ('%llx'), got '%llx'", MMIO_VAL, val);

/* Spin until the misaligning memory region move completes. */
val = guest_spin_on_val(MMIO_VAL);
GUEST_ASSERT_1(val == 1 || val == 0, val);
__GUEST_ASSERT(val == 1 || val == 0,
"Expected '0' or '1' (no MMIO), got '%llx'", val);

/* Spin until the memory region starts to get re-aligned. */
val = guest_spin_on_val(0);
GUEST_ASSERT_1(val == 1 || val == MMIO_VAL, val);
__GUEST_ASSERT(val == 1 || val == MMIO_VAL,
"Expected '1' or MMIO ('%llx'), got '%llx'", MMIO_VAL, val);

/* Spin until the re-aligning memory region move completes. */
val = guest_spin_on_val(MMIO_VAL);
GUEST_ASSERT_1(val == 1, val);
GUEST_ASSERT_EQ(val, 1);

GUEST_DONE();
}

@ -224,15 +227,15 @@ static void guest_code_delete_memory_region(void)

/* Spin until the memory region is deleted. */
val = guest_spin_on_val(0);
GUEST_ASSERT_1(val == MMIO_VAL, val);
GUEST_ASSERT_EQ(val, MMIO_VAL);

/* Spin until the memory region is recreated. */
val = guest_spin_on_val(MMIO_VAL);
GUEST_ASSERT_1(val == 0, val);
GUEST_ASSERT_EQ(val, 0);

/* Spin until the memory region is deleted. */
val = guest_spin_on_val(0);
GUEST_ASSERT_1(val == MMIO_VAL, val);
GUEST_ASSERT_EQ(val, MMIO_VAL);

asm("1:\n\t"
".pushsection .rodata\n\t"

@ -249,7 +252,7 @@ static void guest_code_delete_memory_region(void)
"final_rip_end: .quad 1b\n\t"
".popsection");

GUEST_ASSERT_1(0, 0);
GUEST_ASSERT(0);
}

static void test_delete_memory_region(void)
@ -31,8 +31,8 @@ static uint64_t guest_stolen_time[NR_VCPUS];
static void check_status(struct kvm_steal_time *st)
{
GUEST_ASSERT(!(READ_ONCE(st->version) & 1));
GUEST_ASSERT(READ_ONCE(st->flags) == 0);
GUEST_ASSERT(READ_ONCE(st->preempted) == 0);
GUEST_ASSERT_EQ(READ_ONCE(st->flags), 0);
GUEST_ASSERT_EQ(READ_ONCE(st->preempted), 0);
}

static void guest_code(int cpu)

@ -40,7 +40,7 @@ static void guest_code(int cpu)
struct kvm_steal_time *st = st_gva[cpu];
uint32_t version;

GUEST_ASSERT(rdmsr(MSR_KVM_STEAL_TIME) == ((uint64_t)st_gva[cpu] | KVM_MSR_ENABLED));
GUEST_ASSERT_EQ(rdmsr(MSR_KVM_STEAL_TIME), ((uint64_t)st_gva[cpu] | KVM_MSR_ENABLED));

memset(st, 0, sizeof(*st));
GUEST_SYNC(0);

@ -122,8 +122,8 @@ static int64_t smccc(uint32_t func, uint64_t arg)

static void check_status(struct st_time *st)
{
GUEST_ASSERT(READ_ONCE(st->rev) == 0);
GUEST_ASSERT(READ_ONCE(st->attr) == 0);
GUEST_ASSERT_EQ(READ_ONCE(st->rev), 0);
GUEST_ASSERT_EQ(READ_ONCE(st->attr), 0);
}

static void guest_code(int cpu)

@ -132,15 +132,15 @@ static void guest_code(int cpu)
int64_t status;

status = smccc(SMCCC_ARCH_FEATURES, PV_TIME_FEATURES);
GUEST_ASSERT(status == 0);
GUEST_ASSERT_EQ(status, 0);
status = smccc(PV_TIME_FEATURES, PV_TIME_FEATURES);
GUEST_ASSERT(status == 0);
GUEST_ASSERT_EQ(status, 0);
status = smccc(PV_TIME_FEATURES, PV_TIME_ST);
GUEST_ASSERT(status == 0);
GUEST_ASSERT_EQ(status, 0);

status = smccc(PV_TIME_ST, 0);
GUEST_ASSERT(status != -1);
GUEST_ASSERT(status == (ulong)st_gva[cpu]);
GUEST_ASSERT_NE(status, -1);
GUEST_ASSERT_EQ(status, (ulong)st_gva[cpu]);

st = (struct st_time *)status;
GUEST_SYNC(0);
@ -35,10 +35,10 @@ static void test_guest_cpuids(struct kvm_cpuid2 *guest_cpuid)
guest_cpuid->entries[i].index,
&eax, &ebx, &ecx, &edx);

GUEST_ASSERT(eax == guest_cpuid->entries[i].eax &&
ebx == guest_cpuid->entries[i].ebx &&
ecx == guest_cpuid->entries[i].ecx &&
edx == guest_cpuid->entries[i].edx);
GUEST_ASSERT_EQ(eax, guest_cpuid->entries[i].eax);
GUEST_ASSERT_EQ(ebx, guest_cpuid->entries[i].ebx);
GUEST_ASSERT_EQ(ecx, guest_cpuid->entries[i].ecx);
GUEST_ASSERT_EQ(edx, guest_cpuid->entries[i].edx);
}

}

@ -51,7 +51,7 @@ static void guest_main(struct kvm_cpuid2 *guest_cpuid)

GUEST_SYNC(2);

GUEST_ASSERT(this_cpu_property(X86_PROPERTY_MAX_KVM_LEAF) == 0x40000001);
GUEST_ASSERT_EQ(this_cpu_property(X86_PROPERTY_MAX_KVM_LEAF), 0x40000001);

GUEST_DONE();
}

@ -116,7 +116,7 @@ static void run_vcpu(struct kvm_vcpu *vcpu, int stage)
case UCALL_DONE:
return;
case UCALL_ABORT:
REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx");
REPORT_GUEST_ASSERT(uc);
default:
TEST_ASSERT(false, "Unexpected exit: %s",
exit_reason_str(vcpu->run->exit_reason));
@ -72,7 +72,7 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)

vcpu_run(vcpu);

ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_SYNC);
TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_SYNC);

vcpu_last_completed_iteration[vcpu_idx] = current_iteration;

@ -179,12 +179,12 @@ static void run_test(enum vm_guest_mode mode, void *unused)
* with that capability.
*/
if (dirty_log_manual_caps) {
ASSERT_EQ(stats_clear_pass[0].hugepages, 0);
ASSERT_EQ(stats_clear_pass[0].pages_4k, total_4k_pages);
ASSERT_EQ(stats_dirty_logging_enabled.hugepages, stats_populated.hugepages);
TEST_ASSERT_EQ(stats_clear_pass[0].hugepages, 0);
TEST_ASSERT_EQ(stats_clear_pass[0].pages_4k, total_4k_pages);
TEST_ASSERT_EQ(stats_dirty_logging_enabled.hugepages, stats_populated.hugepages);
} else {
ASSERT_EQ(stats_dirty_logging_enabled.hugepages, 0);
ASSERT_EQ(stats_dirty_logging_enabled.pages_4k, total_4k_pages);
TEST_ASSERT_EQ(stats_dirty_logging_enabled.hugepages, 0);
TEST_ASSERT_EQ(stats_dirty_logging_enabled.pages_4k, total_4k_pages);
}

/*

@ -192,9 +192,9 @@ static void run_test(enum vm_guest_mode mode, void *unused)
* memory again, the page counts should be the same as they were
* right after initial population of memory.
*/
ASSERT_EQ(stats_populated.pages_4k, stats_repopulated.pages_4k);
ASSERT_EQ(stats_populated.pages_2m, stats_repopulated.pages_2m);
ASSERT_EQ(stats_populated.pages_1g, stats_repopulated.pages_1g);
TEST_ASSERT_EQ(stats_populated.pages_4k, stats_repopulated.pages_4k);
TEST_ASSERT_EQ(stats_populated.pages_2m, stats_repopulated.pages_2m);
TEST_ASSERT_EQ(stats_populated.pages_1g, stats_repopulated.pages_1g);
}

static void help(char *name)
@ -35,7 +35,7 @@ int main(int argc, char *argv[])
vcpu_run(vcpu);
handle_flds_emulation_failure_exit(vcpu);
vcpu_run(vcpu);
ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);

kvm_vm_free(vm);
return 0;
@ -8,7 +8,6 @@
* Copyright 2022 Google LLC
* Author: Vipin Sharma <vipinsh@google.com>
*/

#include "kvm_util.h"
#include "processor.h"
#include "hyperv.h"

@ -84,7 +83,7 @@ int main(void)

switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT_2(uc, "arg1 = %ld, arg2 = %ld");
REPORT_GUEST_ASSERT(uc);
break;
case UCALL_DONE:
break;
@ -53,16 +53,21 @@ static void guest_msr(struct msr_data *msr)
vector = rdmsr_safe(msr->idx, &msr_val);

if (msr->fault_expected)
GUEST_ASSERT_3(vector == GP_VECTOR, msr->idx, vector, GP_VECTOR);
__GUEST_ASSERT(vector == GP_VECTOR,
"Expected #GP on %sMSR(0x%x), got vector '0x%x'",
msr->write ? "WR" : "RD", msr->idx, vector);
else
GUEST_ASSERT_3(!vector, msr->idx, vector, 0);
__GUEST_ASSERT(!vector,
"Expected success on %sMSR(0x%x), got vector '0x%x'",
msr->write ? "WR" : "RD", msr->idx, vector);

if (vector || is_write_only_msr(msr->idx))
goto done;

if (msr->write)
GUEST_ASSERT_3(msr_val == msr->write_val, msr->idx,
msr_val, msr->write_val);
__GUEST_ASSERT(msr_val == msr->write_val,
"WRMSR(0x%x) to '0x%llx', RDMSR read '0x%llx'",
msr->idx, msr->write_val, msr_val);

/* Invariant TSC bit appears when TSC invariant control MSR is written to */
if (msr->idx == HV_X64_MSR_TSC_INVARIANT_CONTROL) {

@ -82,7 +87,7 @@ static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
u64 res, input, output;
uint8_t vector;

GUEST_ASSERT(hcall->control);
GUEST_ASSERT_NE(hcall->control, 0);

wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);

@ -96,10 +101,14 @@ static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)

vector = __hyperv_hypercall(hcall->control, input, output, &res);
if (hcall->ud_expected) {
GUEST_ASSERT_2(vector == UD_VECTOR, hcall->control, vector);
__GUEST_ASSERT(vector == UD_VECTOR,
"Expected #UD for control '%u', got vector '0x%x'",
hcall->control, vector);
} else {
GUEST_ASSERT_2(!vector, hcall->control, vector);
GUEST_ASSERT_2(res == hcall->expect, hcall->expect, res);
__GUEST_ASSERT(!vector,
"Expected no exception for control '%u', got vector '0x%x'",
hcall->control, vector);
GUEST_ASSERT_EQ(res, hcall->expect);
}

GUEST_DONE();

@ -495,7 +504,7 @@ static void guest_test_msrs_access(void)

switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT_3(uc, "MSR = %lx, arg1 = %lx, arg2 = %lx");
REPORT_GUEST_ASSERT(uc);
return;
case UCALL_DONE:
break;

@ -665,7 +674,7 @@ static void guest_test_hcalls_access(void)

switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT_2(uc, "arg1 = %lx, arg2 = %lx");
REPORT_GUEST_ASSERT(uc);
return;
case UCALL_DONE:
break;
@ -46,10 +46,10 @@ static void test_msr(struct msr_data *msr)
PR_MSR(msr);

vector = rdmsr_safe(msr->idx, &ignored);
GUEST_ASSERT_1(vector == GP_VECTOR, vector);
GUEST_ASSERT_EQ(vector, GP_VECTOR);

vector = wrmsr_safe(msr->idx, 0);
GUEST_ASSERT_1(vector == GP_VECTOR, vector);
GUEST_ASSERT_EQ(vector, GP_VECTOR);
}

struct hcall_data {

@ -77,7 +77,7 @@ static void test_hcall(struct hcall_data *hc)

PR_HCALL(hc);
r = kvm_hypercall(hc->nr, 0, 0, 0, 0);
GUEST_ASSERT(r == -KVM_ENOSYS);
GUEST_ASSERT_EQ(r, -KVM_ENOSYS);
}

static void guest_main(void)

@ -125,7 +125,7 @@ static void enter_guest(struct kvm_vcpu *vcpu)
pr_hcall(&uc);
break;
case UCALL_ABORT:
REPORT_GUEST_ASSERT_1(uc, "vector = %lu");
REPORT_GUEST_ASSERT(uc);
return;
case UCALL_DONE:
return;
@ -16,14 +16,25 @@ enum monitor_mwait_testcases {
MWAIT_DISABLED = BIT(2),
};

/*
* If both MWAIT and its quirk are disabled, MONITOR/MWAIT should #UD, in all
* other scenarios KVM should emulate them as nops.
*/
#define GUEST_ASSERT_MONITOR_MWAIT(insn, testcase, vector) \
do { \
bool fault_wanted = ((testcase) & MWAIT_QUIRK_DISABLED) && \
((testcase) & MWAIT_DISABLED); \
\
if (fault_wanted) \
__GUEST_ASSERT((vector) == UD_VECTOR, \
"Expected #UD on " insn " for testcase '0x%x', got '0x%x'", testcase, vector); \
else \
__GUEST_ASSERT(!(vector), \
"Expected success on " insn " for testcase '0x%x', got '0x%x'", testcase, vector); \
} while (0)

static void guest_monitor_wait(int testcase)
{
/*
* If both MWAIT and its quirk are disabled, MONITOR/MWAIT should #UD,
* in all other scenarios KVM should emulate them as nops.
*/
bool fault_wanted = (testcase & MWAIT_QUIRK_DISABLED) &&
(testcase & MWAIT_DISABLED);
u8 vector;

GUEST_SYNC(testcase);

@ -33,16 +44,10 @@ static void guest_monitor_wait(int testcase)
* intercept checks, so the inputs for MONITOR and MWAIT must be valid.
*/
vector = kvm_asm_safe("monitor", "a"(guest_monitor_wait), "c"(0), "d"(0));
if (fault_wanted)
GUEST_ASSERT_2(vector == UD_VECTOR, testcase, vector);
else
GUEST_ASSERT_2(!vector, testcase, vector);
GUEST_ASSERT_MONITOR_MWAIT("MONITOR", testcase, vector);

vector = kvm_asm_safe("mwait", "a"(guest_monitor_wait), "c"(0), "d"(0));
if (fault_wanted)
GUEST_ASSERT_2(vector == UD_VECTOR, testcase, vector);
else
GUEST_ASSERT_2(!vector, testcase, vector);
GUEST_ASSERT_MONITOR_MWAIT("MWAIT", testcase, vector);
}

static void guest_code(void)

@ -85,7 +90,7 @@ int main(int argc, char *argv[])
testcase = uc.args[1];
break;
case UCALL_ABORT:
REPORT_GUEST_ASSERT_2(uc, "testcase = %lx, vector = %ld");
REPORT_GUEST_ASSERT(uc);
goto done;
case UCALL_DONE:
goto done;
@ -180,9 +180,7 @@ static void assert_ucall_vector(struct kvm_vcpu *vcpu, int vector)
"Expected L2 to ask for %d, L2 says it's done", vector);
break;
case UCALL_ABORT:
TEST_FAIL("%s at %s:%ld (0x%lx != 0x%lx)",
(const char *)uc.args[0], __FILE__, uc.args[1],
uc.args[2], uc.args[3]);
REPORT_GUEST_ASSERT(uc);
break;
default:
TEST_FAIL("Expected L2 to ask for %d, got unexpected ucall %lu", vector, uc.cmd);

@ -247,12 +245,12 @@ int main(int argc, char *argv[])

/* Verify the pending events comes back out the same as it went in. */
vcpu_events_get(vcpu, &events);
ASSERT_EQ(events.flags & KVM_VCPUEVENT_VALID_PAYLOAD,
KVM_VCPUEVENT_VALID_PAYLOAD);
ASSERT_EQ(events.exception.pending, true);
ASSERT_EQ(events.exception.nr, SS_VECTOR);
ASSERT_EQ(events.exception.has_error_code, true);
ASSERT_EQ(events.exception.error_code, SS_ERROR_CODE);
TEST_ASSERT_EQ(events.flags & KVM_VCPUEVENT_VALID_PAYLOAD,
KVM_VCPUEVENT_VALID_PAYLOAD);
TEST_ASSERT_EQ(events.exception.pending, true);
TEST_ASSERT_EQ(events.exception.nr, SS_VECTOR);
TEST_ASSERT_EQ(events.exception.has_error_code, true);
TEST_ASSERT_EQ(events.exception.error_code, SS_ERROR_CODE);

/*
* Run for real with the pending #SS, L1 should get a VM-Exit due to
@ -27,6 +27,15 @@
#define ARCH_PERFMON_BRANCHES_RETIRED 5

#define NUM_BRANCHES 42
#define INTEL_PMC_IDX_FIXED 32

/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
#define MAX_FILTER_EVENTS 300
#define MAX_TEST_EVENTS 10

#define PMU_EVENT_FILTER_INVALID_ACTION (KVM_PMU_EVENT_DENY + 1)
#define PMU_EVENT_FILTER_INVALID_FLAGS (KVM_PMU_EVENT_FLAGS_VALID_MASK << 1)
#define PMU_EVENT_FILTER_INVALID_NEVENTS (MAX_FILTER_EVENTS + 1)

/*
* This is how the event selector and unit mask are stored in an AMD

@ -69,21 +78,33 @@

#define INST_RETIRED EVENT(0xc0, 0)

struct __kvm_pmu_event_filter {
__u32 action;
__u32 nevents;
__u32 fixed_counter_bitmap;
__u32 flags;
__u32 pad[4];
__u64 events[MAX_FILTER_EVENTS];
};

/*
* This event list comprises Intel's eight architectural events plus
* AMD's "retired branch instructions" for Zen[123] (and possibly
* other AMD CPUs).
*/
static const uint64_t event_list[] = {
EVENT(0x3c, 0),
INST_RETIRED,
EVENT(0x3c, 1),
EVENT(0x2e, 0x4f),
EVENT(0x2e, 0x41),
EVENT(0xc4, 0),
EVENT(0xc5, 0),
EVENT(0xa4, 1),
AMD_ZEN_BR_RETIRED,
static const struct __kvm_pmu_event_filter base_event_filter = {
.nevents = ARRAY_SIZE(base_event_filter.events),
.events = {
EVENT(0x3c, 0),
INST_RETIRED,
EVENT(0x3c, 1),
EVENT(0x2e, 0x4f),
EVENT(0x2e, 0x41),
EVENT(0xc4, 0),
EVENT(0xc5, 0),
EVENT(0xa4, 1),
AMD_ZEN_BR_RETIRED,
},
};

struct {
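
The fixed-size __kvm_pmu_event_filter wrapper above lets the tests build filters on the stack instead of malloc()ing the flexible-array uapi struct. The typical pattern the conversions below follow is: copy base_event_filter, tweak a field or two, then hand the wrapper to the ioctl via a cast (a sketch; `vm` is an illustrative handle):

struct __kvm_pmu_event_filter f = base_event_filter;

f.action = KVM_PMU_EVENT_DENY;
vm_ioctl(vm, KVM_SET_PMU_EVENT_FILTER, (struct kvm_pmu_event_filter *)&f);

The cast is safe because the wrapper's leading fields mirror struct kvm_pmu_event_filter's layout, with events[] sized to the kernel's maximum.
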
@ -225,48 +246,11 @@ static bool sanity_check_pmu(struct kvm_vcpu *vcpu)
return !r;
}

static struct kvm_pmu_event_filter *alloc_pmu_event_filter(uint32_t nevents)
{
struct kvm_pmu_event_filter *f;
int size = sizeof(*f) + nevents * sizeof(f->events[0]);

f = malloc(size);
TEST_ASSERT(f, "Out of memory");
memset(f, 0, size);
f->nevents = nevents;
return f;
}

static struct kvm_pmu_event_filter *
create_pmu_event_filter(const uint64_t event_list[], int nevents,
uint32_t action, uint32_t flags)
{
struct kvm_pmu_event_filter *f;
int i;

f = alloc_pmu_event_filter(nevents);
f->action = action;
f->flags = flags;
for (i = 0; i < nevents; i++)
f->events[i] = event_list[i];

return f;
}

static struct kvm_pmu_event_filter *event_filter(uint32_t action)
{
return create_pmu_event_filter(event_list,
ARRAY_SIZE(event_list),
action, 0);
}

/*
* Remove the first occurrence of 'event' (if any) from the filter's
* event list.
*/
static struct kvm_pmu_event_filter *remove_event(struct kvm_pmu_event_filter *f,
uint64_t event)
static void remove_event(struct __kvm_pmu_event_filter *f, uint64_t event)
{
bool found = false;
int i;

@ -279,7 +263,6 @@ static struct kvm_pmu_event_filter *remove_event(struct kvm_pmu_event_filter *f,
}
if (found)
f->nevents--;
return f;
}

#define ASSERT_PMC_COUNTING_INSTRUCTIONS() \

@ -315,66 +298,73 @@ static void test_without_filter(struct kvm_vcpu *vcpu)
}

static void test_with_filter(struct kvm_vcpu *vcpu,
struct kvm_pmu_event_filter *f)
struct __kvm_pmu_event_filter *__f)
{
struct kvm_pmu_event_filter *f = (void *)__f;

vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
run_vcpu_and_sync_pmc_results(vcpu);
}

static void test_amd_deny_list(struct kvm_vcpu *vcpu)
{
uint64_t event = EVENT(0x1C2, 0);
struct kvm_pmu_event_filter *f;
struct __kvm_pmu_event_filter f = {
.action = KVM_PMU_EVENT_DENY,
.nevents = 1,
.events = {
EVENT(0x1C2, 0),
},
};

f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0);
test_with_filter(vcpu, f);
free(f);
test_with_filter(vcpu, &f);

ASSERT_PMC_COUNTING_INSTRUCTIONS();
}

static void test_member_deny_list(struct kvm_vcpu *vcpu)
{
struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
struct __kvm_pmu_event_filter f = base_event_filter;

test_with_filter(vcpu, f);
free(f);
f.action = KVM_PMU_EVENT_DENY;
test_with_filter(vcpu, &f);

ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
}

static void test_member_allow_list(struct kvm_vcpu *vcpu)
{
struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
struct __kvm_pmu_event_filter f = base_event_filter;

test_with_filter(vcpu, f);
free(f);
f.action = KVM_PMU_EVENT_ALLOW;
test_with_filter(vcpu, &f);

ASSERT_PMC_COUNTING_INSTRUCTIONS();
}

static void test_not_member_deny_list(struct kvm_vcpu *vcpu)
{
struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
struct __kvm_pmu_event_filter f = base_event_filter;

remove_event(f, INST_RETIRED);
remove_event(f, INTEL_BR_RETIRED);
remove_event(f, AMD_ZEN_BR_RETIRED);
test_with_filter(vcpu, f);
free(f);
f.action = KVM_PMU_EVENT_DENY;

remove_event(&f, INST_RETIRED);
remove_event(&f, INTEL_BR_RETIRED);
remove_event(&f, AMD_ZEN_BR_RETIRED);
test_with_filter(vcpu, &f);

ASSERT_PMC_COUNTING_INSTRUCTIONS();
}

static void test_not_member_allow_list(struct kvm_vcpu *vcpu)
{
struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
struct __kvm_pmu_event_filter f = base_event_filter;

remove_event(f, INST_RETIRED);
remove_event(f, INTEL_BR_RETIRED);
remove_event(f, AMD_ZEN_BR_RETIRED);
test_with_filter(vcpu, f);
free(f);
f.action = KVM_PMU_EVENT_ALLOW;

remove_event(&f, INST_RETIRED);
remove_event(&f, INTEL_BR_RETIRED);
remove_event(&f, AMD_ZEN_BR_RETIRED);
test_with_filter(vcpu, &f);

ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
}
@ -569,19 +559,16 @@ static void run_masked_events_test(struct kvm_vcpu *vcpu,
const uint64_t masked_events[],
const int nmasked_events)
{
struct kvm_pmu_event_filter *f;
struct __kvm_pmu_event_filter f = {
.nevents = nmasked_events,
.action = KVM_PMU_EVENT_ALLOW,
.flags = KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
};

f = create_pmu_event_filter(masked_events, nmasked_events,
KVM_PMU_EVENT_ALLOW,
KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
test_with_filter(vcpu, f);
free(f);
memcpy(f.events, masked_events, sizeof(uint64_t) * nmasked_events);
test_with_filter(vcpu, &f);
}

/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
#define MAX_FILTER_EVENTS 300
#define MAX_TEST_EVENTS 10

#define ALLOW_LOADS BIT(0)
#define ALLOW_STORES BIT(1)
#define ALLOW_LOADS_STORES BIT(2)

@ -753,21 +740,33 @@ static void test_masked_events(struct kvm_vcpu *vcpu)
run_masked_events_tests(vcpu, events, nevents);
}

static int run_filter_test(struct kvm_vcpu *vcpu, const uint64_t *events,
int nevents, uint32_t flags)
static int set_pmu_event_filter(struct kvm_vcpu *vcpu,
struct __kvm_pmu_event_filter *__f)
{
struct kvm_pmu_event_filter *f;
int r;
struct kvm_pmu_event_filter *f = (void *)__f;

f = create_pmu_event_filter(events, nevents, KVM_PMU_EVENT_ALLOW, flags);
r = __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
free(f);
return __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
}

return r;
static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, uint64_t event,
uint32_t flags, uint32_t action)
{
struct __kvm_pmu_event_filter f = {
.nevents = 1,
.flags = flags,
.action = action,
.events = {
event,
},
};

return set_pmu_event_filter(vcpu, &f);
}

static void test_filter_ioctl(struct kvm_vcpu *vcpu)
{
uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
struct __kvm_pmu_event_filter f;
uint64_t e = ~0ul;
int r;
@ -775,15 +774,144 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
* Unfortunately having invalid bits set in event data is expected to
* pass when flags == 0 (bits other than eventsel+umask).
*/
r = run_filter_test(vcpu, &e, 1, 0);
r = set_pmu_single_event_filter(vcpu, e, 0, KVM_PMU_EVENT_ALLOW);
TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");

r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
r = set_pmu_single_event_filter(vcpu, e,
KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
KVM_PMU_EVENT_ALLOW);
TEST_ASSERT(r != 0, "Invalid PMU Event Filter is expected to fail");

e = KVM_PMU_ENCODE_MASKED_ENTRY(0xff, 0xff, 0xff, 0xf);
r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
r = set_pmu_single_event_filter(vcpu, e,
KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
KVM_PMU_EVENT_ALLOW);
TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");

f = base_event_filter;
f.action = PMU_EVENT_FILTER_INVALID_ACTION;
r = set_pmu_event_filter(vcpu, &f);
TEST_ASSERT(r, "Set invalid action is expected to fail");

f = base_event_filter;
f.flags = PMU_EVENT_FILTER_INVALID_FLAGS;
r = set_pmu_event_filter(vcpu, &f);
TEST_ASSERT(r, "Set invalid flags is expected to fail");

f = base_event_filter;
f.nevents = PMU_EVENT_FILTER_INVALID_NEVENTS;
r = set_pmu_event_filter(vcpu, &f);
TEST_ASSERT(r, "Exceeding the max number of filter events should fail");

f = base_event_filter;
f.fixed_counter_bitmap = ~GENMASK_ULL(nr_fixed_counters, 0);
r = set_pmu_event_filter(vcpu, &f);
TEST_ASSERT(!r, "Masking non-existent fixed counters should be allowed");
}

static void intel_run_fixed_counter_guest_code(uint8_t fixed_ctr_idx)
{
for (;;) {
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
wrmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx, 0);

/* Only OS_EN bit is enabled for fixed counter[idx]. */
wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, BIT_ULL(4 * fixed_ctr_idx));
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL,
BIT_ULL(INTEL_PMC_IDX_FIXED + fixed_ctr_idx));
__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);

GUEST_SYNC(rdmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx));
}
}

static uint64_t test_with_fixed_counter_filter(struct kvm_vcpu *vcpu,
uint32_t action, uint32_t bitmap)
{
struct __kvm_pmu_event_filter f = {
.action = action,
.fixed_counter_bitmap = bitmap,
};
set_pmu_event_filter(vcpu, &f);

return run_vcpu_to_sync(vcpu);
}

static uint64_t test_set_gp_and_fixed_event_filter(struct kvm_vcpu *vcpu,
uint32_t action,
uint32_t bitmap)
{
struct __kvm_pmu_event_filter f = base_event_filter;

f.action = action;
f.fixed_counter_bitmap = bitmap;
set_pmu_event_filter(vcpu, &f);

return run_vcpu_to_sync(vcpu);
}

static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx,
uint8_t nr_fixed_counters)
{
unsigned int i;
uint32_t bitmap;
uint64_t count;

TEST_ASSERT(nr_fixed_counters < sizeof(bitmap) * 8,
"Invalid nr_fixed_counters");

/*
* Check the fixed performance counter can count normally when KVM
* userspace doesn't set any pmu filter.
*/
count = run_vcpu_to_sync(vcpu);
TEST_ASSERT(count, "Unexpected count value: %ld\n", count);

for (i = 0; i < BIT(nr_fixed_counters); i++) {
bitmap = BIT(i);
count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_ALLOW,
bitmap);
TEST_ASSERT_EQ(!!count, !!(bitmap & BIT(idx)));

count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_DENY,
bitmap);
TEST_ASSERT_EQ(!!count, !(bitmap & BIT(idx)));

/*
* Check that fixed_counter_bitmap has higher priority than
* events[] when both are set.
*/
count = test_set_gp_and_fixed_event_filter(vcpu,
KVM_PMU_EVENT_ALLOW,
bitmap);
TEST_ASSERT_EQ(!!count, !!(bitmap & BIT(idx)));

count = test_set_gp_and_fixed_event_filter(vcpu,
KVM_PMU_EVENT_DENY,
bitmap);
TEST_ASSERT_EQ(!!count, !(bitmap & BIT(idx)));
}
}

static void test_fixed_counter_bitmap(void)
{
uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
uint8_t idx;

/*
* Check that pmu_event_filter works as expected when it's applied to
* fixed performance counters.
*/
for (idx = 0; idx < nr_fixed_counters; idx++) {
vm = vm_create_with_one_vcpu(&vcpu,
intel_run_fixed_counter_guest_code);
vcpu_args_set(vcpu, 1, idx);
__test_fixed_counter_bitmap(vcpu, idx, nr_fixed_counters);
kvm_vm_free(vm);
}
}

int main(int argc, char *argv[])

@ -829,6 +957,7 @@ int main(int argc, char *argv[])
kvm_vm_free(vm);

test_pmu_config_disable(guest_code);
test_fixed_counter_bitmap();

return 0;
}
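
To make the !!count assertions in __test_fixed_counter_bitmap() concrete, here is the truth table for idx = 1 (counter 1 is the one the guest programs):

/* ALLOW, bitmap = BIT(1): counter 1 is allowed  -> count != 0
 * ALLOW, bitmap = BIT(0): counter 1 is filtered -> count == 0
 * DENY,  bitmap = BIT(1): counter 1 is denied   -> count == 0
 * DENY,  bitmap = BIT(0): counter 1 may count   -> count != 0
 */
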
@ -57,7 +57,7 @@ int main(void)
for (i = 0; i < KVM_MAX_VCPUS; i++)
vcpu_set_msr(vcpus[i], MSR_IA32_APICBASE, LAPIC_X2APIC);

ASSERT_EQ(pthread_create(&thread, NULL, race, vcpus[0]), 0);
TEST_ASSERT_EQ(pthread_create(&thread, NULL, race, vcpus[0]), 0);

vcpuN = vcpus[KVM_MAX_VCPUS - 1];
for (t = time(NULL) + TIMEOUT; time(NULL) < t;) {

@ -65,8 +65,8 @@ int main(void)
vcpu_set_msr(vcpuN, MSR_IA32_APICBASE, LAPIC_DISABLED);
}

ASSERT_EQ(pthread_cancel(thread), 0);
ASSERT_EQ(pthread_join(thread, NULL), 0);
TEST_ASSERT_EQ(pthread_cancel(thread), 0);
TEST_ASSERT_EQ(pthread_join(thread, NULL), 0);

kvm_vm_free(vm);
@ -20,7 +20,7 @@ static void guest_bsp_vcpu(void *arg)
{
GUEST_SYNC(1);

GUEST_ASSERT(get_bsp_flag() != 0);
GUEST_ASSERT_NE(get_bsp_flag(), 0);

GUEST_DONE();
}

@ -29,7 +29,7 @@ static void guest_not_bsp_vcpu(void *arg)
{
GUEST_SYNC(1);

GUEST_ASSERT(get_bsp_flag() == 0);
GUEST_ASSERT_EQ(get_bsp_flag(), 0);

GUEST_DONE();
}

@ -65,7 +65,7 @@ static void run_vcpu(struct kvm_vcpu *vcpu)
stage);
break;
case UCALL_ABORT:
REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx");
REPORT_GUEST_ASSERT(uc);
default:
TEST_ASSERT(false, "Unexpected exit: %s",
exit_reason_str(vcpu->run->exit_reason));
@ -8,7 +8,6 @@
* Copyright (C) 2021, Red Hat, Inc.
*
*/

#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

@ -34,13 +33,12 @@ static void l2_guest_code_int(void);
static void guest_int_handler(struct ex_regs *regs)
{
int_fired++;
GUEST_ASSERT_2(regs->rip == (unsigned long)l2_guest_code_int,
regs->rip, (unsigned long)l2_guest_code_int);
GUEST_ASSERT_EQ(regs->rip, (unsigned long)l2_guest_code_int);
}

static void l2_guest_code_int(void)
{
GUEST_ASSERT_1(int_fired == 1, int_fired);
GUEST_ASSERT_EQ(int_fired, 1);

/*
* Same as the vmmcall() function, but with a ud2 sneaked after the

@ -53,7 +51,7 @@ static void l2_guest_code_int(void)
: "rbx", "rdx", "rsi", "rdi", "r8", "r9",
"r10", "r11", "r12", "r13", "r14", "r15");

GUEST_ASSERT_1(bp_fired == 1, bp_fired);
GUEST_ASSERT_EQ(bp_fired, 1);
hlt();
}

@ -66,9 +64,9 @@ static void guest_nmi_handler(struct ex_regs *regs)

if (nmi_stage_get() == 1) {
vmmcall();
GUEST_ASSERT(false);
GUEST_FAIL("Unexpected resume after VMMCALL");
} else {
GUEST_ASSERT_1(nmi_stage_get() == 3, nmi_stage_get());
GUEST_ASSERT_EQ(nmi_stage_get(), 3);
GUEST_DONE();
}
}

@ -104,7 +102,8 @@ static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t i
}

run_guest(vmcb, svm->vmcb_gpa);
GUEST_ASSERT_3(vmcb->control.exit_code == SVM_EXIT_VMMCALL,
__GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL,
"Expected VMMCALL #VMEXIT, got '0x%x', info1 = '0x%llx', info2 = '0x%llx'",
vmcb->control.exit_code,
vmcb->control.exit_info_1, vmcb->control.exit_info_2);

@ -112,7 +111,7 @@ static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t i
clgi();
x2apic_write_reg(APIC_ICR, APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_NMI);

GUEST_ASSERT_1(nmi_stage_get() == 1, nmi_stage_get());
GUEST_ASSERT_EQ(nmi_stage_get(), 1);
nmi_stage_inc();

stgi();

@ -133,7 +132,8 @@ static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t i
vmcb->control.next_rip = vmcb->save.rip + 2;

run_guest(vmcb, svm->vmcb_gpa);
GUEST_ASSERT_3(vmcb->control.exit_code == SVM_EXIT_HLT,
__GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_HLT,
"Expected HLT #VMEXIT, got '0x%x', info1 = '0x%llx', info2 = '0x%llx'",
vmcb->control.exit_code,
vmcb->control.exit_info_1, vmcb->control.exit_info_2);

@ -185,7 +185,7 @@ static void run_test(bool is_nmi)

switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT_3(uc, "vals = 0x%lx 0x%lx 0x%lx");
REPORT_GUEST_ASSERT(uc);
break;
/* NOT REACHED */
case UCALL_DONE:
@ -15,6 +15,7 @@
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <pthread.h>

#include "test_util.h"
#include "kvm_util.h"

@ -80,6 +81,133 @@ static void compare_vcpu_events(struct kvm_vcpu_events *left,
#define TEST_SYNC_FIELDS (KVM_SYNC_X86_REGS|KVM_SYNC_X86_SREGS|KVM_SYNC_X86_EVENTS)
#define INVALID_SYNC_FIELD 0x80000000

/*
* Set an exception as pending *and* injected while KVM is processing events.
* KVM is supposed to ignore/drop pending exceptions if userspace is also
* requesting that an exception be injected.
*/
static void *race_events_inj_pen(void *arg)
{
struct kvm_run *run = (struct kvm_run *)arg;
struct kvm_vcpu_events *events = &run->s.regs.events;

WRITE_ONCE(events->exception.nr, UD_VECTOR);

for (;;) {
WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_EVENTS);
WRITE_ONCE(events->flags, 0);
WRITE_ONCE(events->exception.injected, 1);
WRITE_ONCE(events->exception.pending, 1);

pthread_testcancel();
}

return NULL;
}

/*
* Set an invalid exception vector while KVM is processing events. KVM is
* supposed to reject any vector >= 32, as well as NMIs (vector 2).
*/
static void *race_events_exc(void *arg)
{
struct kvm_run *run = (struct kvm_run *)arg;
struct kvm_vcpu_events *events = &run->s.regs.events;

for (;;) {
WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_EVENTS);
WRITE_ONCE(events->flags, 0);
WRITE_ONCE(events->exception.nr, UD_VECTOR);
WRITE_ONCE(events->exception.pending, 1);
WRITE_ONCE(events->exception.nr, 255);

pthread_testcancel();
}

return NULL;
}

/*
* Toggle CR4.PAE while KVM is processing SREGS, EFER.LME=1 with CR4.PAE=0 is
* illegal, and KVM's MMU heavily relies on vCPU state being valid.
*/
static noinline void *race_sregs_cr4(void *arg)
{
struct kvm_run *run = (struct kvm_run *)arg;
__u64 *cr4 = &run->s.regs.sregs.cr4;
__u64 pae_enabled = *cr4;
__u64 pae_disabled = *cr4 & ~X86_CR4_PAE;

for (;;) {
WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_SREGS);
WRITE_ONCE(*cr4, pae_enabled);
asm volatile(".rept 512\n\t"
"nop\n\t"
".endr");
WRITE_ONCE(*cr4, pae_disabled);

pthread_testcancel();
}

return NULL;
}

static void race_sync_regs(void *racer)
{
const time_t TIMEOUT = 2; /* seconds, roughly */
struct kvm_x86_state *state;
struct kvm_translation tr;
struct kvm_vcpu *vcpu;
struct kvm_run *run;
struct kvm_vm *vm;
pthread_t thread;
time_t t;

vm = vm_create_with_one_vcpu(&vcpu, guest_code);
run = vcpu->run;

run->kvm_valid_regs = KVM_SYNC_X86_SREGS;
vcpu_run(vcpu);
run->kvm_valid_regs = 0;

/* Save state *before* spawning the thread that mucks with vCPU state. */
state = vcpu_save_state(vcpu);

/*
* Selftests run 64-bit guests by default, both EFER.LME and CR4.PAE
* should already be set in guest state.
*/
TEST_ASSERT((run->s.regs.sregs.cr4 & X86_CR4_PAE) &&
(run->s.regs.sregs.efer & EFER_LME),
"vCPU should be in long mode, CR4.PAE=%d, EFER.LME=%d",
!!(run->s.regs.sregs.cr4 & X86_CR4_PAE),
!!(run->s.regs.sregs.efer & EFER_LME));

TEST_ASSERT_EQ(pthread_create(&thread, NULL, racer, (void *)run), 0);

for (t = time(NULL) + TIMEOUT; time(NULL) < t;) {
/*
* Reload known good state if the vCPU triple faults, e.g. due
* to the unhandled #GPs being injected. VMX preserves state
* on shutdown, but SVM synthesizes an INIT as the VMCB state
* is architecturally undefined on triple fault.
*/
if (!__vcpu_run(vcpu) && run->exit_reason == KVM_EXIT_SHUTDOWN)
vcpu_load_state(vcpu, state);

if (racer == race_sregs_cr4) {
tr = (struct kvm_translation) { .linear_address = 0 };
__vcpu_ioctl(vcpu, KVM_TRANSLATE, &tr);
}
}

TEST_ASSERT_EQ(pthread_cancel(thread), 0);
TEST_ASSERT_EQ(pthread_join(thread, NULL), 0);

kvm_x86_state_cleanup(state);
kvm_vm_free(vm);
}

int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;

@ -218,5 +346,9 @@ int main(int argc, char *argv[])

kvm_vm_free(vm);

race_sync_regs(race_sregs_cr4);
race_sync_regs(race_events_exc);
race_sync_regs(race_events_inj_pen);

return 0;
}
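
The racer threads above hammer the fields that KVM consumes via kvm_dirty_regs, targeting a classic time-of-check/time-of-use window: if the kernel validates shared run-struct state in place and then reads it again to apply it, the second read can observe values the check never saw. In sketch form, with validate() and apply() as hypothetical stand-ins for the kernel-side processing:

/* Unsafe: checks shared memory, then re-reads it at use time. */
if (validate(&run->s.regs.sregs))	/* check */
	apply(&run->s.regs.sregs);	/* use: may see different bits */

/* Safe: snapshot to a local copy, then check and use that same copy. */
struct kvm_sregs sregs;

memcpy(&sregs, &run->s.regs.sregs, sizeof(sregs));
if (validate(&sregs))
	apply(&sregs);
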
@ -84,7 +84,7 @@ static void run_vcpu(struct kvm_vcpu *vcpu, int stage)
ksft_test_result_pass("stage %d passed\n", stage + 1);
return;
case UCALL_ABORT:
REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx");
REPORT_GUEST_ASSERT(uc);
default:
TEST_ASSERT(false, "Unexpected exit: %s",
exit_reason_str(vcpu->run->exit_reason));

@ -103,39 +103,39 @@ int main(void)
vm = vm_create_with_one_vcpu(&vcpu, guest_code);

val = 0;
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

/* Guest: writes to MSR_IA32_TSC affect both MSRs. */
run_vcpu(vcpu, 1);
val = 1ull * GUEST_STEP;
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

/* Guest: writes to MSR_IA32_TSC_ADJUST affect both MSRs. */
run_vcpu(vcpu, 2);
val = 2ull * GUEST_STEP;
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

/*
* Host: writes to MSR_IA32_TSC set the host-side offset
* and therefore do not change MSR_IA32_TSC_ADJUST.
*/
vcpu_set_msr(vcpu, MSR_IA32_TSC, HOST_ADJUST + val);
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
run_vcpu(vcpu, 3);

/* Host: writes to MSR_IA32_TSC_ADJUST do not modify the TSC. */
vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, UNITY * 123456);
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_TSC_ADJUST), UNITY * 123456);
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
TEST_ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_TSC_ADJUST), UNITY * 123456);

/* Restore previous value. */
vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, val);
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

/*
* Guest: writes to MSR_IA32_TSC_ADJUST do not destroy the

@ -143,8 +143,8 @@ int main(void)
*/
run_vcpu(vcpu, 4);
val = 3ull * GUEST_STEP;
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

/*
* Guest: writes to MSR_IA32_TSC affect both MSRs, so the host-side

@ -152,8 +152,8 @@ int main(void)
*/
run_vcpu(vcpu, 5);
val = 4ull * GUEST_STEP;
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val - HOST_ADJUST);
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val - HOST_ADJUST);

kvm_vm_free(vm);
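
For reference, the architectural invariant these stages exercise (Intel SDM, IA32_TSC_ADJUST): a guest WRMSR of value V to IA32_TSC at a point where the TSC reads T moves IA32_TSC_ADJUST by the same delta, adjust_new = adjust_old + (V - T), while a guest write of A to IA32_TSC_ADJUST shifts the TSC directly by (A - adjust_old). A host-initiated write to IA32_TSC, by contrast, is absorbed into KVM's per-vCPU TSC offset and leaves the guest's IA32_TSC_ADJUST untouched, which is exactly what the HOST_ADJUST arithmetic above checks.
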
@ -20,8 +20,8 @@ static void guest_ins_port80(uint8_t *buffer, unsigned int count)
end = (unsigned long)buffer + 8192;

asm volatile("cld; rep; insb" : "+D"(buffer), "+c"(count) : "d"(0x80) : "memory");
GUEST_ASSERT_1(count == 0, count);
GUEST_ASSERT_2((unsigned long)buffer == end, buffer, end);
GUEST_ASSERT_EQ(count, 0);
GUEST_ASSERT_EQ((unsigned long)buffer, end);
}

static void guest_code(void)

@ -43,7 +43,9 @@ static void guest_code(void)
memset(buffer, 0, sizeof(buffer));
guest_ins_port80(buffer, 8192);
for (i = 0; i < 8192; i++)
GUEST_ASSERT_2(buffer[i] == 0xaa, i, buffer[i]);
__GUEST_ASSERT(buffer[i] == 0xaa,
"Expected '0xaa', got '0x%x' at buffer[%u]",
buffer[i], i);

GUEST_DONE();
}

@ -91,7 +93,7 @@ int main(int argc, char *argv[])
case UCALL_DONE:
break;
case UCALL_ABORT:
REPORT_GUEST_ASSERT_2(uc, "argN+1 = 0x%lx, argN+2 = 0x%lx");
REPORT_GUEST_ASSERT(uc);
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
@ -50,7 +50,7 @@ static void set_timer(void)
timer.it_value.tv_sec = 0;
timer.it_value.tv_usec = 200;
timer.it_interval = timer.it_value;
ASSERT_EQ(setitimer(ITIMER_REAL, &timer, NULL), 0);
TEST_ASSERT_EQ(setitimer(ITIMER_REAL, &timer, NULL), 0);
}

static void set_or_clear_invalid_guest_state(struct kvm_vcpu *vcpu, bool set)
@ -10,7 +10,6 @@
* and check it can be retrieved with KVM_GET_MSR, also test
* the invalid LBR formats are rejected.
*/

#define _GNU_SOURCE /* for program_invocation_short_name */
#include <sys/ioctl.h>

@ -52,23 +51,24 @@ static const union perf_capabilities format_caps = {
.pebs_format = -1,
};

static void guest_test_perf_capabilities_gp(uint64_t val)
{
uint8_t vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, val);

__GUEST_ASSERT(vector == GP_VECTOR,
"Expected #GP for value '0x%llx', got vector '0x%x'",
val, vector);
}

static void guest_code(uint64_t current_val)
{
uint8_t vector;
int i;

vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, current_val);
GUEST_ASSERT_2(vector == GP_VECTOR, current_val, vector);
guest_test_perf_capabilities_gp(current_val);
guest_test_perf_capabilities_gp(0);

vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, 0);
GUEST_ASSERT_2(vector == GP_VECTOR, 0, vector);

for (i = 0; i < 64; i++) {
vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES,
current_val ^ BIT_ULL(i));
GUEST_ASSERT_2(vector == GP_VECTOR,
current_val ^ BIT_ULL(i), vector);
}
for (i = 0; i < 64; i++)
guest_test_perf_capabilities_gp(current_val ^ BIT_ULL(i));

GUEST_DONE();
}

@ -95,7 +95,7 @@ static void test_guest_wrmsr_perf_capabilities(union perf_capabilities host_cap)

switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT_2(uc, "val = 0x%lx, vector = %lu");
REPORT_GUEST_ASSERT(uc);
break;
case UCALL_DONE:
break;

@ -103,7 +103,8 @@ static void test_guest_wrmsr_perf_capabilities(union perf_capabilities host_cap)
TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
}

ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), host_cap.capabilities);
TEST_ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES),
host_cap.capabilities);

vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);
@ -65,17 +65,17 @@ static void ____test_icr(struct xapic_vcpu *x, uint64_t val)
vcpu_ioctl(vcpu, KVM_SET_LAPIC, &xapic);

vcpu_run(vcpu);
ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_SYNC);
ASSERT_EQ(uc.args[1], val);
TEST_ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_SYNC);
TEST_ASSERT_EQ(uc.args[1], val);

vcpu_ioctl(vcpu, KVM_GET_LAPIC, &xapic);
icr = (u64)(*((u32 *)&xapic.regs[APIC_ICR])) |
(u64)(*((u32 *)&xapic.regs[APIC_ICR2])) << 32;
if (!x->is_x2apic) {
val &= (-1u | (0xffull << (32 + 24)));
ASSERT_EQ(icr, val & ~APIC_ICR_BUSY);
TEST_ASSERT_EQ(icr, val & ~APIC_ICR_BUSY);
} else {
ASSERT_EQ(icr & ~APIC_ICR_BUSY, val & ~APIC_ICR_BUSY);
TEST_ASSERT_EQ(icr & ~APIC_ICR_BUSY, val & ~APIC_ICR_BUSY);
}
}
@ -4,7 +4,6 @@
*
* Copyright (C) 2022, Google LLC.
*/

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

@ -20,13 +19,14 @@
* Assert that architectural dependency rules are satisfied, e.g. that AVX is
* supported if and only if SSE is supported.
*/
#define ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0, xfeatures, dependencies) \
do { \
uint64_t __supported = (supported_xcr0) & ((xfeatures) | (dependencies)); \
\
GUEST_ASSERT_3((__supported & (xfeatures)) != (xfeatures) || \
__supported == ((xfeatures) | (dependencies)), \
__supported, (xfeatures), (dependencies)); \
#define ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0, xfeatures, dependencies) \
do { \
uint64_t __supported = (supported_xcr0) & ((xfeatures) | (dependencies)); \
\
__GUEST_ASSERT((__supported & (xfeatures)) != (xfeatures) || \
__supported == ((xfeatures) | (dependencies)), \
"supported = 0x%llx, xfeatures = 0x%llx, dependencies = 0x%llx", \
__supported, (xfeatures), (dependencies)); \
} while (0)

/*

@ -41,7 +41,8 @@ do { \
do { \
uint64_t __supported = (supported_xcr0) & (xfeatures); \
\
GUEST_ASSERT_2(!__supported || __supported == (xfeatures), \
__GUEST_ASSERT(!__supported || __supported == (xfeatures), \
"supported = 0x%llx, xfeatures = 0x%llx", \
__supported, (xfeatures)); \
} while (0)

@ -79,14 +80,18 @@ static void guest_code(void)
XFEATURE_MASK_XTILE);

vector = xsetbv_safe(0, supported_xcr0);
GUEST_ASSERT_2(!vector, supported_xcr0, vector);
__GUEST_ASSERT(!vector,
"Expected success on XSETBV(0x%llx), got vector '0x%x'",
supported_xcr0, vector);

for (i = 0; i < 64; i++) {
if (supported_xcr0 & BIT_ULL(i))
continue;

vector = xsetbv_safe(0, supported_xcr0 | BIT_ULL(i));
GUEST_ASSERT_3(vector == GP_VECTOR, supported_xcr0, vector, BIT_ULL(i));
__GUEST_ASSERT(vector == GP_VECTOR,
"Expected #GP on XSETBV(0x%llx), supported XCR0 = %llx, got vector '0x%x'",
BIT_ULL(i), supported_xcr0, vector);
}

GUEST_DONE();

@ -117,7 +122,7 @@ int main(int argc, char *argv[])

switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT_3(uc, "0x%lx 0x%lx 0x%lx");
REPORT_GUEST_ASSERT(uc);
break;
case UCALL_DONE:
goto done;
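
As a concrete instance of the dependency macro above (the exact invocations live in the test's guest_code() and may differ slightly): AVX state may be enabled in XCR0 only together with SSE state, so the check would be written as

/* AVX (YMM) must be supported if and only if SSE (XMM) is. */
ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0,
			     XFEATURE_MASK_YMM,
			     XFEATURE_MASK_SSE);

which passes when __supported is either empty of YMM or contains both masks, and fails if YMM appears without its SSE dependency.
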
@ -108,16 +108,16 @@ int main(int argc, char *argv[])
vcpu_run(vcpu);

if (run->exit_reason == KVM_EXIT_XEN) {
ASSERT_EQ(run->xen.type, KVM_EXIT_XEN_HCALL);
ASSERT_EQ(run->xen.u.hcall.cpl, 0);
ASSERT_EQ(run->xen.u.hcall.longmode, 1);
ASSERT_EQ(run->xen.u.hcall.input, INPUTVALUE);
ASSERT_EQ(run->xen.u.hcall.params[0], ARGVALUE(1));
ASSERT_EQ(run->xen.u.hcall.params[1], ARGVALUE(2));
ASSERT_EQ(run->xen.u.hcall.params[2], ARGVALUE(3));
ASSERT_EQ(run->xen.u.hcall.params[3], ARGVALUE(4));
ASSERT_EQ(run->xen.u.hcall.params[4], ARGVALUE(5));
ASSERT_EQ(run->xen.u.hcall.params[5], ARGVALUE(6));
TEST_ASSERT_EQ(run->xen.type, KVM_EXIT_XEN_HCALL);
TEST_ASSERT_EQ(run->xen.u.hcall.cpl, 0);
TEST_ASSERT_EQ(run->xen.u.hcall.longmode, 1);
TEST_ASSERT_EQ(run->xen.u.hcall.input, INPUTVALUE);
TEST_ASSERT_EQ(run->xen.u.hcall.params[0], ARGVALUE(1));
TEST_ASSERT_EQ(run->xen.u.hcall.params[1], ARGVALUE(2));
TEST_ASSERT_EQ(run->xen.u.hcall.params[2], ARGVALUE(3));
TEST_ASSERT_EQ(run->xen.u.hcall.params[3], ARGVALUE(4));
TEST_ASSERT_EQ(run->xen.u.hcall.params[4], ARGVALUE(5));
TEST_ASSERT_EQ(run->xen.u.hcall.params[5], ARGVALUE(6));
run->xen.u.hcall.result = RETVALUE;
continue;
}