# SPDX-License-Identifier: GPL-2.0-only
# Makefile for the KVM selftests: builds libkvm.a plus per-architecture
# test programs. Pulls in the kbuild try-run helpers from Build.include.
include ../../../build/Build.include

# Declare the default goal early; its real prerequisites are added after
# ../lib.mk has been included (which defines $(OUTPUT) and $(TEST_GEN_PROGS)).
all:

top_srcdir = ../../../..
KSFT_KHDR_INSTALL := 1
# For cross-builds to work, UNAME_M has to map to ARCH and arch specific
# directories and targets in this Makefile. "uname -m" doesn't map to
# arch specific sub-directory names.
#
# UNAME_M variable is used to run the compiles pointing to the right arch
# directories and build the right targets for these supported architectures.
#
# TEST_GEN_PROGS and LIBKVM are set using UNAME_M variable.
# LINUX_TOOL_ARCH_INCLUDE is set using ARCH variable.
#
# x86_64 targets are named to include x86_64 as a suffix and directories
# for includes are in x86_64 sub-directory. s390x and aarch64 follow the
# same convention. "uname -m" doesn't result in the correct mapping for
# s390x and aarch64.
#
# No change necessary for x86_64
UNAME_M := $(shell uname -m)

# Set UNAME_M for arm64 compile/install to work
ifeq ($(ARCH),arm64)
UNAME_M := aarch64
endif
# Set UNAME_M s390x compile/install to work
ifeq ($(ARCH),s390)
UNAME_M := s390x
endif
# Library sources shared by every architecture, plus per-arch sources
# selected below via LIBKVM += $(LIBKVM_$(UNAME_M)).
LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/rbtree.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c
LIBKVM_x86_64 = lib/x86_64/apic.c lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S
LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c lib/aarch64/handlers.S lib/aarch64/spinlock.c lib/aarch64/gic.c lib/aarch64/gic_v3.c lib/aarch64/vgic.c
LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c
# Test programs built when UNAME_M is x86_64. Arch-specific tests live in
# the x86_64/ sub-directory; the unprefixed entries are generic tests that
# also build on this architecture.
TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test
TEST_GEN_PROGS_x86_64 += x86_64/get_msr_index_features
TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
TEST_GEN_PROGS_x86_64 += x86_64/emulator_error_test
TEST_GEN_PROGS_x86_64 += x86_64/get_cpuid_test
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_clock
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_features
TEST_GEN_PROGS_x86_64 += x86_64/kvm_clock_test
TEST_GEN_PROGS_x86_64 += x86_64/kvm_pv_test
TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
TEST_GEN_PROGS_x86_64 += x86_64/mmu_role_test
TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
TEST_GEN_PROGS_x86_64 += x86_64/set_boot_cpu_id
TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
TEST_GEN_PROGS_x86_64 += x86_64/smm_test
TEST_GEN_PROGS_x86_64 += x86_64/state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test
TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
TEST_GEN_PROGS_x86_64 += x86_64/userspace_msr_exit_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_apic_access_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_nested_tsc_scaling_test
TEST_GEN_PROGS_x86_64 += x86_64/xapic_ipi_test
TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test
TEST_GEN_PROGS_x86_64 += x86_64/debug_regs
TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_pi_mmio_test
TEST_GEN_PROGS_x86_64 += x86_64/sev_migrate_tests
TEST_GEN_PROGS_x86_64 += demand_paging_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
TEST_GEN_PROGS_x86_64 += dirty_log_perf_test
TEST_GEN_PROGS_x86_64 += hardware_disable_test
TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
TEST_GEN_PROGS_x86_64 += kvm_page_table_test
TEST_GEN_PROGS_x86_64 += memslot_modification_stress_test
TEST_GEN_PROGS_x86_64 += memslot_perf_test
TEST_GEN_PROGS_x86_64 += rseq_test
TEST_GEN_PROGS_x86_64 += set_memory_region_test
TEST_GEN_PROGS_x86_64 += steal_time
TEST_GEN_PROGS_x86_64 += kvm_binary_stats_test
TEST_GEN_PROGS_x86_64 += system_counter_offset_test
# Test programs built when UNAME_M is aarch64 (ARCH=arm64).
TEST_GEN_PROGS_aarch64 += aarch64/arch_timer
TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
TEST_GEN_PROGS_aarch64 += aarch64/psci_cpu_on_test
TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq
TEST_GEN_PROGS_aarch64 += demand_paging_test
TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
TEST_GEN_PROGS_aarch64 += kvm_page_table_test
TEST_GEN_PROGS_aarch64 += memslot_modification_stress_test
TEST_GEN_PROGS_aarch64 += memslot_perf_test
TEST_GEN_PROGS_aarch64 += rseq_test
TEST_GEN_PROGS_aarch64 += set_memory_region_test
TEST_GEN_PROGS_aarch64 += steal_time
TEST_GEN_PROGS_aarch64 += kvm_binary_stats_test
# Test programs built when UNAME_M is s390x (ARCH=s390).
TEST_GEN_PROGS_s390x = s390x/memop
TEST_GEN_PROGS_s390x += s390x/resets
TEST_GEN_PROGS_s390x += s390x/sync_regs_test
TEST_GEN_PROGS_s390x += demand_paging_test
TEST_GEN_PROGS_s390x += dirty_log_test
TEST_GEN_PROGS_s390x += kvm_create_max_vcpus
TEST_GEN_PROGS_s390x += kvm_page_table_test
TEST_GEN_PROGS_s390x += rseq_test
TEST_GEN_PROGS_s390x += set_memory_region_test
TEST_GEN_PROGS_s390x += kvm_binary_stats_test
# Select the programs and library sources for the architecture being built.
TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))
LIBKVM += $(LIBKVM_$(UNAME_M))

INSTALL_HDR_PATH = $(top_srcdir)/usr
LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
# tools/arch uses "x86" rather than "x86_64" as the directory name.
ifeq ($(ARCH),x86_64)
LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/x86/include
else
LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
endif
CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
	-fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
	-I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \
	-I$(<D) -Iinclude/$(UNAME_M) -I..

# Probe (via the kbuild try-run helper) whether the compiler accepts
# -no-pie; pass it only when supported.
no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
	$(CC) -Werror -no-pie -x c - -o "$$TMP", -no-pie)

# On s390, build the testcases KVM-enabled
pgste-option = $(call try-run, echo 'int main() { return 0; }' | \
	$(CC) -Werror -Wl$(comma)--s390-pgste -x c - -o "$$TMP",-Wl$(comma)--s390-pgste)

LDFLAGS += -pthread $(no-pie-option) $(pgste-option)
# After inclusion, $(OUTPUT) is defined and
# $(TEST_GEN_PROGS) starts with $(OUTPUT)/
include ../lib.mk

STATIC_LIBS := $(OUTPUT)/libkvm.a
LIBKVM_C := $(filter %.c,$(LIBKVM))
LIBKVM_S := $(filter %.S,$(LIBKVM))
LIBKVM_C_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_C))
LIBKVM_S_OBJ := $(patsubst %.S, $(OUTPUT)/%.o, $(LIBKVM_S))
EXTRA_CLEAN += $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ) $(STATIC_LIBS) cscope.*

# Create the library object output directories at parse time.
x := $(shell mkdir -p $(sort $(dir $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ))))

# Static pattern rules: compile each C/asm library source into $(OUTPUT).
$(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c
	$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@

$(LIBKVM_S_OBJ): $(OUTPUT)/%.o: %.S
	$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@

LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ)
$(OUTPUT)/libkvm.a: $(LIBKVM_OBJS)
	$(AR) crs $@ $^
# Create the test program output directories at parse time.
x := $(shell mkdir -p $(sort $(dir $(TEST_GEN_PROGS))))

# Every test program links against libkvm.a.
all: $(STATIC_LIBS)
$(TEST_GEN_PROGS): $(STATIC_LIBS)

# Generate a cscope database covering the selftest and kernel headers.
cscope: include_paths = $(LINUX_TOOL_INCLUDE) $(LINUX_HDR_PATH) include lib ..
cscope:
	$(RM) cscope.*
	(find $(include_paths) -name '*.h' \
		-exec realpath --relative-base=$(PWD) {} \;; \
	find . -name '*.c' \
		-exec realpath --relative-base=$(PWD) {} \;) | sort -u > cscope.files
	cscope -b