# SPDX-License-Identifier: GPL-2.0-only
# KVM selftests Makefile. Pulls in the kernel's shared build helpers
# (try-run etc.) before anything else.
include ../../../build/Build.include

# Declare the default goal early; its prerequisites are added after
# ../lib.mk has been included (which defines $(OUTPUT)).
all:

top_srcdir = ../../../..
# Ask kselftest's lib.mk to run "make headers_install" for us.
KSFT_KHDR_INSTALL := 1
# For cross-builds to work, UNAME_M has to map to ARCH and arch specific
# directories and targets in this Makefile. "uname -m" doesn't map to
# arch specific sub-directory names.
#
# UNAME_M variable is used to run the compiles pointing to the right arch
# directories and build the right targets for these supported architectures.
#
# TEST_GEN_PROGS and LIBKVM are set using UNAME_M variable.
# LINUX_TOOL_ARCH_INCLUDE is set using ARCH variable.
#
# x86_64 targets are named to include x86_64 as a suffix and directories
# for includes are in x86_64 sub-directory. s390x and aarch64 follow the
# same convention. "uname -m" doesn't result in the correct mapping for
# s390x and aarch64.
#
# No change necessary for x86_64
UNAME_M := $(shell uname -m)

# Set UNAME_M for arm64 compile/install to work
ifeq ($(ARCH),arm64)
UNAME_M := aarch64
endif
# Set UNAME_M s390x compile/install to work
ifeq ($(ARCH),s390)
UNAME_M := s390x
endif
# Arch-neutral library sources shared by every selftest binary, plus the
# per-arch sources appended below via $(LIBKVM_$(UNAME_M)).
LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c
LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S
LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c
LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c
2018-03-27 11:49:19 +02:00
2019-05-21 17:13:58 +00:00
TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test
2021-03-18 15:56:29 +01:00
TEST_GEN_PROGS_x86_64 += x86_64/get_msr_index_features
2018-10-16 18:50:11 +02:00
TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
2021-01-29 17:18:21 +01:00
TEST_GEN_PROGS_x86_64 += x86_64/get_cpuid_test
2021-03-18 15:09:49 +01:00
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_clock
2018-12-10 18:21:59 +01:00
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
2020-10-27 16:10:44 -07:00
TEST_GEN_PROGS_x86_64 += x86_64/kvm_pv_test
2019-05-31 14:14:52 +00:00
TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
2019-05-21 17:13:58 +00:00
TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
2021-03-18 16:16:24 +01:00
TEST_GEN_PROGS_x86_64 += x86_64/set_boot_cpu_id
2019-05-21 17:13:58 +00:00
TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
TEST_GEN_PROGS_x86_64 += x86_64/smm_test
TEST_GEN_PROGS_x86_64 += x86_64/state_test
2020-05-26 14:51:07 -07:00
TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
2020-03-13 16:56:44 +01:00
TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
2019-05-21 17:13:58 +00:00
TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
2020-10-12 12:47:16 -07:00
TEST_GEN_PROGS_x86_64 += x86_64/userspace_msr_exit_test
2020-10-26 11:09:22 -07:00
TEST_GEN_PROGS_x86_64 += x86_64/vmx_apic_access_test
2019-05-21 17:13:58 +00:00
TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
2019-09-26 15:01:15 +02:00
TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
2019-05-02 11:31:41 -07:00
TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
2019-05-21 17:13:58 +00:00
TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
2020-11-05 14:38:23 -08:00
TEST_GEN_PROGS_x86_64 += x86_64/xapic_ipi_test
2019-10-21 16:30:28 -07:00
TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test
2020-05-05 16:50:00 -04:00
TEST_GEN_PROGS_x86_64 += x86_64/debug_regs
2020-09-24 14:45:27 +02:00
TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
2021-02-01 13:10:39 +08:00
TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
2020-12-04 01:02:04 +00:00
TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
2018-06-13 09:55:44 -04:00
TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
2020-01-23 10:04:27 -08:00
TEST_GEN_PROGS_x86_64 += demand_paging_test
2020-03-13 16:56:44 +01:00
TEST_GEN_PROGS_x86_64 += dirty_log_test
2020-10-27 16:37:33 -07:00
TEST_GEN_PROGS_x86_64 += dirty_log_perf_test
2021-02-13 00:14:52 +00:00
TEST_GEN_PROGS_x86_64 += hardware_disable_test
2019-07-15 12:50:46 +02:00
TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
KVM: selftests: Add a test for kvm page table code
This test serves as a performance tester and a bug reproducer for
kvm page table code (GPA->HPA mappings), so it gives guidance for
people trying to make some improvement for kvm.
The function guest_code() can cover the conditions where a single vcpu or
multiple vcpus access guest pages within the same memory region, in three
VM stages(before dirty logging, during dirty logging, after dirty logging).
Besides, the backing src memory type(ANONYMOUS/THP/HUGETLB) of the tested
memory region can be specified by users, which means normal page mappings
or block mappings can be chosen by users to be created in the test.
If ANONYMOUS memory is specified, kvm will create normal page mappings
for the tested memory region before dirty logging, and update attributes
of the page mappings from RO to RW during dirty logging. If THP/HUGETLB
memory is specified, kvm will create block mappings for the tested memory
region before dirty logging, and split the blcok mappings into normal page
mappings during dirty logging, and coalesce the page mappings back into
block mappings after dirty logging is stopped.
So in summary, as a performance tester, this test can present the
performance of kvm creating/updating normal page mappings, or the
performance of kvm creating/splitting/recovering block mappings,
through execution time.
When we need to coalesce the page mappings back to block mappings after
dirty logging is stopped, we have to firstly invalidate *all* the TLB
entries for the page mappings right before installation of the block entry,
because a TLB conflict abort error could occur if we can't invalidate the
TLB entries fully. We have hit this TLB conflict twice on aarch64 software
implementation and fixed it. As this test can imulate process from dirty
logging enabled to dirty logging stopped of a VM with block mappings,
so it can also reproduce this TLB conflict abort due to inadequate TLB
invalidation when coalescing tables.
Signed-off-by: Yanan Wang <wangyanan55@huawei.com>
Reviewed-by: Ben Gardon <bgardon@google.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Message-Id: <20210330080856.14940-11-wangyanan55@huawei.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2021-03-30 16:08:56 +08:00
TEST_GEN_PROGS_x86_64 += kvm_page_table_test
2021-01-12 13:42:53 -08:00
TEST_GEN_PROGS_x86_64 += memslot_modification_stress_test
2020-04-10 16:17:06 -07:00
TEST_GEN_PROGS_x86_64 += set_memory_region_test
2020-03-13 16:56:44 +01:00
TEST_GEN_PROGS_x86_64 += steal_time
# aarch64-specific tests, followed by the arch-neutral tests built on aarch64.
TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list-sve
TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
TEST_GEN_PROGS_aarch64 += demand_paging_test
TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
TEST_GEN_PROGS_aarch64 += kvm_page_table_test
TEST_GEN_PROGS_aarch64 += set_memory_region_test
TEST_GEN_PROGS_aarch64 += steal_time
# s390x-specific tests, followed by the arch-neutral tests built on s390x.
TEST_GEN_PROGS_s390x = s390x/memop
TEST_GEN_PROGS_s390x += s390x/resets
TEST_GEN_PROGS_s390x += s390x/sync_regs_test
TEST_GEN_PROGS_s390x += demand_paging_test
TEST_GEN_PROGS_s390x += dirty_log_test
TEST_GEN_PROGS_s390x += kvm_create_max_vcpus
TEST_GEN_PROGS_s390x += kvm_page_table_test
TEST_GEN_PROGS_s390x += set_memory_region_test
# Select the program and library lists for the architecture being built.
TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))
LIBKVM += $(LIBKVM_$(UNAME_M))

INSTALL_HDR_PATH = $(top_srcdir)/usr
LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
# tools/arch uses "x86", not "x86_64", as its sub-directory name.
ifeq ($(ARCH),x86_64)
LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/x86/include
else
LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
endif
CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
	-fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
	-I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \
	-I$(<D) -Iinclude/$(UNAME_M) -I..

# Probe whether the compiler supports -no-pie (try-run comes from
# Build.include; it leaves the option empty when the probe fails).
no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
        $(CC) -Werror -no-pie -x c - -o "$$TMP", -no-pie)

# On s390, build the testcases KVM-enabled
pgste-option = $(call try-run, echo 'int main() { return 0; }' | \
	$(CC) -Werror -Wl$(comma)--s390-pgste -x c - -o "$$TMP",-Wl$(comma)--s390-pgste)

LDFLAGS += -pthread $(no-pie-option) $(pgste-option)
2018-03-27 11:49:19 +02:00
# After inclusion, $(OUTPUT) is defined and
# $(TEST_GEN_PROGS) starts with $(OUTPUT)/
i n c l u d e . . / l i b . m k
STATIC_LIBS := $( OUTPUT) /libkvm.a
2020-10-12 12:47:15 -07:00
LIBKVM_C := $( filter %.c,$( LIBKVM) )
LIBKVM_S := $( filter %.S,$( LIBKVM) )
LIBKVM_C_OBJ := $( patsubst %.c, $( OUTPUT) /%.o, $( LIBKVM_C) )
LIBKVM_S_OBJ := $( patsubst %.S, $( OUTPUT) /%.o, $( LIBKVM_S) )
EXTRA_CLEAN += $( LIBKVM_C_OBJ) $( LIBKVM_S_OBJ) $( STATIC_LIBS) cscope.*
x := $( shell mkdir -p $( sort $( dir $( LIBKVM_C_OBJ) $( LIBKVM_S_OBJ) ) ) )
$(LIBKVM_C_OBJ) : $( OUTPUT ) /%.o : %.c
$( CC) $( CFLAGS) $( CPPFLAGS) $( TARGET_ARCH) -c $< -o $@
2018-03-27 11:49:19 +02:00
2020-10-12 12:47:15 -07:00
$(LIBKVM_S_OBJ) : $( OUTPUT ) /%.o : %.S
2018-03-27 11:49:19 +02:00
$( CC) $( CFLAGS) $( CPPFLAGS) $( TARGET_ARCH) -c $< -o $@
2020-10-12 12:47:15 -07:00
LIBKVM_OBJS = $( LIBKVM_C_OBJ) $( LIBKVM_S_OBJ)
$(OUTPUT)/libkvm.a : $( LIBKVM_OBJS )
2018-03-27 11:49:19 +02:00
$( AR) crs $@ $^
2020-04-27 18:11:07 -06:00
x := $( shell mkdir -p $( sort $( dir $( TEST_GEN_PROGS) ) ) )
2018-09-04 12:47:21 +02:00
all : $( STATIC_LIBS )
2018-03-27 11:49:19 +02:00
$(TEST_GEN_PROGS) : $( STATIC_LIBS )
2018-09-18 19:54:27 +02:00
cscope : include_paths = $( LINUX_TOOL_INCLUDE ) $( LINUX_HDR_PATH ) include lib ..
cscope :
$( RM) cscope.*
( find $( include_paths) -name '*.h' \
-exec realpath --relative-base= $( PWD) { } \; ; \
find . -name '*.c' \
-exec realpath --relative-base= $( PWD) { } \; ) | sort -u > cscope.files
cscope -b