# SPDX-License-Identifier: GPL-2.0-only
# Makefile for the KVM selftests.
# Kbuild.include provides the try-run macro used for compiler probes below.
include ../../../../scripts/Kbuild.include

all:

top_srcdir = ../../../..
# Ask lib.mk to install kernel headers before building.
KSFT_KHDR_INSTALL := 1
# Host architecture selects the per-arch source and test lists below.
UNAME_M := $(shell uname -m)

# Common library sources, plus per-architecture support code.
LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c
LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/ucall.c
LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c
LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c
# x86_64-specific test programs.
TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test
TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
TEST_GEN_PROGS_x86_64 += x86_64/smm_test
TEST_GEN_PROGS_x86_64 += x86_64/state_test
TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test
kvm: introduce manual dirty log reprotect
There are two problems with KVM_GET_DIRTY_LOG. First, and less important,
it can take kvm->mmu_lock for an extended period of time. Second, its user
can actually see many false positives in some cases. The latter is due
to a benign race like this:
1. KVM_GET_DIRTY_LOG returns a set of dirty pages and write protects
them.
2. The guest modifies the pages, causing them to be marked dirty.
3. Userspace actually copies the pages.
4. KVM_GET_DIRTY_LOG returns those pages as dirty again, even though
they were not written to since (3).
This is especially a problem for large guests, where the time between
(1) and (3) can be substantial. This patch introduces a new
capability which, when enabled, makes KVM_GET_DIRTY_LOG not
write-protect the pages it returns. Instead, userspace has to
explicitly clear the dirty log bits just before using the content
of the page. The new KVM_CLEAR_DIRTY_LOG ioctl can also operate on a
64-page granularity rather than requiring to sync a full memslot;
this way, the mmu_lock is taken for small amounts of time, and
only a small amount of time will pass between write protection
of pages and the sending of their content.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
# Architecture-independent tests, also built for x86_64.
TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
2018-09-18 20:54:32 +03:00
kvm: introduce manual dirty log reprotect
There are two problems with KVM_GET_DIRTY_LOG. First, and less important,
it can take kvm->mmu_lock for an extended period of time. Second, its user
can actually see many false positives in some cases. The latter is due
to a benign race like this:
1. KVM_GET_DIRTY_LOG returns a set of dirty pages and write protects
them.
2. The guest modifies the pages, causing them to be marked dirty.
3. Userspace actually copies the pages.
4. KVM_GET_DIRTY_LOG returns those pages as dirty again, even though
they were not written to since (3).
This is especially a problem for large guests, where the time between
(1) and (3) can be substantial. This patch introduces a new
capability which, when enabled, makes KVM_GET_DIRTY_LOG not
write-protect the pages it returns. Instead, userspace has to
explicitly clear the dirty log bits just before using the content
of the page. The new KVM_CLEAR_DIRTY_LOG ioctl can also operate on a
64-page granularity rather than requiring to sync a full memslot;
this way, the mmu_lock is taken for small amounts of time, and
only a small amount of time will pass between write protection
of pages and the sending of their content.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
# aarch64 builds the architecture-independent tests only.
TEST_GEN_PROGS_aarch64 += clear_dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
# s390x-specific tests plus the architecture-independent ones.
TEST_GEN_PROGS_s390x = s390x/memop
TEST_GEN_PROGS_s390x += s390x/sync_regs_test
TEST_GEN_PROGS_s390x += dirty_log_test
TEST_GEN_PROGS_s390x += kvm_create_max_vcpus

# Select the program and library lists for the host architecture.
TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))
LIBKVM += $(LIBKVM_$(UNAME_M))

# Headers are installed under $(top_srcdir)/usr by the khdr target.
INSTALL_HDR_PATH = $(top_srcdir)/usr
LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
	-fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
	-I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..

# Probe whether the compiler accepts -no-pie (try-run from Kbuild.include
# compiles a trivial program and yields the flag only on success).
no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
	$(CC) -Werror -no-pie -x c - -o "$$TMP", -no-pie)

# On s390, build the testcases KVM-enabled
pgste-option = $(call try-run, echo 'int main() { return 0; }' | \
	$(CC) -Werror -Wl$(comma)--s390-pgste -x c - -o "$$TMP",-Wl$(comma)--s390-pgste)

LDFLAGS += -pthread $(no-pie-option) $(pgste-option)
# After inclusion, $(OUTPUT) is defined and
# $(TEST_GEN_PROGS) starts with $(OUTPUT)/
include ../lib.mk

STATIC_LIBS := $(OUTPUT)/libkvm.a
LIBKVM_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM))
EXTRA_CLEAN += $(LIBKVM_OBJ) $(STATIC_LIBS) cscope.*

# Create the object output directories at parse time; x itself is unused.
x := $(shell mkdir -p $(sort $(dir $(LIBKVM_OBJ))))

# Static pattern rule: each library object from its matching source.
$(LIBKVM_OBJ): $(OUTPUT)/%.o: %.c
	$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@

$(OUTPUT)/libkvm.a: $(LIBKVM_OBJ)
	$(AR) crs $@ $^

all: $(STATIC_LIBS)

# Every generated test program links against libkvm.a.
$(TEST_GEN_PROGS): $(STATIC_LIBS)
2018-09-18 20:54:27 +03:00
cscope : include_paths = $( LINUX_TOOL_INCLUDE ) $( LINUX_HDR_PATH ) include lib ..
cscope :
$( RM) cscope.*
( find $( include_paths) -name '*.h' \
-exec realpath --relative-base= $( PWD) { } \; ; \
find . -name '*.c' \
-exec realpath --relative-base= $( PWD) { } \; ) | sort -u > cscope.files
cscope -b