# SPDX-License-Identifier: GPL-2.0-only
# Pull in try-run and friends used for compiler feature probing below.
include ../../../../scripts/Kbuild.include

# Default goal; its real prerequisites are attached after lib.mk is included.
all:

top_srcdir = ../../../..
# Ask lib.mk to install kernel headers before building the tests.
KSFT_KHDR_INSTALL := 1
# Host architecture; selects the per-arch source/test lists below.
UNAME_M := $(shell uname -m)
# Common libkvm sources plus per-architecture backends; the matching
# LIBKVM_$(UNAME_M) list is appended to LIBKVM further down.
LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/ucall.c lib/sparsebit.c
LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c
LIBKVM_aarch64 = lib/aarch64/processor.c
LIBKVM_s390x = lib/s390x/processor.c
# x86_64 selftests (dirty_log_test is architecture-independent source
# built for this arch as well).
TEST_GEN_PROGS_x86_64 = x86_64/platform_info_test
TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
TEST_GEN_PROGS_x86_64 += x86_64/cr4_cpuid_sync_test
TEST_GEN_PROGS_x86_64 += x86_64/state_test
TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
TEST_GEN_PROGS_x86_64 += x86_64/smm_test
TEST_GEN_PROGS_x86_64 += x86_64/kvm_create_max_vcpus
TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
# clear_dirty_log_test exercises the KVM_CLEAR_DIRTY_LOG ioctl
# (manual dirty-log reprotect mode of KVM_GET_DIRTY_LOG).
TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
# aarch64 selftests; the dirty-log tests are architecture-independent
# sources built for this arch too.
TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += clear_dirty_log_test
# s390x selftests.
TEST_GEN_PROGS_s390x += s390x/sync_regs_test

# Pick the test programs and library sources for the host architecture.
TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))
LIBKVM += $(LIBKVM_$(UNAME_M))

# Location where "make headers_install" places the exported kernel headers.
INSTALL_HDR_PATH = $(top_srcdir)/usr
LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
	-fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
	-I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..

# Link with -no-pie only if the compiler accepts it (try-run comes from
# Kbuild.include; "$$TMP" is try-run's temporary output file).
no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
        $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)

LDFLAGS += -pthread $(no-pie-option)
# After inclusion, $(OUTPUT) is defined and
# $(TEST_GEN_PROGS) starts with $(OUTPUT)/
include ../lib.mk

STATIC_LIBS := $(OUTPUT)/libkvm.a
LIBKVM_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM))
EXTRA_CLEAN += $(LIBKVM_OBJ) $(STATIC_LIBS) cscope.*
# Create the per-arch output directories for the library objects at
# parse time (the assignment to x only exists to force evaluation).
x := $(shell mkdir -p $(sort $(dir $(LIBKVM_OBJ))))

# Static pattern rule: compile each libkvm source into $(OUTPUT).
$(LIBKVM_OBJ): $(OUTPUT)/%.o: %.c
	$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@

$(OUTPUT)/libkvm.a: $(LIBKVM_OBJ)
	$(AR) crs $@ $^

all: $(STATIC_LIBS)
# Every test binary links against the static library.
$(TEST_GEN_PROGS): $(STATIC_LIBS)
2018-09-18 19:54:27 +02:00
cscope : include_paths = $( LINUX_TOOL_INCLUDE ) $( LINUX_HDR_PATH ) include lib ..
cscope :
$( RM) cscope.*
( find $( include_paths) -name '*.h' \
-exec realpath --relative-base= $( PWD) { } \; ; \
find . -name '*.c' \
-exec realpath --relative-base= $( PWD) { } \; ) | sort -u > cscope.files
cscope -b