// SPDX-License-Identifier: GPL-2.0
/*
* KVM dirty page logging performance test
*
 * Based on dirty_log_test.c
 *
 * Copyright (C) 2018, Red Hat, Inc.
 * Copyright (C) 2020, Google, Inc.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <pthread.h>
#include <linux/bitmap.h>

#include "kvm_util.h"
#include "test_util.h"
#include "perf_test_util.h"
#include "guest_modes.h"

#ifdef __aarch64__
#include "aarch64/vgic.h"

#define GICD_BASE_GPA		0x8000000ULL
#define GICR_BASE_GPA		0x80A0000ULL
#endif

/* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/
#define TEST_HOST_LOOP_N		2UL

static int nr_vcpus = 1;
static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;

/* Host variables */
static u64 dirty_log_manual_caps;
static bool host_quit;
static int iteration;
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
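
/*
 * Per-vCPU worker: run the guest until it exits with UCALL_SYNC, time each
 * populate/dirty pass, then spin until the main thread either bumps the
 * global iteration counter or asks the workers to quit.
 */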
static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
{
	int ret;
	struct kvm_vm *vm = perf_test_args.vm;
	uint64_t pages_count = 0;
	struct kvm_run *run;
	struct timespec start;
	struct timespec ts_diff;
	struct timespec total = (struct timespec){0};
	struct timespec avg;
	int vcpu_id = vcpu_args->vcpu_id;

	run = vcpu_state(vm, vcpu_id);

	while (!READ_ONCE(host_quit)) {
		int current_iteration = READ_ONCE(iteration);

		clock_gettime(CLOCK_MONOTONIC, &start);
		ret = _vcpu_run(vm, vcpu_id);
		ts_diff = timespec_elapsed(start);

		TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
		TEST_ASSERT(get_ucall(vm, vcpu_id, NULL) == UCALL_SYNC,
			    "Invalid guest sync status: exit_reason=%s\n",
			    exit_reason_str(run->exit_reason));

		pr_debug("Got sync event from vCPU %d\n", vcpu_id);
		vcpu_last_completed_iteration[vcpu_id] = current_iteration;
		pr_debug("vCPU %d updated last completed iteration to %d\n",
			 vcpu_id, vcpu_last_completed_iteration[vcpu_id]);

		if (current_iteration) {
			pages_count += vcpu_args->pages;
			total = timespec_add(total, ts_diff);
			pr_debug("vCPU %d iteration %d dirty memory time: %ld.%.9lds\n",
				 vcpu_id, current_iteration, ts_diff.tv_sec,
				 ts_diff.tv_nsec);
		} else {
			pr_debug("vCPU %d iteration %d populate memory time: %ld.%.9lds\n",
				 vcpu_id, current_iteration, ts_diff.tv_sec,
				 ts_diff.tv_nsec);
		}

		while (current_iteration == READ_ONCE(iteration) &&
		       !READ_ONCE(host_quit)) {}
	}

	avg = timespec_div(total, vcpu_last_completed_iteration[vcpu_id]);
	pr_debug("\nvCPU %d dirtied 0x%lx pages over %d iterations in %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
		 vcpu_id, pages_count, vcpu_last_completed_iteration[vcpu_id],
		 total.tv_sec, total.tv_nsec, avg.tv_sec, avg.tv_nsec);
}

struct test_params {
	unsigned long iterations;
	uint64_t phys_offset;
	int wr_fract;
	bool partition_vcpu_memory_access;
	enum vm_mem_backing_src_type backing_src;
	int slots;
};
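
/*
 * Helpers to flip KVM_MEM_LOG_DIRTY_PAGES on every test memslot, so the
 * enable/disable timings below cover all of the slots requested with -x.
 */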
static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = PERF_TEST_MEM_SLOT_INDEX + i;
		int flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0;

		vm_mem_region_set_flags(vm, slot, flags);
	}
}

static inline void enable_dirty_logging(struct kvm_vm *vm, int slots)
{
	toggle_dirty_logging(vm, slots, true);
}

static inline void disable_dirty_logging(struct kvm_vm *vm, int slots)
{
	toggle_dirty_logging(vm, slots, false);
}

static void get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = PERF_TEST_MEM_SLOT_INDEX + i;

		kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
	}
}

static void clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
			    int slots, uint64_t pages_per_slot)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = PERF_TEST_MEM_SLOT_INDEX + i;

		kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
	}
}

static unsigned long **alloc_bitmaps(int slots, uint64_t pages_per_slot)
{
	unsigned long **bitmaps;
	int i;

	bitmaps = malloc(slots * sizeof(bitmaps[0]));
	TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array.");

	for (i = 0; i < slots; i++) {
		bitmaps[i] = bitmap_zalloc(pages_per_slot);
		TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap.");
	}

	return bitmaps;
}

static void free_bitmaps(unsigned long *bitmaps[], int slots)
{
	int i;

	for (i = 0; i < slots; i++)
		free(bitmaps[i]);

	free(bitmaps);
}
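
/*
 * Top-level flow for one guest mode: create the VM, let every vCPU populate
 * its memory region, enable dirty logging, then run p->iterations passes in
 * which the vCPUs re-dirty memory while the host collects (and, with
 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, clears) the dirty log, timing each phase.
 */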
static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct test_params *p = arg;
	struct kvm_vm *vm;
	unsigned long **bitmaps;
	uint64_t guest_num_pages;
	uint64_t host_num_pages;
	uint64_t pages_per_slot;
	int vcpu_id;
	struct timespec start;
	struct timespec ts_diff;
	struct timespec get_dirty_log_total = (struct timespec){0};
	struct timespec vcpu_dirty_total = (struct timespec){0};
	struct timespec avg;
	struct kvm_enable_cap cap = {};
	struct timespec clear_dirty_log_total = (struct timespec){0};

	vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
				 p->slots, p->backing_src,
				 p->partition_vcpu_memory_access);

	perf_test_set_wr_fract(vm, p->wr_fract);

	guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
	host_num_pages = vm_num_host_pages(mode, guest_num_pages);
	pages_per_slot = host_num_pages / p->slots;

	bitmaps = alloc_bitmaps(p->slots, pages_per_slot);

	if (dirty_log_manual_caps) {
		cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
		cap.args[0] = dirty_log_manual_caps;
		vm_enable_cap(vm, &cap);
	}

#ifdef __aarch64__
	vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
#endif

	/* Start the iterations */
	iteration = 0;
	host_quit = false;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
		vcpu_last_completed_iteration[vcpu_id] = -1;

	perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);

	/* Allow the vCPUs to populate memory */
	pr_debug("Starting iteration %d - Populating\n", iteration);
	for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
		while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) !=
		       iteration)
			;
	}

	ts_diff = timespec_elapsed(start);
	pr_info("Populate memory time: %ld.%.9lds\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);

	/* Enable dirty logging */
	clock_gettime(CLOCK_MONOTONIC, &start);
	enable_dirty_logging(vm, p->slots);
	ts_diff = timespec_elapsed(start);
	pr_info("Enabling dirty logging time: %ld.%.9lds\n\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);

	while (iteration < p->iterations) {
		/*
		 * Incrementing the iteration number will start the vCPUs
		 * dirtying memory again.
		 */
		clock_gettime(CLOCK_MONOTONIC, &start);
		iteration++;

		pr_debug("Starting iteration %d\n", iteration);

		for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
			while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id])
			       != iteration)
				;
		}

		ts_diff = timespec_elapsed(start);
		vcpu_dirty_total = timespec_add(vcpu_dirty_total, ts_diff);
		pr_info("Iteration %d dirty memory time: %ld.%.9lds\n",
			iteration, ts_diff.tv_sec, ts_diff.tv_nsec);

		clock_gettime(CLOCK_MONOTONIC, &start);
		get_dirty_log(vm, bitmaps, p->slots);
		ts_diff = timespec_elapsed(start);
		get_dirty_log_total = timespec_add(get_dirty_log_total,
						   ts_diff);
		pr_info("Iteration %d get dirty log time: %ld.%.9lds\n",
			iteration, ts_diff.tv_sec, ts_diff.tv_nsec);

		if (dirty_log_manual_caps) {
			clock_gettime(CLOCK_MONOTONIC, &start);
			clear_dirty_log(vm, bitmaps, p->slots, pages_per_slot);
			ts_diff = timespec_elapsed(start);
			clear_dirty_log_total = timespec_add(clear_dirty_log_total,
							     ts_diff);
			pr_info("Iteration %d clear dirty log time: %ld.%.9lds\n",
				iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
		}
	}

	/* Disable dirty logging */
	clock_gettime(CLOCK_MONOTONIC, &start);
	disable_dirty_logging(vm, p->slots);
	ts_diff = timespec_elapsed(start);
	pr_info("Disabling dirty logging time: %ld.%.9lds\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);

	/* Tell the vcpu threads to quit */
	host_quit = true;
	perf_test_join_vcpu_threads(nr_vcpus);

	avg = timespec_div(get_dirty_log_total, p->iterations);
	pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
		p->iterations, get_dirty_log_total.tv_sec,
		get_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);

	if (dirty_log_manual_caps) {
		avg = timespec_div(clear_dirty_log_total, p->iterations);
		pr_info("Clear dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
			p->iterations, clear_dirty_log_total.tv_sec,
			clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
	}

	free_bitmaps(bitmaps, p->slots);
	perf_test_destroy_vm(vm);
}

static void help(char *name)
{
	puts("");
	printf("usage: %s [-h] [-i iterations] [-p offset] [-g] "
	       "[-m mode] [-b vcpu bytes] [-v vcpus] [-o] [-s mem type] "
	       "[-x memslots]\n", name);
	puts("");
	printf(" -i: specify iteration counts (default: %"PRIu64")\n",
	       TEST_HOST_LOOP_N);
	printf(" -g: Do not enable KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2. This\n"
	       "     makes KVM_GET_DIRTY_LOG clear the dirty log (i.e.\n"
	       "     KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE is not enabled)\n"
	       "     and writes will be tracked as soon as dirty logging is\n"
	       "     enabled on the memslot (i.e. KVM_DIRTY_LOG_INITIALLY_SET\n"
	       "     is not enabled).\n");
	printf(" -p: specify guest physical test memory offset\n"
	       "     Warning: a low offset can conflict with the loaded test code.\n");
	guest_modes_help();
	printf(" -b: specify the size of the memory region which should be\n"
	       "     dirtied by each vCPU. e.g. 10M or 3G.\n"
	       "     (default: 1G)\n");
	printf(" -f: specify the fraction of pages which should be written to\n"
	       "     as opposed to simply read, in the form\n"
	       "     1/<fraction of pages to write>.\n"
	       "     (default: 1 i.e. all pages are written to.)\n");
	printf(" -v: specify the number of vCPUs to run.\n");
	printf(" -o: Overlap guest memory accesses instead of partitioning\n"
	       "     them into a separate region of memory for each vCPU.\n");
	backing_src_help("-s");
	printf(" -x: Split the memory region into this number of memslots.\n"
	       "     (default: 1)\n");
	puts("");
	exit(0);
}
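
/*
 * Example invocation (a sketch only; the binary name and flag values are
 * illustrative, not prescribed by this file):
 *
 *   ./dirty_log_perf_test -i 10 -v 4 -b 512M -x 4
 *
 * runs 10 dirty-memory iterations with 4 vCPUs, 512M of memory per vCPU,
 * and the test region split across 4 memslots.
 */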
int main(int argc, char *argv[])
{
	int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
	struct test_params p = {
		.iterations = TEST_HOST_LOOP_N,
		.wr_fract = 1,
		.partition_vcpu_memory_access = true,
		.backing_src = DEFAULT_VM_MEM_SRC,
		.slots = 1,
	};
	int opt;

	dirty_log_manual_caps =
		kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
	dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
				  KVM_DIRTY_LOG_INITIALLY_SET);

	guest_modes_append_default();

	while ((opt = getopt(argc, argv, "ghi:p:m:b:f:v:os:x:")) != -1) {
		switch (opt) {
		case 'g':
			dirty_log_manual_caps = 0;
			break;
		case 'i':
			p.iterations = atoi(optarg);
			break;
		case 'p':
			p.phys_offset = strtoull(optarg, NULL, 0);
			break;
		case 'm':
			guest_modes_cmdline(optarg);
			break;
		case 'b':
			guest_percpu_mem_size = parse_size(optarg);
			break;
		case 'f':
			p.wr_fract = atoi(optarg);
			TEST_ASSERT(p.wr_fract >= 1,
				    "Write fraction cannot be less than one");
			break;
		case 'v':
			nr_vcpus = atoi(optarg);
			TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
				    "Invalid number of vcpus, must be between 1 and %d", max_vcpus);
			break;
		case 'o':
			p.partition_vcpu_memory_access = false;
			break;
		case 's':
			p.backing_src = parse_backing_src_type(optarg);
			break;
		case 'x':
			p.slots = atoi(optarg);
			break;
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}

	TEST_ASSERT(p.iterations >= 2, "The test should have at least two iterations");

	pr_info("Test iterations: %"PRIu64"\n", p.iterations);

	for_each_guest_mode(run_test, &p);

	return 0;
}