KVM: selftests: Introduce num-pages conversion utilities
Guests and hosts don't have to have the same page size. This means
calculations are necessary when selecting the number of guest pages
to allocate in order to ensure the number is compatible with the
host. Provide utilities to help with those calculations and apply
them where appropriate.
We also revert commit bffed38d4f ("kvm: selftests: aarch64: dirty_log_test:
fix unaligned memslot size") and use vm_adjust_num_guest_pages() there instead.
Signed-off-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 377a41c9ef
commit 87a802d93e
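
As a quick illustration of the intended flow, here is a hypothetical sketch (not part of the patch; the helper name setup_test_mem() and the request of 100 pages are made up for the example, and run_test() in dirty_log_test.c below is the real in-tree user). A test rounds its requested guest page count up to something the host page size can back exactly, then derives host-side counts from the result, much as dirty_log_test sizes its dirty bitmap:

        /* Illustrative sketch only, using the helpers introduced by this patch. */
        static unsigned long *setup_test_mem(struct kvm_vm *vm, enum vm_guest_mode mode,
                                             uint64_t guest_test_phys_mem)
        {
                uint64_t guest_num_pages = 100;         /* arbitrary request */
                uint64_t host_num_pages;

                /* Smallest count >= 100 that maps to a whole number of host pages. */
                guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
                /* The same amount of memory, counted in host pages. */
                host_num_pages = vm_num_host_pages(mode, guest_num_pages);

                vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, guest_test_phys_mem,
                                            1 /* slot */, guest_num_pages, 0);

                /* One dirty-bitmap bit per host page. */
                return bitmap_alloc(host_num_pages);
        }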
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -178,12 +178,11 @@ static void *vcpu_worker(void *data)
 	return NULL;
 }
 
-static void vm_dirty_log_verify(unsigned long *bmap)
+static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap)
 {
+	uint64_t step = vm_num_host_pages(mode, 1);
 	uint64_t page;
 	uint64_t *value_ptr;
-	uint64_t step = host_page_size >= guest_page_size ? 1 :
-			guest_page_size / host_page_size;
 
 	for (page = 0; page < host_num_pages; page += step) {
 		value_ptr = host_test_mem + page * host_page_size;
@@ -289,14 +288,14 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 	 * case where the size is not aligned to 64 pages.
 	 */
 	guest_num_pages = (1ul << (DIRTY_MEM_BITS -
-				   vm_get_page_shift(vm))) + 16;
+				   vm_get_page_shift(vm))) + 3;
+	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
 #ifdef __s390x__
 	/* Round up to multiple of 1M (segment size) */
 	guest_num_pages = (guest_num_pages + 0xff) & ~0xffUL;
 #endif
 	host_page_size = getpagesize();
-	host_num_pages = (guest_num_pages * guest_page_size) / host_page_size +
-			 !!((guest_num_pages * guest_page_size) % host_page_size);
+	host_num_pages = vm_num_host_pages(mode, guest_num_pages);
 
 	if (!phys_offset) {
 		guest_test_phys_mem = (vm_get_max_gfn(vm) -
@@ -367,7 +366,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 		kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0,
 				       host_num_pages);
 #endif
-		vm_dirty_log_verify(bmap);
+		vm_dirty_log_verify(mode, bmap);
 		iteration++;
 		sync_global_to_guest(vm, iteration);
 	}
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -164,6 +164,14 @@ unsigned int vm_get_page_size(struct kvm_vm *vm);
 unsigned int vm_get_page_shift(struct kvm_vm *vm);
 unsigned int vm_get_max_gfn(struct kvm_vm *vm);
 
+unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
+unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);
+static inline unsigned int
+vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
+{
+	return vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
+}
+
 struct kvm_userspace_memory_region *
 kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
 				 uint64_t end);
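
In other words, vm_adjust_num_guest_pages() is just the round trip through both conversions, which rounds a guest page count up to the nearest value that also maps to a whole number of host pages. A worked example with annotated expected results (the page sizes, and a mode with those sizes, are assumptions for illustration, not code from the patch):

        /* 64K guest pages on a 4K host: each guest page spans 16 host pages. */
        vm_num_host_pages(mode, 100);           /* -> 1600 */
        vm_num_guest_pages(mode, 1600);         /* -> 100  */
        vm_adjust_num_guest_pages(mode, 100);   /* -> 100, already host-compatible */

        /* 4K guest pages on a 64K host: 16 guest pages per host page. */
        vm_num_host_pages(mode, 100);           /* -> 7, rounded up */
        vm_num_guest_pages(mode, 7);            /* -> 112 */
        vm_adjust_num_guest_pages(mode, 100);   /* -> 112 */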
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -580,6 +580,10 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
 	size_t huge_page_size = KVM_UTIL_PGS_PER_HUGEPG * vm->page_size;
 	size_t alignment;
 
+	TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
+		"Number of guest pages is not compatible with the host. "
+		"Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages));
+
 	TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
 		"address not on a page boundary.\n"
 		"  guest_paddr: 0x%lx vm->page_size: 0x%x",
@@ -1701,3 +1705,36 @@ unsigned int vm_get_max_gfn(struct kvm_vm *vm)
 {
 	return vm->max_gfn;
 }
+
+static unsigned int vm_calc_num_pages(unsigned int num_pages,
+				      unsigned int page_shift,
+				      unsigned int new_page_shift,
+				      bool ceil)
+{
+	unsigned int n = 1 << (new_page_shift - page_shift);
+
+	if (page_shift >= new_page_shift)
+		return num_pages * (1 << (page_shift - new_page_shift));
+
+	return num_pages / n + !!(ceil && num_pages % n);
+}
+
+static inline int getpageshift(void)
+{
+	return __builtin_ffs(getpagesize()) - 1;
+}
+
+unsigned int
+vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
+{
+	return vm_calc_num_pages(num_guest_pages,
+				 vm_guest_mode_params[mode].page_shift,
+				 getpageshift(), true);
+}
+
+unsigned int
+vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages)
+{
+	return vm_calc_num_pages(num_host_pages, getpageshift(),
+				 vm_guest_mode_params[mode].page_shift, false);
+}
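
To see the arithmetic in isolation, here is a small standalone sketch (not part of the selftests tree) that mirrors vm_calc_num_pages() with hard-coded page shifts instead of vm_guest_mode_params; the only deliberate difference from the patch is that the divisor is computed after the early return, since it is only needed on that path:

        #include <stdbool.h>
        #include <stdio.h>

        /* Convert a page count from one page size (2^page_shift) to another. */
        static unsigned int calc_num_pages(unsigned int num_pages,
                                           unsigned int page_shift,
                                           unsigned int new_page_shift, bool ceil)
        {
                if (page_shift >= new_page_shift)
                        return num_pages * (1 << (page_shift - new_page_shift));

                unsigned int n = 1 << (new_page_shift - page_shift);

                return num_pages / n + !!(ceil && num_pages % n);
        }

        int main(void)
        {
                /* 64K-page guest on a 4K-page host: 3 guest pages span 48 host pages. */
                printf("%u\n", calc_num_pages(3, 16, 12, true));       /* 48 */
                /* 4K-page guest on a 64K-page host: 3 guest pages round up to 1 host page... */
                printf("%u\n", calc_num_pages(3, 12, 16, true));       /* 1 */
                /* ...which holds 16 guest pages, i.e. the "adjusted" guest page count. */
                printf("%u\n", calc_num_pages(1, 16, 12, false));      /* 16 */
                return 0;
        }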