KVM: Use vcpu-specific gva->hva translation when querying host page size
[ Upstream commit f9b84e19221efc5f493156ee0329df3142085f28 ]

Use kvm_vcpu_gfn_to_hva() when retrieving the host page size so that the
correct set of memslots is used when handling x86 page faults in SMM.

Fixes: 54bf36aac520 ("KVM: x86: use vcpu-specific functions to read/write/translate GFNs")
Cc: stable@vger.kernel.org
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
parent 09bd0033df
commit 7426ddf01f
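The diff itself is mechanical, but the reason a vcpu-based lookup matters is that x86 KVM keeps a separate memslot address space for a vCPU in SMM, so a VM-wide gfn->hva translation can consult the wrong slots. The following standalone toy program is only a sketch of that idea: the struct names, the two-entry address-space array, and the in_smm flag are illustrative stand-ins, not the kernel's actual data structures.

/*
 * Toy userspace model (not kernel code) of the memslot-selection issue.
 * Assumption: two memslot sets exist, address space 0 (normal) and
 * address space 1 (SMM), mirroring how x86 KVM separates SMM memory.
 * A VM-wide gfn->hva lookup always uses address space 0; a vCPU-aware
 * lookup picks the set matching the vCPU's current mode.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_ADDRESS_SPACES 2

struct memslot {
	unsigned long base_gfn;
	unsigned long hva;		/* host VA backing base_gfn */
};

struct vm {
	struct memslot slots[NR_ADDRESS_SPACES];
};

struct vcpu {
	struct vm *vm;
	bool in_smm;			/* analogue of the vCPU's SMM flag */
};

/* VM-wide lookup: ignores SMM, always consults address space 0 */
static unsigned long vm_gfn_to_hva(struct vm *vm, unsigned long gfn)
{
	return vm->slots[0].hva + ((gfn - vm->slots[0].base_gfn) << 12);
}

/* vCPU-aware lookup: consults the address space the vCPU is running in */
static unsigned long vcpu_gfn_to_hva(struct vcpu *vcpu, unsigned long gfn)
{
	int as_id = vcpu->in_smm ? 1 : 0;
	struct memslot *s = &vcpu->vm->slots[as_id];

	return s->hva + ((gfn - s->base_gfn) << 12);
}

int main(void)
{
	struct vm vm = {
		.slots = {
			[0] = { .base_gfn = 0x100, .hva = 0x7f0000000000UL },
			[1] = { .base_gfn = 0x100, .hva = 0x7f8000000000UL },
		},
	};
	struct vcpu vcpu = { .vm = &vm, .in_smm = true };

	/* For a vCPU in SMM the two lookups disagree on the same gfn. */
	printf("vm-wide   : %#lx\n", vm_gfn_to_hva(&vm, 0x100));
	printf("vcpu-aware: %#lx\n", vcpu_gfn_to_hva(&vcpu, 0x100));
	return 0;
}

Querying the host page size through the VM-wide path is the same class of mistake: the hva, and therefore the VMA whose size is inspected, may belong to the wrong address space. The patch below threads the vcpu through kvm_host_page_size() and uses kvm_vcpu_gfn_to_hva() instead.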
@@ -638,7 +638,7 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
 	srcu_idx = srcu_read_lock(&kvm->srcu);
 	gfn = gpa_to_gfn(kvm_eq.qaddr);
 
-	page_size = kvm_host_page_size(kvm, gfn);
+	page_size = kvm_host_page_size(vcpu, gfn);
 	if (1ull << kvm_eq.qshift > page_size) {
 		srcu_read_unlock(&kvm->srcu, srcu_idx);
 		pr_warn("Incompatible host page size %lx!\n", page_size);
@@ -1286,12 +1286,12 @@ static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn,
 	return __mmu_gfn_lpage_is_disallowed(gfn, level, slot);
 }
 
-static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
+static int host_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	unsigned long page_size;
 	int i, ret = 0;
 
-	page_size = kvm_host_page_size(kvm, gfn);
+	page_size = kvm_host_page_size(vcpu, gfn);
 
 	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
 		if (page_size >= KVM_HPAGE_SIZE(i))
@@ -1341,7 +1341,7 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
 	if (unlikely(*force_pt_level))
 		return PT_PAGE_TABLE_LEVEL;
 
-	host_level = host_mapping_level(vcpu->kvm, large_gfn);
+	host_level = host_mapping_level(vcpu, large_gfn);
 
 	if (host_level == PT_PAGE_TABLE_LEVEL)
 		return host_level;
@@ -751,7 +751,7 @@ int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
-unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
+unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
 
 struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
@@ -1394,14 +1394,14 @@ bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
 
-unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
+unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct vm_area_struct *vma;
 	unsigned long addr, size;
 
 	size = PAGE_SIZE;
 
-	addr = gfn_to_hva(kvm, gfn);
+	addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
 	if (kvm_is_error_hva(addr))
 		return PAGE_SIZE;
 