KVM: PPC: Book3S HV: Fix conversion to gfn-based MMU notifier callbacks
Commit b1c5356e87 ("KVM: PPC: Convert to the gfn-based MMU notifier callbacks") causes unmap_gfn_range and age_gfn callbacks to only work on the first gfn in the range. It also makes the aging callbacks call into both radix and hash aging functions for radix guests. Fix this.

Add warnings for the single-gfn calls that have been converted to range callbacks, in case they ever receive ranges greater than 1.

Fixes: b1c5356e87 ("KVM: PPC: Convert to the gfn-based MMU notifier callbacks")
Reported-by: Bharata B Rao <bharata@linux.ibm.com>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Tested-by: Bharata B Rao <bharata@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210505121509.1470207-1-npiggin@gmail.com
commit 32b48bf851
parent c6b05f4e23
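The shape of the fix, condensed from the hunks below: the range-based callbacks (unmap, age) now walk every gfn in [range->start, range->end) and dispatch to either the radix or the HPT (rmap) helper, while the callbacks that are inherently single-gfn (test_age, set_spte) keep acting on range->start and gain a WARN_ON guard. A minimal sketch of the iteration pattern (simplified from the diff; not the complete functions):

	/* Walk the whole notifier range instead of only range->start. */
	bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range)
	{
		gfn_t gfn;

		if (kvm_is_radix(kvm)) {
			for (gfn = range->start; gfn < range->end; gfn++)
				kvm_unmap_radix(kvm, range->slot, gfn);	/* radix guest */
		} else {
			for (gfn = range->start; gfn < range->end; gfn++)
				kvm_unmap_rmapp(kvm, range->slot, gfn);	/* HPT guest */
		}

		/* The helpers invalidate as they go; no extra flush is requested here. */
		return false;
	}

kvm_age_gfn_hv follows the same loop but ORs each per-gfn "referenced" result into its return value.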
@@ -210,7 +210,7 @@ extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
 			unsigned int lpid);
 extern int kvmppc_radix_init(void);
 extern void kvmppc_radix_exit(void);
-extern bool kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			unsigned long gfn);
 extern bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			unsigned long gfn);
@@ -795,7 +795,7 @@ static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i,
 	}
 }
 
-static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
+static void kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			    unsigned long gfn)
 {
 	unsigned long i;
@@ -829,15 +829,21 @@ static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
 		unlock_rmap(rmapp);
 		__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
 	}
-	return false;
 }
 
 bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-	if (kvm_is_radix(kvm))
-		return kvm_unmap_radix(kvm, range->slot, range->start);
+	gfn_t gfn;
+
+	if (kvm_is_radix(kvm)) {
+		for (gfn = range->start; gfn < range->end; gfn++)
+			kvm_unmap_radix(kvm, range->slot, gfn);
+	} else {
+		for (gfn = range->start; gfn < range->end; gfn++)
+			kvm_unmap_rmapp(kvm, range->slot, gfn);
+	}
 
-	return kvm_unmap_rmapp(kvm, range->slot, range->start);
+	return false;
 }
 
 void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
@@ -924,10 +930,18 @@ static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
 
 bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-	if (kvm_is_radix(kvm))
-		kvm_age_radix(kvm, range->slot, range->start);
+	gfn_t gfn;
+	bool ret = false;
 
-	return kvm_age_rmapp(kvm, range->slot, range->start);
+	if (kvm_is_radix(kvm)) {
+		for (gfn = range->start; gfn < range->end; gfn++)
+			ret |= kvm_age_radix(kvm, range->slot, gfn);
+	} else {
+		for (gfn = range->start; gfn < range->end; gfn++)
+			ret |= kvm_age_rmapp(kvm, range->slot, gfn);
+	}
+
+	return ret;
 }
 
 static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
@@ -965,18 +979,24 @@ static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
 
 bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-	if (kvm_is_radix(kvm))
-		kvm_test_age_radix(kvm, range->slot, range->start);
+	WARN_ON(range->start + 1 != range->end);
 
-	return kvm_test_age_rmapp(kvm, range->slot, range->start);
+	if (kvm_is_radix(kvm))
+		return kvm_test_age_radix(kvm, range->slot, range->start);
+	else
+		return kvm_test_age_rmapp(kvm, range->slot, range->start);
 }
 
 bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-	if (kvm_is_radix(kvm))
-		return kvm_unmap_radix(kvm, range->slot, range->start);
+	WARN_ON(range->start + 1 != range->end);
 
-	return kvm_unmap_rmapp(kvm, range->slot, range->start);
+	if (kvm_is_radix(kvm))
+		kvm_unmap_radix(kvm, range->slot, range->start);
+	else
+		kvm_unmap_rmapp(kvm, range->slot, range->start);
+
+	return false;
 }
 
 static int vcpus_running(struct kvm *kvm)
@@ -993,7 +993,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
 }
 
 /* Called with kvm->mmu_lock held */
-bool kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
 		     unsigned long gfn)
 {
 	pte_t *ptep;
@@ -1002,14 +1002,13 @@ bool kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
 
 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) {
 		uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT);
-		return false;
+		return;
 	}
 
 	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
 	if (ptep && pte_present(*ptep))
 		kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
 				 kvm->arch.lpid);
-	return false;
 }
 
 /* Called with kvm->mmu_lock held */