KVM: PPC: Book3s: PR: Add (dumb) MMU Notifier support
Now that we have very simple MMU Notifier support for e500 in place, also add
the same simple support to book3s. It gets us one step closer to actual fast
support.

Signed-off-by: Alexander Graf <agraf@suse.de>
commit 9b0cb3c808
parent 03d25c5bd5
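For context, a rough sketch (not part of this commit) of how the generic side
reaches the hooks added below: when an architecture defines
KVM_ARCH_WANT_MMU_NOTIFIER, virt/kvm/kvm_main.c registers an mmu_notifier per
VM and forwards host-side unmaps to the arch's kvm_unmap_hva(). Simplified
from the generic code of this era (~v3.6); srcu and sequence-number details
are elided here.

/*
 * Sketch only: simplified shape of the generic dispatch in
 * virt/kvm/kvm_main.c (~v3.6). Locking shown, srcu elided.
 */
static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
                                             struct mm_struct *mm,
                                             unsigned long address)
{
        struct kvm *kvm = container_of(mn, struct kvm, mmu_notifier);
        int need_tlb_flush;

        spin_lock(&kvm->mmu_lock);
        kvm->mmu_notifier_seq++;
        need_tlb_flush = kvm_unmap_hva(kvm, address);  /* arch hook below */
        spin_unlock(&kvm->mmu_lock);

        if (need_tlb_flush)
                kvm_flush_remote_tlbs(kvm);
}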
arch/powerpc/include/asm/kvm_host.h:

@@ -46,8 +46,7 @@
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 #endif

-#if defined(CONFIG_KVM_BOOK3S_64_HV) || defined(CONFIG_KVM_E500V2) || \
-    defined(CONFIG_KVM_E500MC)
+#if !defined(CONFIG_KVM_440)
 #include <linux/mmu_notifier.h>

 #define KVM_ARCH_WANT_MMU_NOTIFIER
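The net effect of this hunk: the notifier declarations, previously limited to
the HV and e500 flavours, are now pulled in for every PPC KVM target except
440. Reconstructed post-patch state (the closing #endif sits outside the hunk
and is assumed here):

/* Guard in kvm_host.h after this patch (reconstruction) */
#if !defined(CONFIG_KVM_440)
#include <linux/mmu_notifier.h>

#define KVM_ARCH_WANT_MMU_NOTIFIER
/* ... hva handler declarations ... */
#endif /* assumed: closing guard not shown in the hunk */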
arch/powerpc/kvm/Kconfig:

@@ -36,6 +36,7 @@ config KVM_BOOK3S_64_HANDLER
 config KVM_BOOK3S_PR
        bool
        select KVM_MMIO
+       select MMU_NOTIFIER

 config KVM_BOOK3S_32
        tristate "KVM support for PowerPC book3s_32 processors"
arch/powerpc/kvm/book3s_32_mmu_host.c:

@@ -254,6 +254,7 @@ next_pteg:

        kvmppc_mmu_hpte_cache_map(vcpu, pte);

+       kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
 out:
        return r;
 }
arch/powerpc/kvm/book3s_64_mmu_host.c:

@@ -168,6 +168,7 @@ map_again:

                kvmppc_mmu_hpte_cache_map(vcpu, pte);
        }
+       kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);

 out:
        return r;
arch/powerpc/kvm/book3s_mmu_hpte.c:

@@ -114,11 +114,6 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
        hlist_del_init_rcu(&pte->list_vpte);
        hlist_del_init_rcu(&pte->list_vpte_long);

-       if (pte->pte.may_write)
-               kvm_release_pfn_dirty(pte->pfn);
-       else
-               kvm_release_pfn_clean(pte->pfn);
-
        spin_unlock(&vcpu3s->mmu_lock);

        vcpu3s->hpte_cache_count--;
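These three hunks change the page-reference lifetime: the shadow MMU used to
hold a reference from map time until invalidate_pte(), releasing it dirty or
clean depending on may_write; now the reference is dropped immediately after
the shadow entry is cached, because the MMU notifier guarantees the shadow
entries are flushed before the host actually unmaps a page. For reference,
the generic helper whose call is removed above looked roughly like this in
kernels of this era (a sketch, not part of this commit):

/* virt/kvm/kvm_main.c (~v3.6), simplified: releasing "dirty" marks the
 * page dirty before dropping the reference that gfn_to_pfn() took. */
void kvm_release_pfn_dirty(pfn_t pfn)
{
        kvm_set_pfn_dirty(pfn);
        kvm_release_pfn_clean(pfn);
}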
arch/powerpc/kvm/book3s_pr.c:

@@ -90,8 +90,55 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)

 void kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 {
        /* We misuse TLB_FLUSH to indicate that we want to clear
           all shadow cache entries */
        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                kvmppc_mmu_pte_flush(vcpu, 0, 0);
 }

+/************* MMU Notifiers *************/
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+       trace_kvm_unmap_hva(hva);
+
+       /*
+        * Flush all shadow tlb entries everywhere. This is slow, but
+        * we are 100% sure that we catch the to be unmapped page
+        */
+       kvm_flush_remote_tlbs(kvm);
+
+       return 0;
+}
+
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+       /* kvm_unmap_hva flushes everything anyways */
+       kvm_unmap_hva(kvm, start);
+
+       return 0;
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+       /* XXX could be more clever ;) */
+       return 0;
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+       /* XXX could be more clever ;) */
+       return 0;
+}
+
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+       /* The page will get remapped properly on its next fault */
+       kvm_unmap_hva(kvm, hva);
+}
+
+/*****************************************/
+
 static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
 {
        ulong smsr = vcpu->arch.shared->msr;
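How the pieces fit together: kvm_unmap_hva() calls kvm_flush_remote_tlbs(),
which posts KVM_REQ_TLB_FLUSH to every vcpu; kvmppc_core_check_requests()
above then "misuses" that request to drop the entire shadow HPTE cache via
kvmppc_mmu_pte_flush(vcpu, 0, 0), so no stale shadow mapping can outlive a
host-side unmap. The generic flush looked roughly like this at the time
(sketch only, stats/ordering details elided):

/* virt/kvm/kvm_main.c (~v3.6), simplified */
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
        /* Post KVM_REQ_TLB_FLUSH to all vcpus and kick them out of
         * guest mode; each vcpu handles the request on its next entry. */
        if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
                ++kvm->stat.remote_tlb_flush;
}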