e47c4aee5b
Rename KVM's accessor for retrieving a 'struct kvm_mmu_page' from the
associated host physical address to better convey what the function is
doing.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200622202034.15093-7-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
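For context, the accessor takes the host physical address of a shadow page
table and returns the 'struct kvm_mmu_page' that tracks it. A minimal
call-site sketch using the new name; the vcpu->arch.mmu->root_hpa expression
is an assumption based on KVM code of this era, not part of this change:

        /*
         * Illustrative call site only: resolve the shadow page that backs
         * the current root.  root_hpa is an assumed field, not shown here.
         */
        struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root_hpa);

Spelling out the hpa -> kvm_mmu_page conversion in the name makes call sites
like this self-documenting, which is the point of the rename.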
64 lines
1.7 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>

#include <asm/kvm_host.h>

struct kvm_mmu_page {
        struct list_head link;
        struct hlist_node hash_link;
        struct list_head lpage_disallowed_link;

        bool unsync;
        u8 mmu_valid_gen;
        bool mmio_cached;
        bool lpage_disallowed; /* Can't be replaced by an equiv large page */

        /*
         * The following two entries are used to key the shadow page in the
         * hash table.
         */
        union kvm_mmu_page_role role;
        gfn_t gfn;

        u64 *spt;
        /* hold the gfn of each spte inside spt */
        gfn_t *gfns;
        int root_count;          /* Currently serving as active root */
        unsigned int unsync_children;
        struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
        DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
        /*
         * Used out of the mmu-lock to avoid reading spte values while an
         * update is in progress; see the comments in __get_spte_lockless().
         */
        int clear_spte_count;
#endif

        /* Number of writes since the last time traversal visited this page. */
        atomic_t write_flooding_count;
};
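As the comment above notes, (role, gfn) is the key under which a shadow page
sits in the hash table. A hedged sketch of what a lookup can look like; the
mmu_page_hash bucket array and role.word comparison follow KVM conventions
but are assumptions, not declarations from this header:

        /*
         * Sketch of a lookup keyed by (gfn, role).  Bucket selection and
         * the surrounding function are omitted; names are assumptions.
         */
        struct kvm_mmu_page *sp;

        hlist_for_each_entry(sp, &kvm->arch.mmu_page_hash[hash], hash_link) {
                if (sp->gfn == gfn && sp->role.word == role.word)
                        return sp;
        }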
static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
{
        struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

        return (struct kvm_mmu_page *)page_private(page);
}

static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
{
        return to_shadow_page(__pa(sptep));
}

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
                                    struct kvm_memory_slot *slot, u64 gfn);

#endif /* __KVM_X86_MMU_INTERNAL_H */
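To show how the two accessors compose, here is a hypothetical helper,
assuming an indirect shadow page whose gfns array is populated (the helper
name is invented for illustration, not taken from this file):

        /*
         * Hypothetical helper: recover the gfn recorded for a given spte.
         * __pa(sptep) lands inside the spt page, so sptep_to_sp() finds
         * the owning shadow page, and the index of the spte within
         * sp->spt selects the matching sp->gfns entry.
         */
        static gfn_t example_spte_gfn(u64 *sptep)
        {
                struct kvm_mmu_page *sp = sptep_to_sp(sptep);

                return sp->gfns[sptep - sp->spt];
        }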