/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>

#include <asm/kvm_host.h>

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;
	struct list_head lpage_disallowed_link;

	bool unsync;
	u8 mmu_valid_gen;
	bool mmio_cached;
	bool lpage_disallowed; /* Can't be replaced by an equiv large page */

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	int root_count;          /* Currently serving as active root */
	unsigned int unsync_children;
	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
	DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page. */
	atomic_t write_flooding_count;
};
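
/*
 * Illustrative sketch only (kept under #if 0; the real lookup lives in
 * mmu.c): shadow pages are keyed in kvm->arch.mmu_page_hash by the
 * (gfn, role) pair above.  The function name and the exact hash below are
 * assumptions for illustration, not this header's API.
 */
#if 0
#include <linux/hash.h>

static struct kvm_mmu_page *example_lookup_shadow_page(struct kvm *kvm, gfn_t gfn,
							union kvm_mmu_page_role role)
{
	/* Bucket index: the gfn hashed into the fixed-size hash table. */
	unsigned int bucket = hash_64(gfn, KVM_MMU_HASH_SHIFT);
	struct kvm_mmu_page *sp;

	/* Walk the bucket via hash_link and compare the key fields. */
	hlist_for_each_entry(sp, &kvm->arch.mmu_page_hash[bucket], hash_link) {
		if (sp->gfn == gfn && sp->role.word == role.word)
			return sp;
	}

	return NULL;
}
#endif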

static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
{
	return to_shadow_page(__pa(sptep));
}
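
/*
 * Illustrative usage sketch only (kept under #if 0): every spt page stores
 * a back-pointer to its kvm_mmu_page in page_private(), so any pointer into
 * an spt array can be mapped back to the owning shadow page.  The helper
 * name below is hypothetical.
 */
#if 0
static void example_dump_spte_owner(u64 *sptep)
{
	struct kvm_mmu_page *sp = sptep_to_sp(sptep);

	/* The owning page's gfn and role are the hash-table key above. */
	pr_debug("spte %p belongs to sp gfn %llx level %u\n",
		 sptep, sp->gfn, (unsigned int)sp->role.level);
}
#endif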

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn);
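
/*
 * Illustrative usage sketch only (kept under #if 0): a hypothetical wrapper
 * combining the helpers above; callers in mmu.c combine them differently
 * depending on accounting and dirty-logging needs.
 */
#if 0
static void example_write_protect_gfn(struct kvm *kvm,
				      struct kvm_memory_slot *slot, gfn_t gfn)
{
	/* Bump the slot's disallow_lpage counts so the gfn stays 4K-mapped. */
	kvm_mmu_gfn_disallow_lpage(slot, gfn);

	/*
	 * Strip write access from any sptes mapping the gfn; the return
	 * value says whether a TLB flush is needed (ignored in this sketch).
	 */
	kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn);
}
#endif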

#endif /* __KVM_X86_MMU_INTERNAL_H */