/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>

#undef MMU_DEBUG

#ifdef MMU_DEBUG
extern bool dbg;

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(fmt, args...) do { if (dbg) printk("%s: " fmt, __func__, ## args); } while (0)
#define MMU_WARN_ON(x) WARN_ON(x)
#else
#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)
#define MMU_WARN_ON(x) do { } while (0)
#endif
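
/*
 * Illustrative usage only, not part of this header: with MMU_DEBUG defined
 * and "dbg" enabled, the debug printers above emit ad hoc trace output, e.g.
 * (hypothetical call site, "gfn" is just a local gfn_t in the caller):
 *
 *	pgprintk("%s: shadowing gfn %llx\n", __func__, gfn);
 *
 * With MMU_DEBUG left undefined, as above, such calls compile to nothing.
 */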

/*
 * Unlike regular MMU roots, PAE "roots", a.k.a. PDPTEs/PDPTRs, have a PRESENT
 * bit, and thus are guaranteed to be non-zero when valid.  And, when a guest
 * PDPTR is !PRESENT, its corresponding PAE root cannot be set to INVALID_PAGE,
 * as the CPU would treat that as a PRESENT PDPTR with reserved bits set.  Use
 * '0' instead of INVALID_PAGE to indicate an invalid PAE root.
 */
#define INVALID_PAE_ROOT	0
#define IS_VALID_PAE_ROOT(x)	(!!(x))
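
/*
 * Sketch of typical usage, for illustration only: a caller walking the four
 * PAE roots (assuming a pae_root[] array of PDPTE-backed roots, as the legacy
 * shadow MMU keeps) skips unpopulated entries instead of comparing against
 * INVALID_PAGE:
 *
 *	for (i = 0; i < 4; ++i) {
 *		hpa_t root = mmu->pae_root[i];
 *
 *		if (!IS_VALID_PAE_ROOT(root))
 *			continue;
 *		... operate on the valid root ...
 *	}
 */
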
struct kvm_mmu_page {
	/*
	 * Note, "link" through "spt" fit in a single 64 byte cache line on
	 * 64-bit kernels, keep it that way unless there's a reason not to.
	 */
	struct list_head link;
	struct hlist_node hash_link;

	bool tdp_mmu_page;
	bool unsync;
	u8 mmu_valid_gen;
	bool lpage_disallowed; /* Can't be replaced by an equiv large page */

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	/* Currently serving as active root */
	union {
		int root_count;
		refcount_t tdp_mmu_root_count;
	};
	unsigned int unsync_children;
	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
	DECLARE_BITMAP(unsync_child_bitmap, 512);

	struct list_head lpage_disallowed_link;

#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page. */
	atomic_t write_flooding_count;

#ifdef CONFIG_X86_64
	/* Used for freeing the page asynchronously if it is a TDP MMU page. */
	struct rcu_head rcu_head;
#endif
};

extern struct kmem_cache *mmu_page_header_cache;

static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}
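
/*
 * Background note, stated as an assumption about the allocation path rather
 * than something this header guarantees: to_shadow_page() only works because
 * the struct kvm_mmu_page is stashed in the backing page's private field when
 * the shadow page table is allocated, e.g. via something along the lines of
 *
 *	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 */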

static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
{
	return to_shadow_page(__pa(sptep));
}

static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role)
{
	return role.smm ? 1 : 0;
}

static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
{
	return kvm_mmu_role_as_id(sp->role);
}

static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the CPU dirty
	 * log would come from L2 rather than L1.  Therefore, we need to rely
	 * on write protection to record dirty pages, which bypasses PML, since
	 * writes now result in a vmexit.  Note, the check on CPU dirty logging
	 * being enabled is mandatory as the bits used to denote WP-only SPTEs
	 * are reserved for NPT w/ PAE (32-bit KVM).
	 */
	return vcpu->arch.mmu == &vcpu->arch.guest_mmu &&
	       kvm_x86_ops.cpu_dirty_log_size;
}

int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
			    gfn_t gfn, bool can_unsync, bool speculative);

void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn,
				    int min_level);
void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
					u64 start_gfn, u64 pages);

unsigned int pte_list_count(struct kvm_rmap_head *rmap_head);

/*
 * Return values of handle_mmio_page_fault, mmu.page_fault, and fast_page_fault().
 *
 * RET_PF_RETRY: let CPU fault again on the address.
 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
 * RET_PF_FIXED: The faulting entry has been fixed.
 * RET_PF_SPURIOUS: The faulting entry was already fixed, e.g. by another vCPU.
 *
 * Any names added to this enum should be exported to userspace for use in
 * tracepoints via TRACE_DEFINE_ENUM() in mmutrace.h.
 */
enum {
	RET_PF_RETRY = 0,
	RET_PF_EMULATE,
	RET_PF_INVALID,
	RET_PF_FIXED,
	RET_PF_SPURIOUS,
};
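
/*
 * For reference, the userspace export mentioned above follows the standard
 * tracepoint pattern; mmutrace.h is expected to carry one entry per name,
 * along the lines of (illustrative, elided):
 *
 *	TRACE_DEFINE_ENUM(RET_PF_RETRY);
 *	TRACE_DEFINE_ENUM(RET_PF_EMULATE);
 *	...
 *	TRACE_DEFINE_ENUM(RET_PF_SPURIOUS);
 */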

int kvm_mmu_max_mapping_level(struct kvm *kvm,
			      const struct kvm_memory_slot *slot, gfn_t gfn,
			      kvm_pfn_t pfn, int max_level);
void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);

void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);

void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);

#endif /* __KVM_X86_MMU_INTERNAL_H */