KVM: x86/mmu: Add shadow mask for effective host MTRR memtype
Add shadow_memtype_mask to capture that EPT needs a non-zero memtype mask instead of relying on TDP being enabled, as NPT doesn't need a non-zero mask. This is a glorified nop as kvm_x86_ops.get_mt_mask() returns zero for NPT anyways. No functional change intended. Signed-off-by: Sean Christopherson <seanjc@google.com> Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com> Message-Id: <20220715230016.3762909-4-seanjc@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
82ffad2ddf
commit
38bf9d7bf2
@@ -33,6 +33,7 @@ u64 __read_mostly shadow_mmio_value;
 u64 __read_mostly shadow_mmio_mask;
 u64 __read_mostly shadow_mmio_access_mask;
 u64 __read_mostly shadow_present_mask;
+u64 __read_mostly shadow_memtype_mask;
 u64 __read_mostly shadow_me_value;
 u64 __read_mostly shadow_me_mask;
 u64 __read_mostly shadow_acc_track_mask;
@@ -161,10 +162,10 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,

 	if (level > PG_LEVEL_4K)
 		spte |= PT_PAGE_SIZE_MASK;
-	if (tdp_enabled)
-		spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
-							 kvm_is_mmio_pfn(pfn));

+	if (shadow_memtype_mask)
+		spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
+							 kvm_is_mmio_pfn(pfn));
 	if (host_writable)
 		spte |= shadow_host_writable_mask;
 	else
@@ -391,6 +392,13 @@ void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
 	shadow_nx_mask		= 0ull;
 	shadow_x_mask		= VMX_EPT_EXECUTABLE_MASK;
 	shadow_present_mask	= has_exec_only ? 0ull : VMX_EPT_READABLE_MASK;
+	/*
+	 * EPT overrides the host MTRRs, and so KVM must program the desired
+	 * memtype directly into the SPTEs.  Note, this mask is just the mask
+	 * of all bits that factor into the memtype, the actual memtype must be
+	 * dynamically calculated, e.g. to ensure host MMIO is mapped UC.
+	 */
+	shadow_memtype_mask	= VMX_EPT_MT_MASK | VMX_EPT_IPAT_BIT;
 	shadow_acc_track_mask	= VMX_EPT_RWX_MASK;
 	shadow_host_writable_mask = EPT_SPTE_HOST_WRITABLE;
 	shadow_mmu_writable_mask  = EPT_SPTE_MMU_WRITABLE;
@@ -441,6 +449,13 @@ void kvm_mmu_reset_all_pte_masks(void)
 	shadow_nx_mask		= PT64_NX_MASK;
 	shadow_x_mask		= 0;
 	shadow_present_mask	= PT_PRESENT_MASK;
+
+	/*
+	 * For shadow paging and NPT, KVM uses PAT entry '0' to encode WB
+	 * memtype in the SPTEs, i.e. relies on host MTRRs to provide the
+	 * correct memtype (WB is the "weakest" memtype).
+	 */
+	shadow_memtype_mask	= 0;
 	shadow_acc_track_mask	= 0;
 	shadow_me_mask		= 0;
 	shadow_me_value		= 0;
@@ -147,6 +147,7 @@ extern u64 __read_mostly shadow_mmio_value;
 extern u64 __read_mostly shadow_mmio_mask;
 extern u64 __read_mostly shadow_mmio_access_mask;
 extern u64 __read_mostly shadow_present_mask;
+extern u64 __read_mostly shadow_memtype_mask;
 extern u64 __read_mostly shadow_me_value;
 extern u64 __read_mostly shadow_me_mask;
Loading…
x
Reference in New Issue
Block a user