KVM: x86/mmu: replace direct_map with root_role.direct
direct_map is always equal to the direct field of the root page's role:

- for shadow paging, direct_map is true if CR0.PG=0 and root_role.direct is
  copied from cpu_role.base.direct

- for TDP, it is always true and root_role.direct is also always true

- for shadow TDP, it is always false and root_role.direct is also always false

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 347a0d0ded
parent 4d25502aa1
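The invariant the commit relies on can be illustrated with a minimal standalone sketch (this is not kernel code: struct mmu, mmu_page_role, enum mmu_mode and init_mmu below are simplified, hypothetical stand-ins for struct kvm_mmu, union kvm_mmu_page_role and KVM's MMU init paths):

/*
 * Standalone illustration of why direct_map always equals
 * root_role.direct in all three paging modes.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct mmu_page_role {
        bool direct;            /* stand-in for root_role.direct */
};

struct mmu {
        struct mmu_page_role root_role;
        bool direct_map;        /* the field the commit removes */
};

enum mmu_mode { SHADOW_PAGING, TDP, SHADOW_TDP };

/* Hypothetical helper mirroring how the two fields are derived. */
static void init_mmu(struct mmu *mmu, enum mmu_mode mode, bool cr0_pg)
{
        switch (mode) {
        case SHADOW_PAGING:
                /* direct iff the guest runs with paging off (CR0.PG=0) */
                mmu->root_role.direct = !cr0_pg;
                mmu->direct_map = !cr0_pg;
                break;
        case TDP:
                /* TDP roots never shadow a guest page table */
                mmu->root_role.direct = true;
                mmu->direct_map = true;
                break;
        case SHADOW_TDP:
                /* nested: KVM shadows the guest's own TDP tables */
                mmu->root_role.direct = false;
                mmu->direct_map = false;
                break;
        }
}

int main(void)
{
        enum mmu_mode modes[] = { SHADOW_PAGING, TDP, SHADOW_TDP };
        struct mmu mmu;

        for (int m = 0; m < 3; m++)
                for (int pg = 0; pg < 2; pg++) {
                        init_mmu(&mmu, modes[m], pg);
                        /* never diverges, so the extra field is redundant */
                        assert(mmu.direct_map == mmu.root_role.direct);
                }
        printf("direct_map == root_role.direct in every mode\n");
        return 0;
}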
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -438,7 +438,6 @@ struct kvm_mmu {
 	struct kvm_mmu_root_info root;
 	union kvm_cpu_role cpu_role;
 	union kvm_mmu_page_role root_role;
-	bool direct_map;
 
 	/*
 	 * The pkru_mask indicates if protection key checks are needed.  It
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2028,7 +2028,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 					     int direct,
 					     unsigned int access)
 {
-	bool direct_mmu = vcpu->arch.mmu->direct_map;
+	bool direct_mmu = vcpu->arch.mmu->root_role.direct;
 	union kvm_mmu_page_role role;
 	struct hlist_head *sp_list;
 	unsigned quadrant;
@@ -2133,7 +2133,7 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
 
 	if (iterator->level >= PT64_ROOT_4LEVEL &&
 	    vcpu->arch.mmu->cpu_role.base.level < PT64_ROOT_4LEVEL &&
-	    !vcpu->arch.mmu->direct_map)
+	    !vcpu->arch.mmu->root_role.direct)
 		iterator->level = PT32E_ROOT_LEVEL;
 
 	if (iterator->level == PT32E_ROOT_LEVEL) {
@@ -2515,7 +2515,7 @@ static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 	gpa_t gpa;
 	int r;
 
-	if (vcpu->arch.mmu->direct_map)
+	if (vcpu->arch.mmu->root_role.direct)
 		return 0;
 
 	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
@@ -3596,7 +3596,8 @@ static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
 	 * equivalent level in the guest's NPT to shadow. Allocate the tables
 	 * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
 	 */
-	if (mmu->direct_map || mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL ||
+	if (mmu->root_role.direct ||
+	    mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL ||
 	    mmu->root_role.level < PT64_ROOT_4LEVEL)
 		return 0;
 
@@ -3693,7 +3694,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 	int i;
 	struct kvm_mmu_page *sp;
 
-	if (vcpu->arch.mmu->direct_map)
+	if (vcpu->arch.mmu->root_role.direct)
 		return;
 
 	if (!VALID_PAGE(vcpu->arch.mmu->root.hpa))
@@ -3923,7 +3924,7 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 
 	arch.token = alloc_apf_token(vcpu);
 	arch.gfn = gfn;
-	arch.direct_map = vcpu->arch.mmu->direct_map;
+	arch.direct_map = vcpu->arch.mmu->root_role.direct;
 	arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
 
 	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
@@ -4141,7 +4142,6 @@ static void nonpaging_init_context(struct kvm_mmu *context)
 	context->gva_to_gpa = nonpaging_gva_to_gpa;
 	context->sync_page = nonpaging_sync_page;
 	context->invlpg = NULL;
-	context->direct_map = true;
 }
 
 static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
@@ -4723,7 +4723,6 @@ static void paging64_init_context(struct kvm_mmu *context)
 	context->gva_to_gpa = paging64_gva_to_gpa;
 	context->sync_page = paging64_sync_page;
 	context->invlpg = paging64_invlpg;
-	context->direct_map = false;
 }
 
 static void paging32_init_context(struct kvm_mmu *context)
@@ -4732,7 +4731,6 @@ static void paging32_init_context(struct kvm_mmu *context)
 	context->gva_to_gpa = paging32_gva_to_gpa;
 	context->sync_page = paging32_sync_page;
 	context->invlpg = paging32_invlpg;
-	context->direct_map = false;
 }
 
 static union kvm_cpu_role
@@ -4822,7 +4820,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
 	context->page_fault = kvm_tdp_page_fault;
 	context->sync_page = nonpaging_sync_page;
 	context->invlpg = NULL;
-	context->direct_map = true;
 	context->get_guest_pgd = get_cr3;
 	context->get_pdptr = kvm_pdptr_read;
 	context->inject_page_fault = kvm_inject_page_fault;
@@ -4952,8 +4949,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 		context->gva_to_gpa = ept_gva_to_gpa;
 		context->sync_page = ept_sync_page;
 		context->invlpg = ept_invlpg;
-		context->direct_map = false;
 
 		update_permission_bitmask(context, true);
 		context->pkru_mask = 0;
 		reset_rsvds_bits_mask_ept(vcpu, context, execonly, huge_page_level);
@@ -5068,13 +5065,13 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 {
 	int r;
 
-	r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
+	r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->root_role.direct);
 	if (r)
 		goto out;
 	r = mmu_alloc_special_roots(vcpu);
 	if (r)
 		goto out;
-	if (vcpu->arch.mmu->direct_map)
+	if (vcpu->arch.mmu->root_role.direct)
 		r = mmu_alloc_direct_roots(vcpu);
 	else
 		r = mmu_alloc_shadow_roots(vcpu);
@@ -5331,7 +5328,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
 		       void *insn, int insn_len)
 {
 	int r, emulation_type = EMULTYPE_PF;
-	bool direct = vcpu->arch.mmu->direct_map;
+	bool direct = vcpu->arch.mmu->root_role.direct;
 
 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
 		return RET_PF_RETRY;
@@ -5362,7 +5359,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
 	 * paging in both guests. If true, we simply unprotect the page
 	 * and resume the guest.
 	 */
-	if (vcpu->arch.mmu->direct_map &&
+	if (vcpu->arch.mmu->root_role.direct &&
 	    (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
 		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
 		return 1;
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8101,7 +8101,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	    WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))
 		return false;
 
-	if (!vcpu->arch.mmu->direct_map) {
+	if (!vcpu->arch.mmu->root_role.direct) {
 		/*
 		 * Write permission should be allowed since only
 		 * write access need to be emulated.
|
||||
kvm_release_pfn_clean(pfn);
|
||||
|
||||
/* The instructions are well-emulated on direct mmu. */
|
||||
if (vcpu->arch.mmu->direct_map) {
|
||||
if (vcpu->arch.mmu->root_role.direct) {
|
||||
unsigned int indirect_shadow_pages;
|
||||
|
||||
write_lock(&vcpu->kvm->mmu_lock);
|
||||
@@ -8202,7 +8202,7 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
 	vcpu->arch.last_retry_eip = ctxt->eip;
 	vcpu->arch.last_retry_addr = cr2_or_gpa;
 
-	if (!vcpu->arch.mmu->direct_map)
+	if (!vcpu->arch.mmu->root_role.direct)
 		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
 
 	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
@@ -8482,7 +8482,7 @@ restart:
 	ctxt->exception.address = cr2_or_gpa;
 
 	/* With shadow page tables, cr2 contains a GVA or nGPA. */
-	if (vcpu->arch.mmu->direct_map) {
+	if (vcpu->arch.mmu->root_role.direct) {
 		ctxt->gpa_available = true;
 		ctxt->gpa_val = cr2_or_gpa;
 	}
@@ -12366,7 +12366,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 {
 	int r;
 
-	if ((vcpu->arch.mmu->direct_map != work->arch.direct_map) ||
+	if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) ||
 	      work->wakeup_all)
 		return;
 
@@ -12374,7 +12374,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 	if (unlikely(r))
 		return;
 
-	if (!vcpu->arch.mmu->direct_map &&
+	if (!vcpu->arch.mmu->root_role.direct &&
 	      work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu))
 		return;
 