KVM: x86: Drop .post_leave_smm(), i.e. the manual post-RSM MMU reset
Drop the .post_leave_smm() emulator callback, which at this point is just
a wrapper to kvm_mmu_reset_context().  The manual context reset is
unnecessary, because unlike enter_smm() which calls vendor MSR/CR helpers
directly, em_rsm() bounces through the KVM helpers, e.g. kvm_set_cr4(),
which are responsible for processing side effects.  em_rsm() is already
subtly relying on this behavior as it doesn't manually do
kvm_update_cpuid_runtime(), e.g. to recognize CR4.OSXSAVE changes.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210609185619.992058-9-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 0128116550
parent 1270e647c8
committed by Paolo Bonzini
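As a side note for readers unfamiliar with the flow the commit message describes, below is a minimal, self-contained C sketch of the pattern being relied on: the CR-write helper itself handles the MMU-reset side effect when role-affecting bits change, so an RSM-style caller needs no manual reset afterwards. Everything in the sketch (toy_vcpu, toy_set_cr4, the bit values) is illustrative and not taken from the kernel sources.

/*
 * Toy model (NOT kernel code): the register-write helper owns the MMU
 * context reset, mirroring how kvm_set_cr4() and friends process side
 * effects on behalf of callers such as em_rsm().
 */
#include <stdio.h>

#define TOY_CR4_MMU_ROLE_BITS	(1UL << 4 | 1UL << 5)	/* illustrative stand-ins */

struct toy_vcpu {
	unsigned long cr4;
};

static void toy_mmu_reset_context(struct toy_vcpu *vcpu)
{
	(void)vcpu;
	printf("MMU context reset\n");
}

/* Analogue of kvm_set_cr4(): the helper performs the side effect. */
static void toy_set_cr4(struct toy_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = vcpu->cr4;

	vcpu->cr4 = cr4;
	if ((cr4 ^ old_cr4) & TOY_CR4_MMU_ROLE_BITS)
		toy_mmu_reset_context(vcpu);
}

/* RSM-style caller: restores CR4 and relies on the helper for the reset. */
int main(void)
{
	struct toy_vcpu vcpu = { .cr4 = 0 };

	toy_set_cr4(&vcpu, 1UL << 5);	/* role bit flips -> reset fires */
	return 0;
}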
@@ -2591,8 +2591,14 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 	if (ret != X86EMUL_CONTINUE)
 		goto emulate_shutdown;
 
-	ctxt->ops->post_leave_smm(ctxt);
-
+	/*
+	 * Note, the ctxt->ops callbacks are responsible for handling side
+	 * effects when writing MSRs and CRs, e.g. MMU context resets, CPUID
+	 * runtime updates, etc... If that changes, e.g. this flow is moved
+	 * out of the emulator to make it look more like enter_smm(), then
+	 * those side effects need to be explicitly handled for both success
+	 * and shutdown.
+	 */
 	return X86EMUL_CONTINUE;
 
 emulate_shutdown:
@@ -233,7 +233,6 @@ struct x86_emulate_ops {
 	void (*exiting_smm)(struct x86_emulate_ctxt *ctxt);
 	int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt,
 			     const char *smstate);
-	void (*post_leave_smm)(struct x86_emulate_ctxt *ctxt);
 	void (*triple_fault)(struct x86_emulate_ctxt *ctxt);
 	int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
 };
@@ -7222,11 +7222,6 @@ static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
 	return static_call(kvm_x86_pre_leave_smm)(emul_to_vcpu(ctxt), smstate);
 }
 
-static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt)
-{
-	kvm_mmu_reset_context(emul_to_vcpu(ctxt));
-}
-
 static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
 {
 	kvm_make_request(KVM_REQ_TRIPLE_FAULT, emul_to_vcpu(ctxt));
@@ -7280,7 +7275,6 @@ static const struct x86_emulate_ops emulate_ops = {
 	.get_hflags          = emulator_get_hflags,
 	.exiting_smm         = emulator_exiting_smm,
 	.pre_leave_smm       = emulator_pre_leave_smm,
-	.post_leave_smm      = emulator_post_leave_smm,
 	.triple_fault        = emulator_triple_fault,
 	.set_xcr             = emulator_set_xcr,
 };