KVM: x86: Drop current_vcpu for kvm_running_vcpu + kvm_arch_vcpu variable
Use the generic kvm_running_vcpu plus a new 'handling_intr_from_guest' variable in kvm_arch_vcpu instead of the semi-redundant current_vcpu. kvm_before/after_interrupt() must be called while the vCPU is loaded (which protects against preemption), thus kvm_running_vcpu is guaranteed to be non-NULL when handling_intr_from_guest is non-zero.

Switching to kvm_get_running_vcpu() allows moving KVM's perf callbacks to generic code, and the new flag will be used in a future patch to more precisely identify the "NMI from guest" case.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Link: https://lore.kernel.org/r/20211111020738.2512932-11-seanjc@google.com
This commit is contained in: commit 73cd107b96 (parent 87b940a067)
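The invariant the message relies on can be sketched as a minimal illustrative caller. Only kvm_before_interrupt()/kvm_after_interrupt() below come from this patch; the enclosing function name is made up for illustration. Because the helpers run while the vCPU is loaded, any IRQ/NMI that fires inside the bracketed window observes both a non-NULL kvm_get_running_vcpu() and a non-zero flag.

/* Illustrative only: how a VM-exit path brackets host interrupt handling. */
static void example_handle_interrupt_from_guest(struct kvm_vcpu *vcpu)
{
	kvm_before_interrupt(vcpu);	/* handling_intr_from_guest = 1 */
	/*
	 * The host IRQ/NMI handler runs here.  The vCPU is loaded, so
	 * kvm_get_running_vcpu() != NULL whenever the flag reads non-zero.
	 */
	kvm_after_interrupt(vcpu);	/* handling_intr_from_guest = 0 */
}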
arch/x86/include/asm/kvm_host.h
@@ -773,6 +773,7 @@ struct kvm_vcpu_arch {
 	unsigned nmi_pending; /* NMI queued after currently running handler */
 	bool nmi_injected;    /* Trying to inject an NMI this entry */
 	bool smi_pending;     /* SMI queued after currently running handler */
+	u8 handling_intr_from_guest;

 	struct kvm_mtrr mtrr_state;
 	u64 pat;
@@ -1895,8 +1896,6 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
 void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);

-unsigned int kvm_guest_state(void);
-
 void __user *__x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
 				     u32 size);
 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
arch/x86/kvm/pmu.c
@@ -87,7 +87,7 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event,
 		 * woken up. So we should wake it, but this is impossible from
 		 * NMI context. Do it from irq work instead.
 		 */
-		if (!kvm_guest_state())
+		if (!kvm_handling_nmi_from_guest(pmc->vcpu))
 			irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
 		else
 			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
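For context on the irq_work path above (not part of this patch): waking a vCPU is not NMI-safe, so the non-guest case defers delivery to IRQ context. The deferred handler in the same file looks roughly like the sketch below; treat the body as a paraphrase rather than the file's exact code. pmu_to_vcpu() is the container_of() helper from arch/x86/kvm/pmu.h.

/* Runs later in IRQ context, where delivering the PMI is legal. */
static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	kvm_pmu_deliver_pmi(vcpu);
}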
arch/x86/kvm/x86.c
@@ -8469,15 +8469,17 @@ static void kvm_timer_init(void)
 			  kvmclock_cpu_online, kvmclock_cpu_down_prep);
 }

-DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
-EXPORT_PER_CPU_SYMBOL_GPL(current_vcpu);
-
-unsigned int kvm_guest_state(void)
+static inline bool kvm_pmi_in_guest(struct kvm_vcpu *vcpu)
+{
+	return vcpu && vcpu->arch.handling_intr_from_guest;
+}
+
+static unsigned int kvm_guest_state(void)
 {
-	struct kvm_vcpu *vcpu = __this_cpu_read(current_vcpu);
+	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
 	unsigned int state;

-	if (!vcpu)
+	if (!kvm_pmi_in_guest(vcpu))
 		return 0;

 	state = PERF_GUEST_ACTIVE;
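For reference, kvm_guest_state() and friends are consumed by perf through the callback table registered elsewhere in this file. The shape below follows the parent commit 87b940a067 and is reproduced from that context, so treat it as illustrative rather than part of this diff.

static struct perf_guest_info_callbacks kvm_guest_cbs = {
	.state			= kvm_guest_state,
	.get_ip			= kvm_guest_get_ip,
	.handle_intel_pt_intr	= NULL,	/* set only when PT PMIs go to the guest */
};

perf then queries these via helpers such as perf_guest_state() when deciding whether a PMI sample should be attributed to the guest.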
@@ -8489,9 +8491,10 @@ unsigned int kvm_guest_state(void)

 static unsigned long kvm_guest_get_ip(void)
 {
-	struct kvm_vcpu *vcpu = __this_cpu_read(current_vcpu);
+	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

-	if (WARN_ON_ONCE(!vcpu))
+	/* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
+	if (WARN_ON_ONCE(!kvm_pmi_in_guest(vcpu)))
 		return 0;

 	return kvm_rip_read(vcpu);
@@ -8499,10 +8502,10 @@ static unsigned long kvm_guest_get_ip(void)

 static unsigned int kvm_handle_intel_pt_intr(void)
 {
-	struct kvm_vcpu *vcpu = __this_cpu_read(current_vcpu);
+	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

 	/* '0' on failure so that the !PT case can use a RET0 static call. */
-	if (!vcpu)
+	if (!kvm_pmi_in_guest(vcpu))
 		return 0;

 	kvm_make_request(KVM_REQ_PMI, vcpu);
arch/x86/kvm/x86.h
@@ -385,18 +385,20 @@ static inline bool kvm_cstate_in_guest(struct kvm *kvm)
 	return kvm->arch.cstate_in_guest;
 }

-DECLARE_PER_CPU(struct kvm_vcpu *, current_vcpu);
-
 static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu)
 {
-	__this_cpu_write(current_vcpu, vcpu);
+	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 1);
 }

 static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
 {
-	__this_cpu_write(current_vcpu, NULL);
+	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 0);
 }

+static inline bool kvm_handling_nmi_from_guest(struct kvm_vcpu *vcpu)
+{
+	return !!vcpu->arch.handling_intr_from_guest;
+}
+
 static inline bool kvm_pat_valid(u64 data)
 {