KVM: x86: use static calls to reduce kvm_x86_ops overhead
Convert kvm_x86_ops to use static calls. Note that all kvm_x86_ops are
covered here except for 'pmu_ops' and 'nested ops'.

Here are some numbers running cpuid in a loop of 1 million calls averaged
over 5 runs, measured in the vm (lower is better).

Intel Xeon 3000MHz:

           |default    |mitigations=off
---------------------------------------
vanilla    |.671s      |.486s
static call|.573s(-15%)|.458s(-6%)

AMD EPYC 2500MHz:

           |default    |mitigations=off
---------------------------------------
vanilla    |.710s      |.609s
static call|.664s(-6%) |.609s(0%)

Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Signed-off-by: Jason Baron <jbaron@akamai.com>
Message-Id: <e057bf1b8a7ad15652df6eeba3f907ae758d3399.1610680941.git.jbaron@akamai.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit b3646477d4
parent 9af5471bdb
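For context, the conversion relies on the kernel's static_call infrastructure: each kvm_x86_ops member is mirrored by a static call key that gets patched to the vendor (VMX or SVM) implementation at setup time, so call sites become direct, patchable calls rather than indirect calls (which go through retpolines when mitigations are enabled, hence the larger win in the "default" column above). The sketch below is illustrative only; the two sample ops and the kvm_ops_static_call_update() helper are stand-ins, not the exact macros added by this series.

/* Illustrative sketch only, not the series' exact macros. */
#include <linux/static_call.h>

struct kvm_vcpu;

struct kvm_x86_ops {
	void (*tlb_flush_current)(struct kvm_vcpu *vcpu);
	void (*migrate_timers)(struct kvm_vcpu *vcpu);	/* optional, may be NULL */
	/* ... */
};

/* One static call key per op, initially pointing at nothing. */
DEFINE_STATIC_CALL_NULL(kvm_x86_tlb_flush_current,
			*(((struct kvm_x86_ops *)0)->tlb_flush_current));
DEFINE_STATIC_CALL_NULL(kvm_x86_migrate_timers,
			*(((struct kvm_x86_ops *)0)->migrate_timers));

/* At vendor module setup, patch every key to the real implementation. */
static void kvm_ops_static_call_update(struct kvm_x86_ops *ops)
{
	static_call_update(kvm_x86_tlb_flush_current, ops->tlb_flush_current);
	static_call_update(kvm_x86_migrate_timers, ops->migrate_timers);
}

/*
 * Call sites then use:
 *   static_call(kvm_x86_tlb_flush_current)(vcpu);    - mandatory op
 *   static_call_cond(kvm_x86_migrate_timers)(vcpu);  - optional op, no-op when NULL
 */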
@@ -1374,7 +1374,7 @@ void kvm_arch_free_vm(struct kvm *kvm);
 static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
 {
 	if (kvm_x86_ops.tlb_remote_flush &&
-	    !kvm_x86_ops.tlb_remote_flush(kvm))
+	    !static_call(kvm_x86_tlb_remote_flush)(kvm))
 		return 0;
 	else
 		return -ENOTSUPP;
@@ -1767,14 +1767,12 @@ static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
 
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
 {
-	if (kvm_x86_ops.vcpu_blocking)
-		kvm_x86_ops.vcpu_blocking(vcpu);
+	static_call_cond(kvm_x86_vcpu_blocking)(vcpu);
 }
 
 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 {
-	if (kvm_x86_ops.vcpu_unblocking)
-		kvm_x86_ops.vcpu_unblocking(vcpu);
+	static_call_cond(kvm_x86_vcpu_unblocking)(vcpu);
 }
 
 static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
@@ -182,7 +182,7 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
 
 	/* Invoke the vendor callback only after the above state is updated. */
-	kvm_x86_ops.vcpu_after_set_cpuid(vcpu);
+	static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu);
 }
 
 static int is_efer_nx(void)
@@ -1154,7 +1154,7 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
 		addr = gfn_to_hva(kvm, gfn);
 		if (kvm_is_error_hva(addr))
 			return 1;
-		kvm_x86_ops.patch_hypercall(vcpu, instructions);
+		static_call(kvm_x86_patch_hypercall)(vcpu, instructions);
 		((unsigned char *)instructions)[3] = 0xc3; /* ret */
 		if (__copy_to_user((void __user *)addr, instructions, 4))
 			return 1;
@@ -1745,7 +1745,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 	 * hypercall generates UD from non zero cpl and real mode
 	 * per HYPER-V spec
 	 */
-	if (kvm_x86_ops.get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
+	if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || !is_protmode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
 		return 1;
 	}
@@ -143,8 +143,7 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
 {
 	__kvm_migrate_apic_timer(vcpu);
 	__kvm_migrate_pit_timer(vcpu);
-	if (kvm_x86_ops.migrate_timers)
-		kvm_x86_ops.migrate_timers(vcpu);
+	static_call_cond(kvm_x86_migrate_timers)(vcpu);
 }
 
 bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
@@ -68,7 +68,7 @@ static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
 		return 0;
 
 	if (!kvm_register_is_available(vcpu, reg))
-		kvm_x86_ops.cache_reg(vcpu, reg);
+		static_call(kvm_x86_cache_reg)(vcpu, reg);
 
 	return vcpu->arch.regs[reg];
 }
@@ -108,7 +108,7 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
 	might_sleep();  /* on svm */
 
 	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
-		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_PDPTR);
+		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR);
 
 	return vcpu->arch.walk_mmu->pdptrs[index];
 }
@@ -118,7 +118,7 @@ static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
 	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
 	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
 	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
-		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR0);
+		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR0);
 	return vcpu->arch.cr0 & mask;
 }
 
@@ -132,14 +132,14 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
 	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
 	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
 	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
-		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR4);
+		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR4);
 	return vcpu->arch.cr4 & mask;
 }
 
 static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
 {
 	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
-		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR3);
+		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR3);
 	return vcpu->arch.cr3;
 }
 
@@ -484,7 +484,7 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
 	if (unlikely(vcpu->arch.apicv_active)) {
 		/* need to update RVI */
 		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
-		kvm_x86_ops.hwapic_irr_update(vcpu,
+		static_call(kvm_x86_hwapic_irr_update)(vcpu,
 				apic_find_highest_irr(apic));
 	} else {
 		apic->irr_pending = false;
@@ -515,7 +515,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
 	 * just set SVI.
 	 */
 	if (unlikely(vcpu->arch.apicv_active))
-		kvm_x86_ops.hwapic_isr_update(vcpu, vec);
+		static_call(kvm_x86_hwapic_isr_update)(vcpu, vec);
 	else {
 		++apic->isr_count;
 		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
@@ -563,8 +563,8 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
 	 * and must be left alone.
 	 */
 	if (unlikely(vcpu->arch.apicv_active))
-		kvm_x86_ops.hwapic_isr_update(vcpu,
-				apic_find_highest_isr(apic));
+		static_call(kvm_x86_hwapic_isr_update)(vcpu,
+				apic_find_highest_isr(apic));
 	else {
 		--apic->isr_count;
 		BUG_ON(apic->isr_count < 0);
@@ -701,7 +701,7 @@ static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
 {
 	int highest_irr;
 	if (apic->vcpu->arch.apicv_active)
-		highest_irr = kvm_x86_ops.sync_pir_to_irr(apic->vcpu);
+		highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
 	else
 		highest_irr = apic_find_highest_irr(apic);
 	if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
@@ -1090,7 +1090,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 					       apic->regs + APIC_TMR);
 		}
 
-		if (kvm_x86_ops.deliver_posted_interrupt(vcpu, vector)) {
+		if (static_call(kvm_x86_deliver_posted_interrupt)(vcpu, vector)) {
 			kvm_lapic_set_irr(vector, apic);
 			kvm_make_request(KVM_REQ_EVENT, vcpu);
 			kvm_vcpu_kick(vcpu);
@@ -1814,7 +1814,7 @@ static void cancel_hv_timer(struct kvm_lapic *apic)
 {
 	WARN_ON(preemptible());
 	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
-	kvm_x86_ops.cancel_hv_timer(apic->vcpu);
+	static_call(kvm_x86_cancel_hv_timer)(apic->vcpu);
 	apic->lapic_timer.hv_timer_in_use = false;
 }
 
@@ -1831,7 +1831,7 @@ static bool start_hv_timer(struct kvm_lapic *apic)
 	if (!ktimer->tscdeadline)
 		return false;
 
-	if (kvm_x86_ops.set_hv_timer(vcpu, ktimer->tscdeadline, &expired))
+	if (static_call(kvm_x86_set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
 		return false;
 
 	ktimer->hv_timer_in_use = true;
@@ -2261,7 +2261,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
 		kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
 
 	if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE))
-		kvm_x86_ops.set_virtual_apic_mode(vcpu);
+		static_call(kvm_x86_set_virtual_apic_mode)(vcpu);
 
 	apic->base_address = apic->vcpu->arch.apic_base &
 			     MSR_IA32_APICBASE_BASE;
@@ -2338,9 +2338,9 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 	vcpu->arch.pv_eoi.msr_val = 0;
 	apic_update_ppr(apic);
 	if (vcpu->arch.apicv_active) {
-		kvm_x86_ops.apicv_post_state_restore(vcpu);
-		kvm_x86_ops.hwapic_irr_update(vcpu, -1);
-		kvm_x86_ops.hwapic_isr_update(vcpu, -1);
+		static_call(kvm_x86_apicv_post_state_restore)(vcpu);
+		static_call(kvm_x86_hwapic_irr_update)(vcpu, -1);
+		static_call(kvm_x86_hwapic_isr_update)(vcpu, -1);
 	}
 
 	vcpu->arch.apic_arb_prio = 0;
@@ -2601,10 +2601,10 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
 	kvm_apic_update_apicv(vcpu);
 	apic->highest_isr_cache = -1;
 	if (vcpu->arch.apicv_active) {
-		kvm_x86_ops.apicv_post_state_restore(vcpu);
-		kvm_x86_ops.hwapic_irr_update(vcpu,
+		static_call(kvm_x86_apicv_post_state_restore)(vcpu);
+		static_call(kvm_x86_hwapic_irr_update)(vcpu,
 				apic_find_highest_irr(apic));
-		kvm_x86_ops.hwapic_isr_update(vcpu,
+		static_call(kvm_x86_hwapic_isr_update)(vcpu,
 				apic_find_highest_isr(apic));
 	}
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
@@ -102,7 +102,7 @@ static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
 	if (!VALID_PAGE(root_hpa))
 		return;
 
-	kvm_x86_ops.load_mmu_pgd(vcpu, root_hpa | kvm_get_active_pcid(vcpu),
+	static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa | kvm_get_active_pcid(vcpu),
 				 vcpu->arch.mmu->shadow_root_level);
 }
 
@@ -174,8 +174,8 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 				  unsigned pte_access, unsigned pte_pkey,
 				  unsigned pfec)
 {
-	int cpl = kvm_x86_ops.get_cpl(vcpu);
-	unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);
+	int cpl = static_call(kvm_x86_get_cpl)(vcpu);
+	unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
 
 	/*
 	 * If CPL < 3, SMAP prevention are disabled if EFLAGS.AC = 1.
@@ -190,7 +190,7 @@ static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
 	int ret = -ENOTSUPP;
 
 	if (range && kvm_x86_ops.tlb_remote_flush_with_range)
-		ret = kvm_x86_ops.tlb_remote_flush_with_range(kvm, range);
+		ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, range);
 
 	if (ret)
 		kvm_flush_remote_tlbs(kvm);
@@ -1283,8 +1283,9 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 				gfn_t gfn_offset, unsigned long mask)
 {
 	if (kvm_x86_ops.enable_log_dirty_pt_masked)
-		kvm_x86_ops.enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
-				mask);
+		static_call(kvm_x86_enable_log_dirty_pt_masked)(kvm, slot,
+				gfn_offset,
+				mask);
 	else
 		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
 }
@@ -1292,7 +1293,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 int kvm_cpu_dirty_log_size(void)
 {
 	if (kvm_x86_ops.cpu_dirty_log_size)
-		return kvm_x86_ops.cpu_dirty_log_size();
+		return static_call(kvm_x86_cpu_dirty_log_size)();
 
 	return 0;
 }
@@ -4799,7 +4800,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 	if (r)
 		goto out;
 	kvm_mmu_load_pgd(vcpu);
-	kvm_x86_ops.tlb_flush_current(vcpu);
+	static_call(kvm_x86_tlb_flush_current)(vcpu);
 out:
 	return r;
 }
@@ -5080,7 +5081,7 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 		if (is_noncanonical_address(gva, vcpu))
 			return;
 
-		kvm_x86_ops.tlb_flush_gva(vcpu, gva);
+		static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);
 	}
 
 	if (!mmu->invlpg)
@@ -5137,7 +5138,7 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
 	}
 
 	if (tlb_flush)
-		kvm_x86_ops.tlb_flush_gva(vcpu, gva);
+		static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);
 
 	++vcpu->stat.invlpg;
 
@@ -120,7 +120,7 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
 	if (level > PG_LEVEL_4K)
 		spte |= PT_PAGE_SIZE_MASK;
 	if (tdp_enabled)
-		spte |= kvm_x86_ops.get_mt_mask(vcpu, gfn,
+		spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
 			kvm_is_mmio_pfn(pfn));
 
 	if (host_writable)
@@ -373,7 +373,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 		return 1;
 
 	if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) &&
-	    (kvm_x86_ops.get_cpl(vcpu) != 0) &&
+	    (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
 	    (kvm_read_cr0(vcpu) & X86_CR0_PE))
 		return 1;
 
@@ -256,7 +256,7 @@ TRACE_EVENT(name, \
 	__entry->guest_rip = kvm_rip_read(vcpu); \
 	__entry->isa = isa; \
 	__entry->vcpu_id = vcpu->vcpu_id; \
-	kvm_x86_ops.get_exit_info(vcpu, &__entry->info1, \
+	static_call(kvm_x86_get_exit_info)(vcpu, &__entry->info1, \
 				  &__entry->info2, \
 				  &__entry->intr_info, \
 				  &__entry->error_code); \
@@ -738,7 +738,7 @@ TRACE_EVENT(kvm_emulate_insn,
 		),
 
 	TP_fast_assign(
-		__entry->csbase = kvm_x86_ops.get_segment_base(vcpu, VCPU_SREG_CS);
+		__entry->csbase = static_call(kvm_x86_get_segment_base)(vcpu, VCPU_SREG_CS);
 		__entry->len = vcpu->arch.emulate_ctxt->fetch.ptr
 			       - vcpu->arch.emulate_ctxt->fetch.data;
 		__entry->rip = vcpu->arch.emulate_ctxt->_eip - __entry->len;
(One file's diff is suppressed here because it is too large.)
@@ -98,7 +98,7 @@ static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
 
 	if (!is_long_mode(vcpu))
 		return false;
-	kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+	static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
 	return cs_l;
 }
 
@@ -129,7 +129,7 @@ static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
 static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
 {
 	++vcpu->stat.tlb_flush;
-	kvm_x86_ops.tlb_flush_current(vcpu);
+	static_call(kvm_x86_tlb_flush_current)(vcpu);
 }
 
 static inline int is_pae(struct kvm_vcpu *vcpu)
@@ -244,7 +244,7 @@ static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
 
 static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
 {
-	return is_smm(vcpu) || kvm_x86_ops.apic_init_signal_blocked(vcpu);
+	return is_smm(vcpu) || static_call(kvm_x86_apic_init_signal_blocked)(vcpu);
 }
 
 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);