KVM: LAPIC: Recalculate apic map in batch
In the vCPU reset and APIC_BASE MSR write paths, the APIC map is recalculated several times; each recalculation costs 10+ us (measured with ftrace in a non-overcommitted environment) because of the expensive memory allocation, mutex, and RCU operations involved. This patch optimizes it by recalculating the APIC map in batch, which should also benefit serverless scenarios that frequently create and destroy VMs.

Before patch: kvm_lapic_reset ~27us
After patch:  kvm_lapic_reset ~14us

Measured with ftrace: an improvement of ~48%.

Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 4abaffce4d
parent 49f933d445
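The idea of the patch, stripped of the KVM plumbing: APIC register setters only mark the map dirty, and the map is actually rebuilt once, at the end of the reset / MSR-write path, with a double check of the dirty flag (once outside and once under the lock) and a write/read barrier pair so that a cleared flag implies the newly published map is visible. Below is a minimal user-space sketch of that pattern, not KVM code: the names (struct vm, set_apic_id, recalculate_map) are illustrative, and C11 fences plus a plain pointer and free() stand in for the kernel's smp_wmb()/smp_rmb() and the RCU-protected apic_map.

/*
 * Minimal sketch of dirty-flag batching (illustrative names, not KVM's).
 * Setters only flip a dirty flag; one recalculation point rebuilds the
 * map at most once. Build with: cc -std=c11 -pthread sketch.c
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct apic_map { int nr_entries; };

struct vm {
    pthread_mutex_t map_lock;
    struct apic_map *map;      /* published map (RCU-protected in the kernel) */
    atomic_bool map_dirty;     /* set by register writers */
    int recalc_count;          /* how often the map was actually rebuilt */
};

/* A register writer just marks the map stale instead of rebuilding it. */
static void set_apic_id(struct vm *vm, int id)
{
    (void)id;                  /* a real setter would also store the id */
    atomic_store(&vm->map_dirty, true);
}

/* Batched recalculation: cheap when clean, rebuilds once otherwise. */
static void recalculate_map(struct vm *vm)
{
    if (!atomic_load(&vm->map_dirty)) {
        /* Read map_dirty before map (pairs with the release fence below). */
        atomic_thread_fence(memory_order_acquire);
        return;
    }

    pthread_mutex_lock(&vm->map_lock);
    if (!atomic_load(&vm->map_dirty)) {
        /* Someone else rebuilt the map while we waited for the lock. */
        pthread_mutex_unlock(&vm->map_lock);
        return;
    }

    struct apic_map *new = calloc(1, sizeof(*new));
    struct apic_map *old = vm->map;
    new->nr_entries = old ? old->nr_entries + 1 : 1;
    vm->map = new;
    vm->recalc_count++;

    /* Publish the new map before clearing map_dirty. */
    atomic_thread_fence(memory_order_release);
    atomic_store(&vm->map_dirty, false);
    pthread_mutex_unlock(&vm->map_lock);

    free(old);                 /* the kernel defers this via RCU instead */
}

int main(void)
{
    struct vm vm = { .map_lock = PTHREAD_MUTEX_INITIALIZER };

    /* Several setters in a row, as in vCPU reset ... */
    set_apic_id(&vm, 0);
    set_apic_id(&vm, 1);
    set_apic_id(&vm, 2);

    /* ... but only one rebuild at the end of the path. */
    recalculate_map(&vm);
    recalculate_map(&vm);      /* no-op: map is no longer dirty */

    printf("rebuilds: %d\n", vm.recalc_count);   /* prints 1 */
    free(vm.map);
    return 0;
}

The second check under the lock is what lets several callers race on the same path yet rebuild the map only once; the flag is cleared only after the new map has been published.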
arch/x86/include/asm/kvm_host.h
@@ -920,6 +920,7 @@ struct kvm_arch {
     atomic_t vapics_in_nmi_mode;
     struct mutex apic_map_lock;
     struct kvm_apic_map *apic_map;
+    bool apic_map_dirty;
 
     bool apic_access_page_done;
     unsigned long apicv_inhibit_reasons;
arch/x86/kvm/lapic.c
@@ -164,14 +164,28 @@ static void kvm_apic_map_free(struct rcu_head *rcu)
     kvfree(map);
 }
 
-static void recalculate_apic_map(struct kvm *kvm)
+void kvm_recalculate_apic_map(struct kvm *kvm)
 {
     struct kvm_apic_map *new, *old = NULL;
     struct kvm_vcpu *vcpu;
     int i;
     u32 max_id = 255; /* enough space for any xAPIC ID */
 
+    if (!kvm->arch.apic_map_dirty) {
+        /*
+         * Read kvm->arch.apic_map_dirty before
+         * kvm->arch.apic_map
+         */
+        smp_rmb();
+        return;
+    }
+
     mutex_lock(&kvm->arch.apic_map_lock);
+    if (!kvm->arch.apic_map_dirty) {
+        /* Someone else has updated the map. */
+        mutex_unlock(&kvm->arch.apic_map_lock);
+        return;
+    }
 
     kvm_for_each_vcpu(i, vcpu, kvm)
         if (kvm_apic_present(vcpu))
@@ -236,6 +250,12 @@ out:
     old = rcu_dereference_protected(kvm->arch.apic_map,
             lockdep_is_held(&kvm->arch.apic_map_lock));
     rcu_assign_pointer(kvm->arch.apic_map, new);
+    /*
+     * Write kvm->arch.apic_map before
+     * clearing apic->apic_map_dirty
+     */
+    smp_wmb();
+    kvm->arch.apic_map_dirty = false;
     mutex_unlock(&kvm->arch.apic_map_lock);
 
     if (old)
@@ -257,20 +277,20 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
         else
             static_key_slow_inc(&apic_sw_disabled.key);
 
-        recalculate_apic_map(apic->vcpu->kvm);
+        apic->vcpu->kvm->arch.apic_map_dirty = true;
     }
 }
 
 static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
 {
     kvm_lapic_set_reg(apic, APIC_ID, id << 24);
-    recalculate_apic_map(apic->vcpu->kvm);
+    apic->vcpu->kvm->arch.apic_map_dirty = true;
 }
 
 static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
 {
     kvm_lapic_set_reg(apic, APIC_LDR, id);
-    recalculate_apic_map(apic->vcpu->kvm);
+    apic->vcpu->kvm->arch.apic_map_dirty = true;
 }
 
 static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
@@ -286,7 +306,7 @@ static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
 
     kvm_lapic_set_reg(apic, APIC_ID, id);
     kvm_lapic_set_reg(apic, APIC_LDR, ldr);
-    recalculate_apic_map(apic->vcpu->kvm);
+    apic->vcpu->kvm->arch.apic_map_dirty = true;
 }
 
 static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
@@ -1906,7 +1926,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
     case APIC_DFR:
         if (!apic_x2apic_mode(apic)) {
             kvm_lapic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
-            recalculate_apic_map(apic->vcpu->kvm);
+            apic->vcpu->kvm->arch.apic_map_dirty = true;
         } else
             ret = 1;
         break;
@@ -2012,6 +2032,8 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
         break;
     }
 
+    kvm_recalculate_apic_map(apic->vcpu->kvm);
+
     return ret;
 }
 EXPORT_SYMBOL_GPL(kvm_lapic_reg_write);
@@ -2160,7 +2182,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
             static_key_slow_dec_deferred(&apic_hw_disabled);
         } else {
             static_key_slow_inc(&apic_hw_disabled.key);
-            recalculate_apic_map(vcpu->kvm);
+            vcpu->kvm->arch.apic_map_dirty = true;
         }
     }
 
@@ -2201,6 +2223,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
     if (!apic)
         return;
 
+    vcpu->kvm->arch.apic_map_dirty = false;
     /* Stop the timer in case it's a reset to an active apic */
     hrtimer_cancel(&apic->lapic_timer.timer);
 
@@ -2252,6 +2275,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 
     vcpu->arch.apic_arb_prio = 0;
     vcpu->arch.apic_attention = 0;
+
+    kvm_recalculate_apic_map(vcpu->kvm);
 }
 
 /*
@@ -2473,17 +2498,18 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
     struct kvm_lapic *apic = vcpu->arch.apic;
     int r;
 
-
     kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
     /* set SPIV separately to get count of SW disabled APICs right */
     apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
 
     r = kvm_apic_state_fixup(vcpu, s, true);
-    if (r)
+    if (r) {
+        kvm_recalculate_apic_map(vcpu->kvm);
         return r;
+    }
     memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
 
-    recalculate_apic_map(vcpu->kvm);
+    kvm_recalculate_apic_map(vcpu->kvm);
     kvm_apic_set_version(vcpu);
 
     apic_update_ppr(apic);
arch/x86/kvm/lapic.h
@@ -78,6 +78,7 @@ void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
 void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu);
 void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value);
 u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu);
+void kvm_recalculate_apic_map(struct kvm *kvm);
 void kvm_apic_set_version(struct kvm_vcpu *vcpu);
 int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val);
 int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
arch/x86/kvm/x86.c
@@ -350,6 +350,7 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
     }
 
     kvm_lapic_set_base(vcpu, msr_info->data);
+    kvm_recalculate_apic_map(vcpu->kvm);
     return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_set_apic_base);