KVM: change the way the lowest priority vcpu is calculated
The new way does not require an additional loop over vcpus to calculate the one with the lowest priority, as one is chosen during delivery bitmap construction.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
commit e1035715ef (parent 343f94fe4d)
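To make the single-pass selection concrete, below is a minimal, self-contained C sketch of the idea described in the commit message (not the kernel code itself): the struct, the matches_dest field, and the bitmap handling are simplified stand-ins, but the selection mirrors the new kvm_get_intr_delivery_bitmask() logic, with kvm_apic_compare_prio() reduced to a plain subtraction of arbitration priorities.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct kvm_vcpu; only the arbitration priority
 * used for lowest-priority delivery is modelled here. */
struct vcpu {
	int32_t apic_arb_prio;
	int matches_dest;	/* stand-in for kvm_apic_match_dest() */
};

/* Same idea as kvm_apic_compare_prio(): negative => v1 has lower priority. */
static int compare_prio(const struct vcpu *v1, const struct vcpu *v2)
{
	return v1->apic_arb_prio - v2->apic_arb_prio;
}

/* Build the delivery bitmap in one pass.  For lowest-priority delivery the
 * winner is tracked while scanning, so no extra loop over vcpus is needed. */
static unsigned long build_deliver_bitmap(const struct vcpu *vcpus, int n,
					  int low_prio)
{
	unsigned long bitmap = 0;
	int lowest = -1;

	for (int i = 0; i < n; i++) {
		if (!vcpus[i].matches_dest)
			continue;
		if (!low_prio) {
			bitmap |= 1UL << i;		/* fixed: every match */
		} else if (lowest < 0 ||
			   compare_prio(&vcpus[i], &vcpus[lowest]) < 0) {
			lowest = i;			/* lowest priority so far */
		}
	}
	if (low_prio && lowest != -1)
		bitmap |= 1UL << lowest;		/* single winner */
	return bitmap;
}

int main(void)
{
	const struct vcpu vcpus[] = {
		{ .apic_arb_prio = 5, .matches_dest = 1 },
		{ .apic_arb_prio = 2, .matches_dest = 1 },
		{ .apic_arb_prio = 7, .matches_dest = 0 },
	};

	/* Lowest-priority delivery selects vcpu 1 (smallest apic_arb_prio). */
	printf("bitmap = %#lx\n", build_deliver_bitmap(vcpus, 3, 1));
	return 0;
}

The increment of apic_arb_prio on every APIC_DM_LOWEST delivery (see the __apic_accept_irq hunk below) spreads lowest-priority interrupts across vcpus over time, taking over the role of the removed round-robin state.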
@@ -1836,20 +1836,9 @@ int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
 	return 0;
 }
 
-struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
-				       unsigned long *bitmap)
+int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
 {
-	struct kvm_vcpu *lvcpu = kvm->vcpus[0];
-	int i;
-
-	for (i = 1; i < kvm->arch.online_vcpus; i++) {
-		if (!kvm->vcpus[i])
-			continue;
-		if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp)
-			lvcpu = kvm->vcpus[i];
-	}
-
-	return lvcpu;
+	return vcpu1->arch.xtp - vcpu2->arch.xtp;
 }
 
 int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
@@ -22,6 +22,7 @@ int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
 int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
 int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
 		int short_hand, int dest, int dest_mode);
+int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
 bool kvm_apic_present(struct kvm_vcpu *vcpu);
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 dmode, u8 trig);
 
@@ -286,6 +286,7 @@ struct kvm_vcpu_arch {
 	u64 shadow_efer;
 	u64 apic_base;
 	struct kvm_lapic *apic;    /* kernel irqchip context */
+	int32_t apic_arb_prio;
 	int mp_state;
 	int sipi_vector;
 	u64 ia32_misc_enable_msr;
@@ -400,7 +401,6 @@ struct kvm_arch{
 	struct hlist_head irq_ack_notifier_list;
 	int vapics_in_nmi_mode;
 
-	int round_robin_prev_vcpu;
 	unsigned int tss_addr;
 	struct page *apic_access_page;
 
@@ -338,8 +338,9 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 	struct kvm_vcpu *vcpu = apic->vcpu;
 
 	switch (delivery_mode) {
-	case APIC_DM_FIXED:
 	case APIC_DM_LOWEST:
+		vcpu->arch.apic_arb_prio++;
+	case APIC_DM_FIXED:
 		/* FIXME add logic for vcpu on reset */
 		if (unlikely(!apic_enabled(apic)))
 			break;
@@ -416,43 +417,9 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 	return result;
 }
 
-static struct kvm_lapic *kvm_apic_round_robin(struct kvm *kvm, u8 vector,
-		       unsigned long *bitmap)
+int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
 {
-	int last;
-	int next;
-	struct kvm_lapic *apic = NULL;
-
-	last = kvm->arch.round_robin_prev_vcpu;
-	next = last;
-
-	do {
-		if (++next == KVM_MAX_VCPUS)
-			next = 0;
-		if (kvm->vcpus[next] == NULL || !test_bit(next, bitmap))
-			continue;
-		apic = kvm->vcpus[next]->arch.apic;
-		if (apic && apic_enabled(apic))
-			break;
-		apic = NULL;
-	} while (next != last);
-	kvm->arch.round_robin_prev_vcpu = next;
-
-	if (!apic)
-		printk(KERN_DEBUG "vcpu not ready for apic_round_robin\n");
-
-	return apic;
-}
-
-struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
-		unsigned long *bitmap)
-{
-	struct kvm_lapic *apic;
-
-	apic = kvm_apic_round_robin(kvm, vector, bitmap);
-	if (apic)
-		return apic->vcpu;
-	return NULL;
+	return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
 }
 
 static void apic_set_eoi(struct kvm_lapic *apic)
@@ -908,6 +875,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
 		vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP;
 	apic_update_ppr(apic);
 
+	vcpu->arch.apic_arb_prio = 0;
+
 	apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
 		   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
 		   vcpu, kvm_apic_id(apic),
@@ -64,10 +64,9 @@ static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
 	return kvm->arch.vioapic;
 }
 
-struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
-				       unsigned long *bitmap);
 int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
 		int short_hand, int dest, int dest_mode);
+int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
 void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
 int kvm_ioapic_init(struct kvm *kvm);
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
@@ -47,7 +47,7 @@ void kvm_get_intr_delivery_bitmask(struct kvm *kvm, struct kvm_lapic *src,
 		int dest_id, int dest_mode, bool low_prio, int short_hand,
 		unsigned long *deliver_bitmask)
 {
-	int i;
+	int i, lowest = -1;
 	struct kvm_vcpu *vcpu;
 
 	if (dest_mode == 0 && dest_id == 0xff && low_prio)
@@ -64,15 +64,18 @@ void kvm_get_intr_delivery_bitmask(struct kvm *kvm, struct kvm_lapic *src,
 					dest_mode))
 			continue;
 
-		__set_bit(i, deliver_bitmask);
+		if (!low_prio) {
+			__set_bit(i, deliver_bitmask);
+		} else {
+			if (lowest < 0)
+				lowest = i;
+			if (kvm_apic_compare_prio(vcpu, kvm->vcpus[lowest]) < 0)
+				lowest = i;
+		}
 	}
 
-	if (low_prio) {
-		vcpu = kvm_get_lowest_prio_vcpu(kvm, 0, deliver_bitmask);
-		bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
-		if (vcpu)
-			__set_bit(vcpu->vcpu_id, deliver_bitmask);
-	}
+	if (lowest != -1)
+		__set_bit(lowest, deliver_bitmask);
 }
 
 static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,