KVM: x86: Move CPUID.(EAX=0x12,ECX=1) mangling to __kvm_update_cpuid_runtime()
Full equality check of CPUID data on update (kvm_cpuid_check_equal()) may
fail for SGX-enabled CPUs, as CPUID.(EAX=0x12,ECX=1) is currently being
mangled in kvm_vcpu_after_set_cpuid(). Move the mangling to
__kvm_update_cpuid_runtime() and split off a cpuid_get_supported_xcr0()
helper, as the 'vcpu->arch.guest_supported_xcr0' update (logically) needs
to stay in kvm_vcpu_after_set_cpuid().
Cc: stable@vger.kernel.org
Fixes: feb627e8d6 ("KVM: x86: Forbid KVM_SET_CPUID{,2} after KVM_RUN")
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20220124103606.2630588-2-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 5c89be1dd5
parent 1625566ec8
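Before the diff, a minimal, self-contained sketch of the masking this patch relocates: deriving the guest-supported XCR0 from CPUID.0xD.0 and clamping the SGX XFRM bits reported in CPUID.(EAX=0x12,ECX=1). The struct cpuid_leaf type, the helper names get_guest_supported_xcr0()/clamp_sgx_xfrm(), and the example values are illustrative stand-ins, not KVM code; only the masking logic itself mirrors the patch below.

/*
 * Illustrative sketch (not kernel code): compute the guest's supported
 * XCR0 from CPUID.0xD.0, then clamp the allowed SGX SECS.ATTRIBUTES XFRM
 * bits (CPUID.0x12.0x1 ECX/EDX) and force FP/SSE to '1'.
 */
#include <stdint.h>
#include <stdio.h>

#define XFEATURE_MASK_FPSSE 0x3ULL      /* x87 FP | SSE, always allowed */

struct cpuid_leaf {                     /* stand-in for kvm_cpuid_entry2 */
        uint32_t eax, ebx, ecx, edx;
};

/* Mirrors cpuid_get_supported_xcr0(): CPUID.0xD.0 masked by host support. */
static uint64_t get_guest_supported_xcr0(const struct cpuid_leaf *xsave_leaf,
                                         uint64_t host_supported_xcr0)
{
        uint64_t guest = xsave_leaf->eax | ((uint64_t)xsave_leaf->edx << 32);

        return guest & host_supported_xcr0;
}

/* Mirrors the CPUID.0x12.0x1 mangling: clamp XFRM, force FP/SSE. */
static void clamp_sgx_xfrm(struct cpuid_leaf *sgx_leaf, uint64_t guest_xcr0)
{
        sgx_leaf->ecx &= (uint32_t)guest_xcr0;          /* XFRM bits 31:0  */
        sgx_leaf->edx &= (uint32_t)(guest_xcr0 >> 32);  /* XFRM bits 63:32 */
        sgx_leaf->ecx |= XFEATURE_MASK_FPSSE;
}

int main(void)
{
        struct cpuid_leaf xsave = { .eax = 0xe7 };      /* example values */
        struct cpuid_leaf sgx   = { .ecx = 0xff };      /* example values */
        uint64_t host_xcr0 = 0xe7;                      /* example value  */

        uint64_t guest_xcr0 = get_guest_supported_xcr0(&xsave, host_xcr0);

        clamp_sgx_xfrm(&sgx, guest_xcr0);
        printf("guest XCR0 %#llx, allowed XFRM %#llx\n",
               (unsigned long long)guest_xcr0,
               (unsigned long long)(sgx.ecx | ((uint64_t)sgx.edx << 32)));
        return 0;
}

With the patch, this mangling runs in __kvm_update_cpuid_runtime(), so the incoming entries are already adjusted when kvm_cpuid_check_equal() compares them against the vCPU's existing (mangled) CPUID data.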
@@ -196,10 +196,26 @@ void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
                 vcpu->arch.pv_cpuid.features = best->eax;
 }
 
+/*
+ * Calculate guest's supported XCR0 taking into account guest CPUID data and
+ * supported_xcr0 (comprised of host configuration and KVM_SUPPORTED_XCR0).
+ */
+static u64 cpuid_get_supported_xcr0(struct kvm_cpuid_entry2 *entries, int nent)
+{
+        struct kvm_cpuid_entry2 *best;
+
+        best = cpuid_entry2_find(entries, nent, 0xd, 0);
+        if (!best)
+                return 0;
+
+        return (best->eax | ((u64)best->edx << 32)) & supported_xcr0;
+}
+
 static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *entries,
                                        int nent)
 {
         struct kvm_cpuid_entry2 *best;
+        u64 guest_supported_xcr0 = cpuid_get_supported_xcr0(entries, nent);
 
         best = cpuid_entry2_find(entries, nent, 1, 0);
         if (best) {
@@ -238,6 +254,21 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
                                    vcpu->arch.ia32_misc_enable_msr &
                                    MSR_IA32_MISC_ENABLE_MWAIT);
         }
+
+        /*
+         * Bits 127:0 of the allowed SECS.ATTRIBUTES (CPUID.0x12.0x1) enumerate
+         * the supported XSAVE Feature Request Mask (XFRM), i.e. the enclave's
+         * requested XCR0 value. The enclave's XFRM must be a subset of XCRO
+         * at the time of EENTER, thus adjust the allowed XFRM by the guest's
+         * supported XCR0. Similar to XCR0 handling, FP and SSE are forced to
+         * '1' even on CPUs that don't support XSAVE.
+         */
+        best = cpuid_entry2_find(entries, nent, 0x12, 0x1);
+        if (best) {
+                best->ecx &= guest_supported_xcr0 & 0xffffffff;
+                best->edx &= guest_supported_xcr0 >> 32;
+                best->ecx |= XFEATURE_MASK_FPSSE;
+        }
 }
 
 void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
@@ -261,27 +292,8 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
                 kvm_apic_set_version(vcpu);
         }
 
-        best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
-        if (!best)
-                vcpu->arch.guest_supported_xcr0 = 0;
-        else
-                vcpu->arch.guest_supported_xcr0 =
-                        (best->eax | ((u64)best->edx << 32)) & supported_xcr0;
-
-        /*
-         * Bits 127:0 of the allowed SECS.ATTRIBUTES (CPUID.0x12.0x1) enumerate
-         * the supported XSAVE Feature Request Mask (XFRM), i.e. the enclave's
-         * requested XCR0 value. The enclave's XFRM must be a subset of XCRO
-         * at the time of EENTER, thus adjust the allowed XFRM by the guest's
-         * supported XCR0. Similar to XCR0 handling, FP and SSE are forced to
-         * '1' even on CPUs that don't support XSAVE.
-         */
-        best = kvm_find_cpuid_entry(vcpu, 0x12, 0x1);
-        if (best) {
-                best->ecx &= vcpu->arch.guest_supported_xcr0 & 0xffffffff;
-                best->edx &= vcpu->arch.guest_supported_xcr0 >> 32;
-                best->ecx |= XFEATURE_MASK_FPSSE;
-        }
+        vcpu->arch.guest_supported_xcr0 =
+                cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
 
         kvm_update_pv_runtime(vcpu);
 