Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull more KVM updates from Paolo Bonzini:
 "x86 KVM changes:

   - The usual accuracy improvements for nested virtualization

   - The usual round of code cleanups from Sean

   - Added back optimizations that were prematurely removed in 5.2 (the
     bare minimum needed to fix the regression was in 5.3-rc8, here
     comes the rest)

   - Support for UMWAIT/UMONITOR/TPAUSE

   - Direct L2->L0 TLB flushing when L0 is Hyper-V and L1 is KVM

   - Tell Windows guests if SMT is disabled on the host

   - More accurate detection of vmexit cost

   - Revert a pvqspinlock pessimization"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (56 commits)
  KVM: nVMX: cleanup and fix host 64-bit mode checks
  KVM: vmx: fix build warnings in hv_enable_direct_tlbflush() on i386
  KVM: x86: Don't check kvm_rebooting in __kvm_handle_fault_on_reboot()
  KVM: x86: Drop ____kvm_handle_fault_on_reboot()
  KVM: VMX: Add error handling to VMREAD helper
  KVM: VMX: Optimize VMX instruction error and fault handling
  KVM: x86: Check kvm_rebooting in kvm_spurious_fault()
  KVM: selftests: fix ucall on x86
  Revert "locking/pvqspinlock: Don't wait if vCPU is preempted"
  kvm: nvmx: limit atomic switch MSRs
  kvm: svm: Intercept RDPRU
  kvm: x86: Add "significant index" flag to a few CPUID leaves
  KVM: x86/mmu: Skip invalid pages during zapping iff root_count is zero
  KVM: x86/mmu: Explicitly track only a single invalid mmu generation
  KVM: x86/mmu: Revert "KVM: x86/mmu: Remove is_obsolete() call"
  KVM: x86/mmu: Revert "Revert "KVM: MMU: reclaim the zapped-obsolete page first""
  KVM: x86/mmu: Revert "Revert "KVM: MMU: collapse TLB flushes when zap all pages""
  KVM: x86/mmu: Revert "Revert "KVM: MMU: zap pages in batch""
  KVM: x86/mmu: Revert "Revert "KVM: MMU: add tracepoint for kvm_mmu_invalidate_all_pages""
  KVM: x86/mmu: Revert "Revert "KVM: MMU: show mmu_valid_gen in shadow page related tracepoints""
  ...
 kernel/cpu.c | 11
@@ -392,8 +392,7 @@ enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
 
 void __init cpu_smt_disable(bool force)
 {
-	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
-	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
+	if (!cpu_smt_possible())
 		return;
 
 	if (force) {
@@ -438,6 +437,14 @@ static inline bool cpu_smt_allowed(unsigned int cpu)
 	 */
 	return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
 }
+
+/* Returns true if SMT is not supported of forcefully (irreversibly) disabled */
+bool cpu_smt_possible(void)
+{
+	return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
+	       cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
+}
+EXPORT_SYMBOL_GPL(cpu_smt_possible);
 #else
 static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
 #endif
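The exported cpu_smt_possible() helper above is what backs the "Tell Windows guests if SMT is disabled on the host" item in the pull message: when the host can never run SMT, KVM's Hyper-V CPUID emulation can set a "no non-architectural core sharing" recommendation bit for the guest. A minimal sketch of such a caller follows; the leaf-filling function and the flag name are illustrative stand-ins, not the exact KVM code.

#include <linux/bits.h>
#include <linux/cpu.h>			/* cpu_smt_possible() */

/* Illustrative flag name; the real bit lives in the Hyper-V TLFS headers. */
#define HV_NO_NONARCH_CORESHARING	BIT(18)

/*
 * Hedged sketch: when filling the Hyper-V enlightenment CPUID leaf,
 * advertise that sibling threads can never share a core with another
 * partition if SMT is impossible on the host.
 */
static void fill_enlightenment_leaf(u32 *eax)
{
	if (!cpu_smt_possible())
		*eax |= HV_NO_NONARCH_CORESHARING;
}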
 kernel/locking/qspinlock_paravirt.h

@@ -269,7 +269,7 @@ pv_wait_early(struct pv_node *prev, int loop)
 	if ((loop & PV_PREV_CHECK_MASK) != 0)
 		return false;
 
-	return READ_ONCE(prev->state) != vcpu_running;
+	return READ_ONCE(prev->state) != vcpu_running || vcpu_is_preempted(prev->cpu);
 }
 
 /*
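For context on the "pvqspinlock pessimization" revert above: pv_wait_early() tells a queued lock waiter whether to stop spinning and halt its vCPU early, and restoring the vcpu_is_preempted(prev->cpu) term makes the waiter bail out as soon as the CPU it is queued behind has been preempted, instead of burning the rest of its spin budget. An abridged sketch of how the result is consumed in the waiter loop (loosely modelled on pv_wait_node(); not the verbatim kernel code):

/*
 * Abridged sketch: spin on our own node->locked for a bounded number of
 * iterations, but give up early when the previous waiter is not running
 * or - after this revert - has been preempted, then park the vCPU.
 */
static void pv_wait_node_sketch(struct mcs_spinlock *node,
				struct mcs_spinlock *prev)
{
	struct pv_node *pn = (struct pv_node *)node;
	struct pv_node *pp = (struct pv_node *)prev;
	int loop;

	for (loop = SPIN_THRESHOLD; loop; loop--) {
		if (READ_ONCE(node->locked))
			return;			/* lock was handed to us */
		if (pv_wait_early(pp, loop))
			break;			/* previous waiter stalled */
		cpu_relax();
	}

	/* Give up spinning: mark ourselves halted and sleep until kicked. */
	smp_store_mb(pn->state, vcpu_halted);
	if (!READ_ONCE(node->locked))
		pv_wait(&pn->state, vcpu_halted);
}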