x86/fpu: Remove clts()
The kernel doesn't use clts() any more.  Remove it and all of its
paravirt infrastructure.

A careful reader may notice that xen_clts() appears to have been
buggy -- it didn't update xen_cr0_value.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kvm list <kvm@vger.kernel.org>
Link: http://lkml.kernel.org/r/3d3c8ca62f17579b9849a013d71e59a4d5d1b079.1477951965.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit af25ed59b5
parent 0d50612c04
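[ Context, not part of the patch: CLTS is the x86 instruction that
  clears only the Task Switched bit (CR0.TS), which the lazy-FPU code
  used so that the first FPU instruction after a context switch would
  trap.  A minimal sketch of the open-coded equivalent, using the
  read_cr0()/write_cr0() accessors that remain after this patch;
  clear_cr0_ts() is a hypothetical name used here for illustration:

	/* Illustrative only: CLTS clears CR0.TS and nothing else. */
	static inline void clear_cr0_ts(void)
	{
		write_cr0(read_cr0() & ~X86_CR0_TS);
	}
]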
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -41,11 +41,6 @@ static inline void set_debugreg(unsigned long val, int reg)
 	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
 }
 
-static inline void clts(void)
-{
-	PVOP_VCALL0(pv_cpu_ops.clts);
-}
-
 static inline unsigned long read_cr0(void)
 {
 	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -103,8 +103,6 @@ struct pv_cpu_ops {
 	unsigned long (*get_debugreg)(int regno);
 	void (*set_debugreg)(int regno, unsigned long value);
 
-	void (*clts)(void);
-
 	unsigned long (*read_cr0)(void);
 	void (*write_cr0)(unsigned long);
 
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -6,11 +6,6 @@
 
 #include <asm/nops.h>
 
-static inline void native_clts(void)
-{
-	asm volatile("clts");
-}
-
 /*
  * Volatile isn't enough to prevent the compiler from reordering the
  * read/write functions for the control registers and messing everything up.
@@ -208,12 +203,6 @@ static inline void load_gs_index(unsigned selector)
 
 #endif
 
-/* Clear the 'TS' bit */
-static inline void clts(void)
-{
-	native_clts();
-}
-
 #endif/* CONFIG_PARAVIRT */
 
 static inline void clflush(volatile void *__p)
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -328,7 +328,6 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
 	.cpuid = native_cpuid,
 	.get_debugreg = native_get_debugreg,
 	.set_debugreg = native_set_debugreg,
-	.clts = native_clts,
 	.read_cr0 = native_read_cr0,
 	.write_cr0 = native_write_cr0,
 	.read_cr4 = native_read_cr4,
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -8,7 +8,6 @@ DEF_NATIVE(pv_cpu_ops, iret, "iret");
 DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
 DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
 DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
-DEF_NATIVE(pv_cpu_ops, clts, "clts");
 
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
 DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
@@ -48,7 +47,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		PATCH_SITE(pv_mmu_ops, read_cr2);
 		PATCH_SITE(pv_mmu_ops, read_cr3);
 		PATCH_SITE(pv_mmu_ops, write_cr3);
-		PATCH_SITE(pv_cpu_ops, clts);
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
 		case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
 			if (pv_is_native_spin_unlock()) {
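[ Aside: the DEF_NATIVE/PATCH_SITE pair above is why clts appears in
  these patching files at all -- at boot, native_patch() overwrites
  the indirect pv_cpu_ops.clts call site with the raw instruction
  bytes.  A simplified sketch of the idea; the kernel's real macro
  (in paravirt_types.h) differs in detail:

	/* Emit the native instruction bytes between start/end labels. */
	#define DEF_NATIVE(ops, name, code)				\
		extern const char start_##ops##_##name[],		\
				  end_##ops##_##name[];			\
		asm("start_" #ops "_" #name ": " code "\n"		\
		    "end_" #ops "_" #name ":")

	DEF_NATIVE(pv_cpu_ops, clts, "clts");	/* 0x0f 0x06 */

  At patch time those bytes are copied over the call site emitted for
  PVOP_VCALL0(pv_cpu_ops.clts), NOP-padded to the site's length.
  Dropping the hook means dropping its patch entries here too. ]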
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -10,7 +10,6 @@ DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
 DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
-DEF_NATIVE(pv_cpu_ops, clts, "clts");
 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
 
 DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
@@ -58,7 +57,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		PATCH_SITE(pv_mmu_ops, read_cr2);
 		PATCH_SITE(pv_mmu_ops, read_cr3);
 		PATCH_SITE(pv_mmu_ops, write_cr3);
-		PATCH_SITE(pv_cpu_ops, clts);
 		PATCH_SITE(pv_mmu_ops, flush_tlb_single);
 		PATCH_SITE(pv_cpu_ops, wbinvd);
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -517,17 +517,6 @@ static unsigned long lguest_read_cr0(void)
 	return 0;
 }
 
-/*
- * Intel provided a special instruction to clear the TS bit for people too cool
- * to use write_cr0() to do it.  This "clts" instruction is faster, because all
- * the vowels have been optimized out.
- */
-static void lguest_clts(void)
-{
-	lazy_hcall1(LHCALL_TS, 0);
-	current_cr0 &= ~X86_CR0_TS;
-}
-
 /*
  * cr2 is the virtual address of the last page fault, which the Guest only ever
  * reads.  The Host kindly writes this into our "struct lguest_data", so we
@@ -1429,7 +1418,6 @@ __init void lguest_init(void)
 	pv_cpu_ops.load_tls = lguest_load_tls;
 	pv_cpu_ops.get_debugreg = lguest_get_debugreg;
 	pv_cpu_ops.set_debugreg = lguest_set_debugreg;
-	pv_cpu_ops.clts = lguest_clts;
 	pv_cpu_ops.read_cr0 = lguest_read_cr0;
 	pv_cpu_ops.write_cr0 = lguest_write_cr0;
 	pv_cpu_ops.read_cr4 = lguest_read_cr4;
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -980,17 +980,6 @@ static void xen_io_delay(void)
 {
 }
 
-static void xen_clts(void)
-{
-	struct multicall_space mcs;
-
-	mcs = xen_mc_entry(0);
-
-	MULTI_fpu_taskswitch(mcs.mc, 0);
-
-	xen_mc_issue(PARAVIRT_LAZY_CPU);
-}
-
 static DEFINE_PER_CPU(unsigned long, xen_cr0_value);
 
 static unsigned long xen_read_cr0(void)
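[ This is the hunk the "careful reader" note in the commit message
  points at: xen_read_cr0() answers from the per-CPU xen_cr0_value
  shadow, but xen_clts() only asked the hypervisor to clear TS and
  never touched that shadow, so a later read_cr0() could still see a
  stale TS bit.  (lguest_clts() above, by contrast, did update its
  current_cr0 shadow.)  The hook is deleted rather than fixed; purely
  for illustration, a coherent version would have needed something
  like the hypothetical xen_clts_fixed() below:

	static void xen_clts_fixed(void)
	{
		struct multicall_space mcs = xen_mc_entry(0);

		/* Ask Xen to clear CR0.TS ... */
		MULTI_fpu_taskswitch(mcs.mc, 0);
		xen_mc_issue(PARAVIRT_LAZY_CPU);

		/* ... and keep the cached CR0 value coherent too. */
		this_cpu_and(xen_cr0_value, ~X86_CR0_TS);
	}
]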
@@ -1233,8 +1222,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 	.set_debugreg = xen_set_debugreg,
 	.get_debugreg = xen_get_debugreg,
 
-	.clts = xen_clts,
-
 	.read_cr0 = xen_read_cr0,
 	.write_cr0 = xen_write_cr0,
 