Avoid the pointless function call to pv_lock_ops.vcpu_is_preempted()
when a paravirt spinlock enabled kernel is run on native hardware.

Do this by patching out the CALL instruction with "XOR %RAX,%RAX",
which has the same effect (a 0 return value).

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: David.Laight@ACULAB.COM
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Pan Xinhui <xinhui.pan@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: benh@kernel.crashing.org
Cc: boqun.feng@gmail.com
Cc: borntraeger@de.ibm.com
Cc: bsingharora@gmail.com
Cc: dave@stgolabs.net
Cc: jgross@suse.com
Cc: kernellwp@gmail.com
Cc: konrad.wilk@oracle.com
Cc: mpe@ellerman.id.au
Cc: paulmck@linux.vnet.ibm.com
Cc: paulus@samba.org
Cc: pbonzini@redhat.com
Cc: rkrcmar@redhat.com
Cc: will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
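For illustration, a minimal self-contained user-space sketch of the byte-level effect described above: a CALL at the vcpu_is_preempted() call site is overwritten with "xor %rax, %rax" and the leftover bytes padded with NOPs. patch_site_sketch() and the fixed 5-byte site size are assumptions made for this example, not kernel API; the kernel itself does this with paravirt_patch_insns() and its own NOP padding, as in the file below.

#include <stdio.h>
#include <string.h>

/*
 * Illustration only -- not part of the kernel patch.  Copy a native
 * replacement sequence over a patch site and pad the remainder with
 * single-byte NOPs (0x90).
 */
static unsigned patch_site_sketch(unsigned char *site, unsigned site_len,
                                  const unsigned char *repl, unsigned repl_len)
{
        if (repl_len > site_len)        /* replacement must fit the site */
                return 0;
        memcpy(site, repl, repl_len);                        /* drop in the native sequence */
        memset(site + repl_len, 0x90, site_len - repl_len);  /* pad with NOPs */
        return repl_len;
}

int main(void)
{
        /* CALL rel32 (e8 xx xx xx xx) standing in for the pv-op call site. */
        unsigned char site[5] = { 0xe8, 0x00, 0x00, 0x00, 0x00 };
        /* "xor %rax, %rax": clears RAX, i.e. a 0 return value without the call. */
        static const unsigned char xor_rax_rax[] = { 0x48, 0x31, 0xc0 };
        unsigned i;

        patch_site_sketch(site, sizeof(site), xor_rax_rax, sizeof(xor_rax_rax));

        for (i = 0; i < sizeof(site); i++)
                printf("%02x ", site[i]);       /* prints: 48 31 c0 90 90 */
        printf("\n");
        return 0;
}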
92 lines | 2.7 KiB | C
#include <asm/paravirt.h>
#include <asm/asm-offsets.h>
#include <linux/stringify.h>

DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
DEF_NATIVE(pv_cpu_ops, clts, "clts");
DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");

DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");

DEF_NATIVE(, mov32, "mov %edi, %eax");
DEF_NATIVE(, mov64, "mov %rdi, %rax");
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %rax, %rax");
#endif
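/*
 * Note (added for readability, not in the original file): each
 * DEF_NATIVE(ops, name, code) above emits the native instruction
 * sequence "code" into the kernel image bracketed by the symbols
 * start_<ops>_<name> and end_<ops>_<name>; native_patch() below
 * copies the bytes between those two symbols over the matching
 * pv-op call site.
 */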
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
{
	return paravirt_patch_insns(insnbuf, len,
				    start__mov32, end__mov32);
}

unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
{
	return paravirt_patch_insns(insnbuf, len,
				    start__mov64, end__mov64);
}
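/*
 * Note (added for readability, not in the original file): the two helpers
 * above patch call sites whose pv op is the identity function: the call is
 * replaced with "mov %edi, %eax" or "mov %rdi, %rax" (the mov32/mov64
 * templates above), so the "return value" is simply the first argument.
 */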
extern bool pv_is_native_spin_unlock(void);
extern bool pv_is_native_vcpu_is_preempted(void);
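/*
 * Note (added for readability, not in the original file): native_patch()
 * is invoked for each recorded paravirt patch site.  For the ops listed
 * in the switch below it copies the short native sequence defined above
 * over the CALL at the site -- e.g. "xor %rax, %rax" for
 * vcpu_is_preempted() when pv_is_native_vcpu_is_preempted() reports the
 * native implementation -- and everything else is left to
 * paravirt_patch_default().
 */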
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len)
{
	const unsigned char *start, *end;
	unsigned ret;

#define PATCH_SITE(ops, x)					\
		case PARAVIRT_PATCH(ops.x):			\
			start = start_##ops##_##x;		\
			end = end_##ops##_##x;			\
			goto patch_site
	switch(type) {
		PATCH_SITE(pv_irq_ops, restore_fl);
		PATCH_SITE(pv_irq_ops, save_fl);
		PATCH_SITE(pv_irq_ops, irq_enable);
		PATCH_SITE(pv_irq_ops, irq_disable);
		PATCH_SITE(pv_cpu_ops, usergs_sysret64);
		PATCH_SITE(pv_cpu_ops, swapgs);
		PATCH_SITE(pv_mmu_ops, read_cr2);
		PATCH_SITE(pv_mmu_ops, read_cr3);
		PATCH_SITE(pv_mmu_ops, write_cr3);
		PATCH_SITE(pv_cpu_ops, clts);
		PATCH_SITE(pv_mmu_ops, flush_tlb_single);
		PATCH_SITE(pv_cpu_ops, wbinvd);
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
		case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
			if (pv_is_native_spin_unlock()) {
				start = start_pv_lock_ops_queued_spin_unlock;
				end   = end_pv_lock_ops_queued_spin_unlock;
				goto patch_site;
			}
		case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
			if (pv_is_native_vcpu_is_preempted()) {
				start = start_pv_lock_ops_vcpu_is_preempted;
				end   = end_pv_lock_ops_vcpu_is_preempted;
				goto patch_site;
			}
#endif

	default:
		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
		break;

patch_site:
		ret = paravirt_patch_insns(ibuf, len, start, end);
		break;
	}
#undef PATCH_SITE
	return ret;
}