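/*
 * Native instruction sequences used when patching 64-bit paravirt
 * call sites: each DEF_NATIVE() below provides the raw instructions
 * that replace the corresponding paravirt op on bare hardware.
 */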
#include <asm/paravirt.h>
#include <asm/asm-offsets.h>
#include <linux/stringify.h>

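/*
 * DEF_NATIVE(ops, name, code) assembles "code" and exports the
 * start_<ops>_<name>/end_<ops>_<name> markers that delimit it, so
 * native_patch() below can copy those bytes over the call site.
 */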
DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
DEF_NATIVE(pv_cpu_ops, clts, "clts");
DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
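
/* Identity sequences: return the first argument unchanged. */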
DEF_NATIVE(, mov32, "mov %edi, %eax");
DEF_NATIVE(, mov64, "mov %rdi, %rax");

#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
#endif

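/* Patch an identity-function call site with a plain register move. */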
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
{
	return paravirt_patch_insns(insnbuf, len,
				    start__mov32, end__mov32);
}

unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
{
	return paravirt_patch_insns(insnbuf, len,
				    start__mov64, end__mov64);
}

extern bool pv_is_native_spin_unlock(void);

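/*
 * Patch a single call site: copy the native sequence for 'type' into
 * the instruction buffer when one is defined above, otherwise fall
 * back to paravirt_patch_default().
 */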
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len)
{
	const unsigned char *start, *end;
	unsigned ret;

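	/*
	 * PATCH_SITE(ops, x) expands to a case label that picks up the
	 * start_/end_ markers emitted by the matching DEF_NATIVE() above.
	 */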
#define PATCH_SITE(ops, x)					\
		case PARAVIRT_PATCH(ops.x):			\
			start = start_##ops##_##x;		\
			end = end_##ops##_##x;			\
			goto patch_site

	switch (type) {
		PATCH_SITE(pv_irq_ops, restore_fl);
		PATCH_SITE(pv_irq_ops, save_fl);
		PATCH_SITE(pv_irq_ops, irq_enable);
		PATCH_SITE(pv_irq_ops, irq_disable);
		PATCH_SITE(pv_cpu_ops, usergs_sysret64);
		PATCH_SITE(pv_cpu_ops, swapgs);
		PATCH_SITE(pv_mmu_ops, read_cr2);
		PATCH_SITE(pv_mmu_ops, read_cr3);
		PATCH_SITE(pv_mmu_ops, write_cr3);
		PATCH_SITE(pv_cpu_ops, clts);
		PATCH_SITE(pv_mmu_ops, flush_tlb_single);
		PATCH_SITE(pv_cpu_ops, wbinvd);
#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
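		/*
		 * Only patch in the short native unlock sequence if the
		 * pv op has not been replaced by a hypervisor-specific
		 * unlock.
		 */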
		case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
			if (pv_is_native_spin_unlock()) {
				start = start_pv_lock_ops_queued_spin_unlock;
				end   = end_pv_lock_ops_queued_spin_unlock;
				goto patch_site;
			}
#endif

	default:
		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
		break;

patch_site:
		ret = paravirt_patch_insns(ibuf, len, start, end);
		break;
	}
#undef PATCH_SITE
	return ret;
}