/* 2008-01-30 13:32:10 +01:00 */
# include <asm/paravirt.h>
/*
 * Native (bare-metal) instruction templates for the common paravirt ops
 * on 32-bit x86.  DEF_NATIVE() defines start_<ops>_<name>/end_<ops>_<name>
 * markers around the quoted asm; native_patch() below copies those bytes
 * directly over the corresponding patch site.
 */
DEF_NATIVE ( pv_irq_ops , irq_disable , " cli " ) ;
DEF_NATIVE ( pv_irq_ops , irq_enable , " sti " ) ;
/* Flags travel through %eax — the 32-bit pv calling convention. */
DEF_NATIVE ( pv_irq_ops , restore_fl , " push %eax; popf " ) ;
DEF_NATIVE ( pv_irq_ops , save_fl , " pushf; pop %eax " ) ;
DEF_NATIVE ( pv_cpu_ops , iret , " iret " ) ;
/* Control-register accessors likewise use %eax as argument/result. */
DEF_NATIVE ( pv_mmu_ops , read_cr2 , " mov %cr2, %eax " ) ;
DEF_NATIVE ( pv_mmu_ops , write_cr3 , " mov %eax, %cr3 " ) ;
DEF_NATIVE ( pv_mmu_ops , read_cr3 , " mov %cr3, %eax " ) ;
/* 2016-05-18 20:43:02 +02:00 */
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
/*
 * Native templates for the paravirt spinlock ops.  These are only
 * patched in when the pv op has not been overridden by a hypervisor
 * (checked at patch time via pv_is_native_*() in native_patch()).
 */
/* Native unlock: store zero to the lock byte addressed by %eax. */
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
/* Bare metal: always report 0 in %eax (vCPU is not preempted). */
DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %eax, %eax");
#endif
/* 2009-01-28 14:35:02 -08:00 */
/*
 * paravirt_patch_ident_32 - patch site for the 32-bit identity op.
 *
 * The argument already arrives in %eax and the result is returned in
 * %eax, so the identity function needs no instructions at all: emit
 * nothing and report a zero-length patch.
 */
unsigned paravirt_patch_ident_32 ( void * insnbuf , unsigned len )
{
	(void)insnbuf;
	(void)len;
	return 0;	/* nothing to emit: %eax already holds the result */
}
/*
 * paravirt_patch_ident_64 - patch site for the 64-bit identity op.
 *
 * On 32-bit x86 a 64-bit value lives in the %edx:%eax pair for both
 * argument and return, so no code is required: emit nothing and report
 * a zero-length patch.
 */
unsigned paravirt_patch_ident_64 ( void * insnbuf , unsigned len )
{
	(void)insnbuf;
	(void)len;
	return 0;	/* nothing to emit: %edx:%eax already holds the result */
}
/* 2015-04-24 14:56:38 -04:00 */
extern bool pv_is_native_spin_unlock ( void ) ;
/* 2016-11-15 16:47:06 +01:00 */
extern bool pv_is_native_vcpu_is_preempted ( void ) ;
/* 2015-04-24 14:56:38 -04:00 */
/* 2008-01-30 13:32:10 +01:00 */
/*
 * native_patch - replace a paravirt call site with native instructions
 * @type:     PARAVIRT_PATCH() index of the op being patched
 * @clobbers: clobber mask, forwarded to paravirt_patch_default()
 * @ibuf:     buffer that receives the replacement instructions
 * @addr:     address of the call site, forwarded to paravirt_patch_default()
 * @len:      space available at the call site, in bytes
 *
 * Ops that have a native template defined above via DEF_NATIVE() get
 * that template copied in with paravirt_patch_insns(); everything else
 * falls back to paravirt_patch_default().
 *
 * Returns the number of bytes written into @ibuf.
 */
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len)
{
	const unsigned char *start, *end;
	unsigned ret;

	/* Map a case label onto the start_/end_ markers DEF_NATIVE emits. */
#define PATCH_SITE(ops, x)						\
	case PARAVIRT_PATCH(ops.x):					\
		start = start_##ops##_##x;				\
		end = end_##ops##_##x;					\
		goto patch_site

	switch (type) {
	PATCH_SITE(pv_irq_ops, irq_disable);
	PATCH_SITE(pv_irq_ops, irq_enable);
	PATCH_SITE(pv_irq_ops, restore_fl);
	PATCH_SITE(pv_irq_ops, save_fl);
	PATCH_SITE(pv_cpu_ops, iret);
	PATCH_SITE(pv_mmu_ops, read_cr2);
	PATCH_SITE(pv_mmu_ops, read_cr3);
	PATCH_SITE(pv_mmu_ops, write_cr3);

#if defined(CONFIG_PARAVIRT_SPINLOCKS)
	case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
		/* Only inline the native sequence if the op wasn't overridden. */
		if (pv_is_native_spin_unlock()) {
			start = start_pv_lock_ops_queued_spin_unlock;
			end = end_pv_lock_ops_queued_spin_unlock;
			goto patch_site;
		}
		goto patch_default;

	case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
		if (pv_is_native_vcpu_is_preempted()) {
			start = start_pv_lock_ops_vcpu_is_preempted;
			end = end_pv_lock_ops_vcpu_is_preempted;
			goto patch_site;
		}
		goto patch_default;
#endif

	default:
patch_default:
		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
		break;

patch_site:
		ret = paravirt_patch_insns(ibuf, len, start, end);
		break;
	}
#undef PATCH_SITE
	return ret;
}