2006-03-23 02:59:32 -08:00
# include <linux/module.h>
2006-10-18 01:47:25 -04:00
# include <linux/sched.h>
2006-03-23 02:59:32 -08:00
# include <linux/spinlock.h>
# include <linux/list.h>
2007-07-22 11:12:31 +02:00
# include <linux/kprobes.h>
# include <linux/mm.h>
# include <linux/vmalloc.h>
2006-03-23 02:59:32 -08:00
# include <asm/alternative.h>
# include <asm/sections.h>
2007-07-22 11:12:31 +02:00
# include <asm/pgtable.h>
2007-07-22 11:12:32 +02:00
# include <asm/mce.h>
# include <asm/nmi.h>
2007-10-14 22:57:45 +02:00
# include <asm/vsyscall.h>
2006-03-23 02:59:32 -08:00
2007-08-10 22:31:03 +02:00
# define MAX_PATCH_LEN (255-1)
2007-07-21 17:10:25 +02:00
#ifdef CONFIG_HOTPLUG_CPU
/* When set, lock prefixes are patched exactly once at boot and the
   tracking tables are freed; later CPU hotplug cannot repatch. */
static int smp_alt_once;

/* "smp-alt-boot": force patch-once-at-boot mode even with hotplug. */
static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
__setup("smp-alt-boot", bootonly);
#else
/* Without CPU hotplug the CPU count can never grow: always patch once. */
#define smp_alt_once 1
#endif
static int debug_alternative ;
2007-05-02 19:27:13 +02:00
2006-06-26 13:56:16 +02:00
static int __init debug_alt ( char * str )
{
debug_alternative = 1 ;
return 1 ;
}
__setup ( " debug-alternative " , debug_alt ) ;
2007-07-21 17:10:25 +02:00
static int noreplace_smp ;
2007-05-02 19:27:13 +02:00
static int __init setup_noreplace_smp ( char * str )
{
noreplace_smp = 1 ;
return 1 ;
}
__setup ( " noreplace-smp " , setup_noreplace_smp ) ;
2007-05-02 19:27:16 +02:00
#ifdef CONFIG_PARAVIRT
/* Set by "noreplace-paravirt": skip paravirt patching entirely.
   Statics are implicitly zero; no explicit initializer, consistent
   with the other flags in this file. */
static int noreplace_paravirt;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif
2007-05-02 19:27:13 +02:00
2006-06-26 13:56:16 +02:00
/*
 * Debug printout, gated on the "debug-alternative" boot flag.
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement and cannot misbind inside an unbraced if/else;
 * "##args" lets callers pass a bare format string with no
 * arguments (a plain "args" would leave a trailing comma).
 */
#define DPRINTK(fmt, args...)						\
	do {								\
		if (debug_alternative)					\
			printk(KERN_DEBUG fmt, ##args);			\
	} while (0)
#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings.
   The .section directive leaves the assembler in .rodata, so switch
   back with .previous or later compiler-generated code would be
   emitted into the read-only data section. */
asm("\t.section .rodata, \"a\"\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8
    "\t.previous");
extern const unsigned char intelnops[];
/* intel_nops[n] points at an n-byte nop sequence (1 <= n <= ASM_NOP_MAX). */
static const unsigned char *const intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
#ifdef K8_NOP1
/* K8-optimal nops; see intel_nops above for why this is inline asm.
   .previous restores the section left by the .section directive. */
asm("\t.section .rodata, \"a\"\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8
    "\t.previous");
extern const unsigned char k8nops[];
/* k8_nops[n] points at an n-byte nop sequence (1 <= n <= ASM_NOP_MAX). */
static const unsigned char *const k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
#ifdef K7_NOP1
/* K7-optimal nops; see intel_nops above for why this is inline asm.
   .previous restores the section left by the .section directive. */
asm("\t.section .rodata, \"a\"\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8
    "\t.previous");
extern const unsigned char k7nops[];
/* k7_nops[n] points at an n-byte nop sequence (1 <= n <= ASM_NOP_MAX). */
static const unsigned char *const k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
# ifdef CONFIG_X86_64
extern char __vsyscall_0 ;
2007-10-17 18:04:37 +02:00
static inline const unsigned char * const * find_nop_table ( void )
2006-06-26 13:56:16 +02:00
{
return k8_nops ;
}
# else /* CONFIG_X86_64 */
2007-10-17 18:04:37 +02:00
static const struct nop {
2006-03-23 02:59:32 -08:00
int cpuid ;
2007-10-17 18:04:37 +02:00
const unsigned char * const * noptable ;
2006-03-23 02:59:32 -08:00
} noptypes [ ] = {
{ X86_FEATURE_K8 , k8_nops } ,
{ X86_FEATURE_K7 , k7_nops } ,
{ - 1 , NULL }
} ;
2007-10-17 18:04:37 +02:00
static const unsigned char * const * find_nop_table ( void )
2006-03-23 02:59:32 -08:00
{
2007-10-17 18:04:37 +02:00
const unsigned char * const * noptable = intel_nops ;
2006-03-23 02:59:32 -08:00
int i ;
for ( i = 0 ; noptypes [ i ] . cpuid > = 0 ; i + + ) {
if ( boot_cpu_has ( noptypes [ i ] . cpuid ) ) {
noptable = noptypes [ i ] . noptable ;
break ;
}
}
return noptable ;
}
2006-06-26 13:56:16 +02:00
# endif /* CONFIG_X86_64 */
2007-08-10 22:31:03 +02:00
/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void add_nops ( void * insns , unsigned int len )
2006-12-07 02:14:08 +01:00
{
2007-10-17 18:04:37 +02:00
const unsigned char * const * noptable = find_nop_table ( ) ;
2006-12-07 02:14:08 +01:00
while ( len > 0 ) {
unsigned int noplen = len ;
if ( noplen > ASM_NOP_MAX )
noplen = ASM_NOP_MAX ;
2007-08-10 22:31:03 +02:00
memcpy ( insns , noptable [ noplen ] , noplen ) ;
2006-12-07 02:14:08 +01:00
insns + = noplen ;
len - = noplen ;
}
}
2006-06-26 13:56:16 +02:00
extern struct alt_instr __alt_instructions [ ] , __alt_instructions_end [ ] ;
extern u8 * __smp_locks [ ] , * __smp_locks_end [ ] ;
2006-03-23 02:59:32 -08:00
/* Replace instructions with better alternatives for this CPU type.
This runs before SMP is initialized to avoid SMP problems with
self modifying code . This implies that assymetric systems where
APs have less capabilities than the boot processor are not handled .
Tough . Make sure you disable such features by hand . */
void apply_alternatives ( struct alt_instr * start , struct alt_instr * end )
{
struct alt_instr * a ;
2007-08-10 22:31:03 +02:00
char insnbuf [ MAX_PATCH_LEN ] ;
2006-03-23 02:59:32 -08:00
DPRINTK ( " %s: alt table %p -> %p \n " , __FUNCTION__ , start , end ) ;
for ( a = start ; a < end ; a + + ) {
2007-08-10 22:31:03 +02:00
u8 * instr = a - > instr ;
2006-03-23 02:59:32 -08:00
BUG_ON ( a - > replacementlen > a - > instrlen ) ;
2007-08-10 22:31:03 +02:00
BUG_ON ( a - > instrlen > sizeof ( insnbuf ) ) ;
2006-03-23 02:59:32 -08:00
if ( ! boot_cpu_has ( a - > cpuid ) )
continue ;
2006-06-26 13:56:16 +02:00
# ifdef CONFIG_X86_64
/* vsyscall code is not mapped yet. resolve it manually. */
if ( instr > = ( u8 * ) VSYSCALL_START & & instr < ( u8 * ) VSYSCALL_END ) {
instr = __va ( instr - ( u8 * ) VSYSCALL_START + ( u8 * ) __pa_symbol ( & __vsyscall_0 ) ) ;
DPRINTK ( " %s: vsyscall fixup: %p => %p \n " ,
__FUNCTION__ , a - > instr , instr ) ;
}
# endif
2007-08-10 22:31:03 +02:00
memcpy ( insnbuf , a - > replacement , a - > replacementlen ) ;
add_nops ( insnbuf + a - > replacementlen ,
a - > instrlen - a - > replacementlen ) ;
text_poke ( instr , insnbuf , a - > instrlen ) ;
2006-03-23 02:59:32 -08:00
}
}
2006-07-01 04:36:18 -07:00
#ifdef CONFIG_SMP

/* Restore the 0xf0 lock prefix at every recorded location that
   lies inside [text, text_end]; other locations (e.g. discarded
   init code) are skipped. */
static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text || *ptr > text_end)
			continue;
		text_poke(*ptr, ((unsigned char []){0xf0}), 1); /* add lock prefix */
	}
}
/* Replace the lock prefix at every recorded in-range location with a
   one-byte nop, turning atomic ops into cheaper UP variants.
   No-op when "noreplace-smp" was given. */
static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;
	char insn[1];

	if (noreplace_smp)
		return;

	add_nops(insn, 1);		/* the CPU's preferred 1-byte nop */
	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text || *ptr > text_end)
			continue;
		text_poke(*ptr, insn, 1);
	}
}
/* Bookkeeping for one patchable text region whose lock prefixes can be
 * switched between SMP and UP variants at runtime. */
struct smp_alt_module {
	/* owning module; NULL for the core kernel */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	u8		**locks;
	u8		**locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);	/* protects smp_alt_modules */
void alternatives_smp_module_add ( struct module * mod , char * name ,
void * locks , void * locks_end ,
void * text , void * text_end )
{
struct smp_alt_module * smp ;
unsigned long flags ;
2007-05-02 19:27:13 +02:00
if ( noreplace_smp )
return ;
2006-03-23 02:59:32 -08:00
if ( smp_alt_once ) {
if ( boot_cpu_has ( X86_FEATURE_UP ) )
alternatives_smp_unlock ( locks , locks_end ,
text , text_end ) ;
return ;
}
smp = kzalloc ( sizeof ( * smp ) , GFP_KERNEL ) ;
if ( NULL = = smp )
return ; /* we'll run the (safe but slow) SMP code then ... */
smp - > mod = mod ;
smp - > name = name ;
smp - > locks = locks ;
smp - > locks_end = locks_end ;
smp - > text = text ;
smp - > text_end = text_end ;
DPRINTK ( " %s: locks %p -> %p, text %p -> %p, name %s \n " ,
__FUNCTION__ , smp - > locks , smp - > locks_end ,
smp - > text , smp - > text_end , smp - > name ) ;
spin_lock_irqsave ( & smp_alt , flags ) ;
list_add_tail ( & smp - > next , & smp_alt_modules ) ;
if ( boot_cpu_has ( X86_FEATURE_UP ) )
alternatives_smp_unlock ( smp - > locks , smp - > locks_end ,
smp - > text , smp - > text_end ) ;
spin_unlock_irqrestore ( & smp_alt , flags ) ;
}
void alternatives_smp_module_del ( struct module * mod )
{
struct smp_alt_module * item ;
unsigned long flags ;
2007-05-02 19:27:13 +02:00
if ( smp_alt_once | | noreplace_smp )
2006-03-23 02:59:32 -08:00
return ;
spin_lock_irqsave ( & smp_alt , flags ) ;
list_for_each_entry ( item , & smp_alt_modules , next ) {
if ( mod ! = item - > mod )
continue ;
list_del ( & item - > next ) ;
spin_unlock_irqrestore ( & smp_alt , flags ) ;
DPRINTK ( " %s: %s \n " , __FUNCTION__ , item - > name ) ;
kfree ( item ) ;
return ;
}
spin_unlock_irqrestore ( & smp_alt , flags ) ;
}
void alternatives_smp_switch ( int smp )
{
struct smp_alt_module * mod ;
unsigned long flags ;
2006-07-03 00:24:57 -07:00
# ifdef CONFIG_LOCKDEP
/*
* A not yet fixed binutils section handling bug prevents
* alternatives - replacement from working reliably , so turn
* it off :
*/
printk ( " lockdep: not fixing up alternatives. \n " ) ;
return ;
# endif
2007-05-02 19:27:13 +02:00
if ( noreplace_smp | | smp_alt_once )
2006-03-23 02:59:32 -08:00
return ;
BUG_ON ( ! smp & & ( num_online_cpus ( ) > 1 ) ) ;
spin_lock_irqsave ( & smp_alt , flags ) ;
if ( smp ) {
printk ( KERN_INFO " SMP alternatives: switching to SMP code \n " ) ;
clear_bit ( X86_FEATURE_UP , boot_cpu_data . x86_capability ) ;
clear_bit ( X86_FEATURE_UP , cpu_data [ 0 ] . x86_capability ) ;
list_for_each_entry ( mod , & smp_alt_modules , next )
alternatives_smp_lock ( mod - > locks , mod - > locks_end ,
mod - > text , mod - > text_end ) ;
} else {
printk ( KERN_INFO " SMP alternatives: switching to UP code \n " ) ;
set_bit ( X86_FEATURE_UP , boot_cpu_data . x86_capability ) ;
set_bit ( X86_FEATURE_UP , cpu_data [ 0 ] . x86_capability ) ;
list_for_each_entry ( mod , & smp_alt_modules , next )
alternatives_smp_unlock ( mod - > locks , mod - > locks_end ,
mod - > text , mod - > text_end ) ;
}
spin_unlock_irqrestore ( & smp_alt , flags ) ;
}
2006-07-01 04:36:18 -07:00
# endif
2006-12-07 02:14:08 +01:00
#ifdef CONFIG_PARAVIRT
/* Let paravirt_ops rewrite each recorded patch site in place,
   padding whatever the backend did not use with nops. */
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = paravirt_ops.patch(p->instrtype, p->clobbers, insnbuf,
					  (unsigned long)p->instr, p->len);

		/* The backend must not write past the site. */
		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */
2006-03-23 02:59:32 -08:00
/* Boot-time entry point: apply all alternative, SMP-lock and paravirt
   patches with NMI/MCE/irqs quiesced on the boot CPU. */
void __init alternative_instructions(void)
{
	unsigned long flags;

	/* The patching is not fully atomic, so try to avoid local interruptions
	   that might execute the to be patched code.
	   Other CPUs are not running. */
	stop_nmi();
#ifdef CONFIG_X86_MCE
	stop_mce();
#endif

	local_irq_save(flags);
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (num_possible_cpus() == 1) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
	} else {
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
		alternatives_smp_switch(0);
	}
#endif
	apply_paravirt(__parainstructions, __parainstructions_end);
	local_irq_restore(flags);

	/* Patch-once mode will never touch the lock table again. */
	if (smp_alt_once)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);

	restart_nmi();
#ifdef CONFIG_X86_MCE
	restart_mce();
#endif
}
2007-07-22 11:12:31 +02:00
/*
* Warning :
* When you use this code to patch more than one byte of an instruction
* you need to make sure that other CPUs cannot execute this code in parallel .
* Also no thread must be currently preempted in the middle of these instructions .
* And on the local CPU you need to be protected again NMI or MCE handlers
* seeing an inconsistent instruction while you patch .
*/
2007-07-26 12:07:21 -07:00
void __kprobes text_poke ( void * addr , unsigned char * opcode , int len )
2007-07-22 11:12:31 +02:00
{
memcpy ( addr , opcode , len ) ;
sync_core ( ) ;
2007-09-06 16:59:52 +02:00
/* Could also do a CLFLUSH here to speed up CPU recovery; but
that causes hangs on some VIA CPUs . */
2007-07-22 11:12:31 +02:00
}