/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com>
 *
 */
# include <linux/memory.h>
# include <linux/uaccess.h>
# include <linux/module.h>
# include <linux/list.h>
# include <linux/slab.h>
# include <linux/sort.h>
# include <linux/err.h>
2011-03-17 00:29:47 +03:00
# include <linux/jump_label.h>
2010-09-17 19:09:00 +04:00
# ifdef HAVE_JUMP_LABEL
/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX ( jump_label_mutex ) ;
2010-10-02 01:23:48 +04:00
/* Take the global jump_label mutex; serializes all key/entry updates. */
void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}
/* Release the global jump_label mutex. */
void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}
2011-03-17 00:29:47 +03:00
bool jump_label_enabled ( struct jump_label_key * key )
{
return ! ! atomic_read ( & key - > enabled ) ;
}
2010-09-17 19:09:00 +04:00
static int jump_label_cmp ( const void * a , const void * b )
{
const struct jump_entry * jea = a ;
const struct jump_entry * jeb = b ;
if ( jea - > key < jeb - > key )
return - 1 ;
if ( jea - > key > jeb - > key )
return 1 ;
return 0 ;
}
static void
2011-03-17 00:29:47 +03:00
jump_label_sort_entries ( struct jump_entry * start , struct jump_entry * stop )
2010-09-17 19:09:00 +04:00
{
unsigned long size ;
size = ( ( ( unsigned long ) stop - ( unsigned long ) start )
/ sizeof ( struct jump_entry ) ) ;
sort ( start , size , sizeof ( struct jump_entry ) , jump_label_cmp , NULL ) ;
}
2011-03-17 00:29:47 +03:00
static void jump_label_update ( struct jump_label_key * key , int enable ) ;
2010-09-17 19:09:00 +04:00
2011-03-17 00:29:47 +03:00
/*
 * Take a reference on @key; on the 0 -> 1 transition, patch all of the
 * key's jump sites to their enabled state.
 */
void jump_label_inc(struct jump_label_key *key)
{
	/* Fast path: already enabled, just grab another reference. */
	if (atomic_inc_not_zero(&key->enabled))
		return;

	jump_label_lock();
	/*
	 * Patch the sites before raising the count from zero, so a
	 * concurrent fast-path increment can only succeed once the
	 * branches are already in their enabled state.
	 */
	if (atomic_read(&key->enabled) == 0)
		jump_label_update(key, JUMP_LABEL_ENABLE);
	atomic_inc(&key->enabled);
	jump_label_unlock();
}
2011-11-27 19:59:09 +04:00
/*
 * Drop a reference on @key.  On the 1 -> 0 transition either disable
 * the key's jump sites, or — when @rate_limit is non-zero — keep the
 * count at 1 and retry later via @work.
 */
static void __jump_label_dec(struct jump_label_key *key,
		unsigned long rate_limit, struct delayed_work *work)
{
	/* Only the final decrement takes the mutex and patches code. */
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex))
		return;

	if (rate_limit) {
		/* Defer: restore the reference and try again after @rate_limit. */
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else
		jump_label_update(key, JUMP_LABEL_DISABLE);

	jump_label_unlock();
}
2011-11-27 19:59:09 +04:00
/* Delayed-work handler: retry the deferred disable, no rate limit this time. */
static void jump_label_update_timeout(struct work_struct *work)
{
	struct jump_label_key_deferred *key =
		container_of(work, struct jump_label_key_deferred, work.work);

	__jump_label_dec(&key->key, 0, NULL);
}
/* Drop a reference on @key, disabling its sites on the 1 -> 0 transition. */
void jump_label_dec(struct jump_label_key *key)
{
	__jump_label_dec(key, 0, NULL);
}
/* As jump_label_dec(), but the disable is rate-limited by key->timeout. */
void jump_label_dec_deferred(struct jump_label_key_deferred *key)
{
	__jump_label_dec(&key->key, key->timeout, &key->work);
}
void jump_label_rate_limit ( struct jump_label_key_deferred * key ,
unsigned long rl )
{
key - > timeout = rl ;
INIT_DELAYED_WORK ( & key - > work , jump_label_update_timeout ) ;
}
2010-09-17 19:09:08 +04:00
static int addr_conflict ( struct jump_entry * entry , void * start , void * end )
{
if ( entry - > code < = ( unsigned long ) end & &
entry - > code + JUMP_LABEL_NOP_SIZE > ( unsigned long ) start )
return 1 ;
return 0 ;
}
2011-03-17 00:29:47 +03:00
static int __jump_label_text_reserved ( struct jump_entry * iter_start ,
struct jump_entry * iter_stop , void * start , void * end )
2010-09-17 19:09:08 +04:00
{
struct jump_entry * iter ;
iter = iter_start ;
while ( iter < iter_stop ) {
2011-03-17 00:29:47 +03:00
if ( addr_conflict ( iter , start , end ) )
return 1 ;
2010-09-17 19:09:08 +04:00
iter + + ;
}
2011-03-17 00:29:47 +03:00
return 0 ;
}
2011-10-03 22:01:46 +04:00
/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak arch_jump_label_transform_static(struct jump_entry *entry,
					     enum jump_label_type type)
{
	/* Default: fall back to the regular (live-code) transform. */
	arch_jump_label_transform(entry, type);
}
2011-03-17 00:29:47 +03:00
static void __jump_label_update ( struct jump_label_key * key ,
2011-05-10 14:43:46 +04:00
struct jump_entry * entry ,
struct jump_entry * stop , int enable )
2011-03-17 00:29:47 +03:00
{
2011-05-10 14:43:46 +04:00
for ( ; ( entry < stop ) & &
( entry - > key = = ( jump_label_t ) ( unsigned long ) key ) ;
entry + + ) {
2011-03-17 00:29:47 +03:00
/*
* entry - > code set to 0 invalidates module init text sections
* kernel_text_address ( ) verifies we are not in core kernel
* init code , see jump_label_invalidate_module_init ( ) .
*/
if ( entry - > code & & kernel_text_address ( entry - > code ) )
arch_jump_label_transform ( entry , enable ) ;
}
2010-09-17 19:09:08 +04:00
}
2011-10-13 03:17:54 +04:00
/* Boot-time setup: sort the core-kernel jump table and patch every site. */
void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct jump_label_key *key = NULL;
	struct jump_entry *iter;

	jump_label_lock();
	/* Sort by key so each key's entries form one contiguous run. */
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct jump_label_key *iterk;

		iterk = (struct jump_label_key *)(unsigned long)iter->key;
		/* Nothing is live yet, so the cheap static transform is safe. */
		arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ?
						 JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE);
		/* Only the first entry of each key initializes the key itself. */
		if (iterk == key)
			continue;

		key = iterk;
		key->entries = iter;
#ifdef CONFIG_MODULES
		key->next = NULL;
#endif
	}
	jump_label_unlock();
}
# ifdef CONFIG_MODULES
2011-03-17 00:29:47 +03:00
struct jump_label_mod {
struct jump_label_mod * next ;
struct jump_entry * entries ;
struct module * mod ;
} ;
static int __jump_label_mod_text_reserved ( void * start , void * end )
{
struct module * mod ;
mod = __module_text_address ( ( unsigned long ) start ) ;
if ( ! mod )
return 0 ;
WARN_ON_ONCE ( __module_text_address ( ( unsigned long ) end ) ! = mod ) ;
return __jump_label_text_reserved ( mod - > jump_entries ,
mod - > jump_entries + mod - > num_jump_entries ,
start , end ) ;
}
static void __jump_label_mod_update ( struct jump_label_key * key , int enable )
{
struct jump_label_mod * mod = key - > next ;
while ( mod ) {
2011-05-10 14:43:46 +04:00
struct module * m = mod - > mod ;
__jump_label_update ( key , mod - > entries ,
m - > jump_entries + m - > num_jump_entries ,
enable ) ;
2011-03-17 00:29:47 +03:00
mod = mod - > next ;
}
}
/***
* apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop ( )
* @ mod : module to patch
*
* Allow for run - time selection of the optimal nops . Before the module
* loads patch these with arch_get_jump_label_nop ( ) , which is specified by
* the arch specific jump label code .
*/
void jump_label_apply_nops ( struct module * mod )
2010-09-17 19:09:00 +04:00
{
2011-03-17 00:29:47 +03:00
struct jump_entry * iter_start = mod - > jump_entries ;
struct jump_entry * iter_stop = iter_start + mod - > num_jump_entries ;
struct jump_entry * iter ;
/* if the module doesn't have jump label entries, just return */
if ( iter_start = = iter_stop )
return ;
for ( iter = iter_start ; iter < iter_stop ; iter + + )
2011-10-03 22:01:46 +04:00
arch_jump_label_transform_static ( iter , JUMP_LABEL_DISABLE ) ;
2010-09-17 19:09:00 +04:00
}
2011-03-17 00:29:47 +03:00
/*
 * Register @mod's jump entries: sort them, initialize keys owned by the
 * module itself, and chain the module's entries for foreign keys onto
 * those keys' ->next lists.  Returns 0 on success or -ENOMEM.
 */
static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct jump_label_key *key = NULL;
	struct jump_label_mod *jlm;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Entries are sorted; skip repeats of the current key. */
		if (iter->key == (jump_label_t)(unsigned long)key)
			continue;

		key = (struct jump_label_key *)(unsigned long)iter->key;

		if (__module_address(iter->key) == mod) {
			/* Key lives in this module: initialize it here. */
			atomic_set(&key->enabled, 0);
			key->entries = iter;
			key->next = NULL;
			continue;
		}

		/* Foreign key: record this module's entries on its list. */
		jlm = kzalloc(sizeof(struct jump_label_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;

		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = key->next;
		key->next = jlm;

		/* Bring the new sites in line with the key's current state. */
		if (jump_label_enabled(key))
			__jump_label_update(key, iter, iter_stop,
					    JUMP_LABEL_ENABLE);
	}

	return 0;
}
2011-03-17 00:29:47 +03:00
/*
 * Undo jump_label_add_module(): for every foreign key referenced by
 * @mod, unlink and free the jump_label_mod node added for this module.
 */
static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct jump_label_key *key = NULL;
	struct jump_label_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Entries are sorted; skip repeats of the current key. */
		if (iter->key == (jump_label_t)(unsigned long)key)
			continue;

		key = (struct jump_label_key *)(unsigned long)iter->key;

		/* Keys owned by this module go away with it; nothing to do. */
		if (__module_address(iter->key) == mod)
			continue;

		/* Walk the key's list looking for this module's node. */
		prev = &key->next;
		jlm = key->next;

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		if (jlm) {
			*prev = jlm->next;
			kfree(jlm);
		}
	}
}
2011-03-17 00:29:47 +03:00
static void jump_label_invalidate_module_init ( struct module * mod )
2010-10-02 01:23:41 +04:00
{
2011-03-17 00:29:47 +03:00
struct jump_entry * iter_start = mod - > jump_entries ;
struct jump_entry * iter_stop = iter_start + mod - > num_jump_entries ;
2010-10-02 01:23:41 +04:00
struct jump_entry * iter ;
2011-03-17 00:29:47 +03:00
for ( iter = iter_start ; iter < iter_stop ; iter + + ) {
if ( within_module_init ( iter - > code , mod ) )
iter - > code = 0 ;
2010-10-02 01:23:41 +04:00
}
}
2010-09-17 19:09:00 +04:00
/*
 * Module notifier: wire up / tear down a module's jump entries as it
 * comes and goes, and drop init-text entries once the module is live.
 */
static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		jump_label_lock();
		ret = jump_label_add_module(mod);
		if (ret)
			/* Roll back whatever was partially added. */
			jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_GOING:
		jump_label_lock();
		jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_LIVE:
		/* Init sections are being freed; invalidate their entries. */
		jump_label_lock();
		jump_label_invalidate_module_init(mod);
		jump_label_unlock();
		break;
	}

	return notifier_from_errno(ret);
}
/* Notifier block hooking jump label maintenance into module state changes. */
struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};
2011-03-17 00:29:47 +03:00
/* Register the module notifier early, before any modules can load. */
static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);
2010-09-17 19:09:00 +04:00
# endif /* CONFIG_MODULES */
2011-03-17 00:29:47 +03:00
/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * returns 1 if there is an overlap, 0 otherwise
 */
int jump_label_text_reserved(void *start, void *end)
{
	/* Core kernel table first; it conflicts regardless of modules. */
	if (__jump_label_text_reserved(__start___jump_table,
				       __stop___jump_table, start, end))
		return 1;

#ifdef CONFIG_MODULES
	return __jump_label_mod_text_reserved(start, end);
#else
	return 0;
#endif
}
/*
 * Patch every site of @key: the key's own contiguous run of entries
 * plus, with CONFIG_MODULES, the runs in every module referencing it.
 * Callers hold jump_label_mutex.
 */
static void jump_label_update(struct jump_label_key *key, int enable)
{
	struct jump_entry *entry = key->entries, *stop = __stop___jump_table;

#ifdef CONFIG_MODULES
	struct module *mod = __module_address((jump_label_t)key);

	__jump_label_mod_update(key, enable);

	/* A module-owned key's run ends at that module's own table. */
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
#endif
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop, enable);
}
2010-09-17 19:09:00 +04:00
# endif