/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com>
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
        mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
        mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
        const struct jump_entry *jea = a;
        const struct jump_entry *jeb = b;

        if (jea->key < jeb->key)
                return -1;

        if (jea->key > jeb->key)
                return 1;

        return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
        unsigned long size;

        size = (((unsigned long)stop - (unsigned long)start)
                                        / sizeof(struct jump_entry));
        sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct static_key *key, int enable);

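/*
 * Illustrative sketch of how callers typically use this API (not part of
 * this file; the key and the helper below are only an example):
 *
 *        static struct static_key my_key = STATIC_KEY_INIT_FALSE;
 *
 *        if (static_key_false(&my_key))
 *                do_unlikely_work();
 *
 * static_key_slow_inc(&my_key) patches the branch in; a matching
 * static_key_slow_dec(&my_key) patches it back out once the reference
 * count returns to zero.
 */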
void static_key_slow_inc(struct static_key *key)
{
        if (atomic_inc_not_zero(&key->enabled))
                return;

        jump_label_lock();
        if (atomic_read(&key->enabled) == 0) {
                if (!jump_label_get_branch_default(key))
                        jump_label_update(key, JUMP_LABEL_ENABLE);
                else
                        jump_label_update(key, JUMP_LABEL_DISABLE);
        }
        atomic_inc(&key->enabled);
        jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

static void __static_key_slow_dec(struct static_key *key,
                unsigned long rate_limit, struct delayed_work *work)
{
        if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
                WARN(atomic_read(&key->enabled) < 0,
                     "jump label: negative count!\n");
                return;
        }

        if (rate_limit) {
                atomic_inc(&key->enabled);
                schedule_delayed_work(work, rate_limit);
        } else {
                if (!jump_label_get_branch_default(key))
                        jump_label_update(key, JUMP_LABEL_DISABLE);
                else
                        jump_label_update(key, JUMP_LABEL_ENABLE);
        }
        jump_label_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
        struct static_key_deferred *key =
                container_of(work, struct static_key_deferred, work.work);

        __static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
        __static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

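/*
 * The deferred variant wraps a static_key in a static_key_deferred so the
 * final disable can be postponed, avoiding text-patching churn when a key
 * is toggled rapidly. A sketch of typical usage (example names only):
 *
 *        static struct static_key_deferred my_dkey;
 *
 *        jump_label_rate_limit(&my_dkey, HZ);
 *        static_key_slow_inc(&my_dkey.key);
 *        static_key_slow_dec_deferred(&my_dkey);
 */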
void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
        __static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void jump_label_rate_limit(struct static_key_deferred *key,
                unsigned long rl)
{
        key->timeout = rl;
        INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
        if (entry->code <= (unsigned long)end &&
                entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
                return 1;

        return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
                struct jump_entry *iter_stop, void *start, void *end)
{
        struct jump_entry *iter;

        iter = iter_start;
        while (iter < iter_stop) {
                if (addr_conflict(iter, start, end))
                        return 1;
                iter++;
        }

        return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
                                            enum jump_label_type type)
{
        arch_jump_label_transform(entry, type);
}

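/*
 * The jump tables are sorted by key (see jump_label_sort_entries()), so all
 * entries for a given key are contiguous and the walk below can stop at the
 * first entry whose key no longer matches.
 */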
static void __jump_label_update(struct static_key *key,
                                struct jump_entry *entry,
                                struct jump_entry *stop, int enable)
{
        for (; (entry < stop) &&
              (entry->key == (jump_label_t)(unsigned long)key);
              entry++) {
                /*
                 * entry->code set to 0 invalidates module init text sections;
                 * kernel_text_address() verifies we are not in core kernel
                 * init code, see jump_label_invalidate_module_init().
                 */
                if (entry->code && kernel_text_address(entry->code))
                        arch_jump_label_transform(entry, enable);
        }
}

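/*
 * A key needs the ENABLE transform whenever its current state differs from
 * its compile-time default branch:
 *
 *        default false, key enabled  -> JUMP_LABEL_ENABLE
 *        default true,  key disabled -> JUMP_LABEL_ENABLE
 *        otherwise                   -> JUMP_LABEL_DISABLE
 */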
static enum jump_label_type jump_label_type(struct static_key *key)
{
        bool true_branch = jump_label_get_branch_default(key);
        bool state = static_key_enabled(key);

        if ((!true_branch && state) || (true_branch && !state))
                return JUMP_LABEL_ENABLE;

        return JUMP_LABEL_DISABLE;
}

void __init jump_label_init(void)
{
        struct jump_entry *iter_start = __start___jump_table;
        struct jump_entry *iter_stop = __stop___jump_table;
        struct static_key *key = NULL;
        struct jump_entry *iter;

        jump_label_lock();
        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;

                iterk = (struct static_key *)(unsigned long)iter->key;
                arch_jump_label_transform_static(iter, jump_label_type(iterk));
                if (iterk == key)
                        continue;

                key = iterk;
                /*
                 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
                 */
                *((unsigned long *)&key->entries) += (unsigned long)iter;
#ifdef CONFIG_MODULES
                key->next = NULL;
#endif
        }
        jump_label_unlock();
}

#ifdef CONFIG_MODULES

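/*
 * Each static_key carries a singly linked list of static_key_mod nodes, one
 * per module that references the key, so that toggling the key also patches
 * the jump entries that live in module text.
 */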
struct static_key_mod {
        struct static_key_mod *next;
        struct jump_entry *entries;
        struct module *mod;
};

static int __jump_label_mod_text_reserved(void *start, void *end)
{
        struct module *mod;

        mod = __module_text_address((unsigned long)start);
        if (!mod)
                return 0;

        WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);

        return __jump_label_text_reserved(mod->jump_entries,
                                mod->jump_entries + mod->num_jump_entries,
                                start, end);
}

static void __jump_label_mod_update(struct static_key *key, int enable)
{
        struct static_key_mod *mod = key->next;

        while (mod) {
                struct module *m = mod->mod;

                __jump_label_update(key, mod->entries,
                                    m->jump_entries + m->num_jump_entries,
                                    enable);
                mod = mod->next;
        }
}

/***
 * jump_label_apply_nops - patch module jump labels with the ideal nop
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch its jump entries with arch_jump_label_transform_static(),
 * which is provided by the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return;

        for (iter = iter_start; iter < iter_stop; iter++) {
                arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE);
        }
}

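/*
 * Called while a module is coming up: sort its jump entries, then for each
 * key either point key->entries at the module's entries (when the key is
 * defined in this module) or link them in through a static_key_mod node,
 * patching the new sites immediately if the key is already enabled.
 */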
static int jump_label_add_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return 0;

        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;

                iterk = (struct static_key *)(unsigned long)iter->key;
                if (iterk == key)
                        continue;

                key = iterk;
                if (__module_address(iter->key) == mod) {
                        /*
                         * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
                         */
                        *((unsigned long *)&key->entries) += (unsigned long)iter;
                        key->next = NULL;
                        continue;
                }
                jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
                if (!jlm)
                        return -ENOMEM;
                jlm->mod = mod;
                jlm->entries = iter;
                jlm->next = key->next;
                key->next = jlm;

                if (jump_label_type(key) == JUMP_LABEL_ENABLE)
                        __jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE);
        }

        return 0;
}

static void jump_label_del_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm, **prev;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (iter->key == (jump_label_t)(unsigned long)key)
                        continue;

                key = (struct static_key *)(unsigned long)iter->key;

                if (__module_address(iter->key) == mod)
                        continue;

                prev = &key->next;
                jlm = key->next;

                while (jlm && jlm->mod != mod) {
                        prev = &jlm->next;
                        jlm = jlm->next;
                }

                if (jlm) {
                        *prev = jlm->next;
                        kfree(jlm);
                }
        }
}

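/*
 * Once a module reaches MODULE_STATE_LIVE its init text is freed, so zero
 * the code address of any jump entry that points into it;
 * __jump_label_update() skips entries whose code is 0.
 */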
static void jump_label_invalidate_module_init(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (within_module_init(iter->code, mod))
                        iter->code = 0;
        }
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
                         void *data)
{
        struct module *mod = data;
        int ret = 0;

        switch (val) {
        case MODULE_STATE_COMING:
                jump_label_lock();
                ret = jump_label_add_module(mod);
                if (ret)
                        jump_label_del_module(mod);
                jump_label_unlock();
                break;
        case MODULE_STATE_GOING:
                jump_label_lock();
                jump_label_del_module(mod);
                jump_label_unlock();
                break;
        case MODULE_STATE_LIVE:
                jump_label_lock();
                jump_label_invalidate_module_init(mod);
                jump_label_unlock();
                break;
        }

        return notifier_from_errno(ret);
}

struct notifier_block jump_label_module_nb = {
        .notifier_call = jump_label_module_notify,
        .priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
        return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text addresses between @start and @end overlap with any of
 * the jump label patch addresses. Code that wants to modify kernel text
 * should first verify that it does not overlap with any of the jump label
 * addresses. Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
        int ret = __jump_label_text_reserved(__start___jump_table,
                        __stop___jump_table, start, end);

        if (ret)
                return ret;

#ifdef CONFIG_MODULES
        ret = __jump_label_mod_text_reserved(start, end);
#endif
        return ret;
}

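/*
 * Patch every live site keyed off @key: first the entries contributed by
 * other modules (via the static_key_mod list), then the entries in whichever
 * object, core kernel or module, defines the key itself.
 */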
static void jump_label_update(struct static_key *key, int enable)
{
        struct jump_entry *stop = __stop___jump_table;
        struct jump_entry *entry = jump_label_get_entries(key);
#ifdef CONFIG_MODULES
        struct module *mod = __module_address((unsigned long)key);

        __jump_label_mod_update(key, enable);

        if (mod)
                stop = mod->jump_entries + mod->num_jump_entries;
#endif
        /* if there are no users, entry can be NULL */
        if (entry)
                __jump_label_update(key, entry, stop, enable);
}

#endif /* HAVE_JUMP_LABEL */