/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com>
 *
 */
# include <linux/memory.h>
# include <linux/uaccess.h>
# include <linux/module.h>
# include <linux/list.h>
# include <linux/slab.h>
# include <linux/sort.h>
# include <linux/err.h>
2012-02-24 11:31:31 +04:00
# include <linux/static_key.h>
2013-08-09 18:21:57 +04:00
# include <linux/jump_label_ratelimit.h>
2010-09-17 19:09:00 +04:00
# ifdef HAVE_JUMP_LABEL
/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX ( jump_label_mutex ) ;
2010-10-02 01:23:48 +04:00
/* Serialize all modification of the jump-label tables and key state. */
void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}
/* Release the lock taken by jump_label_lock(). */
void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}
2010-09-17 19:09:00 +04:00
static int jump_label_cmp ( const void * a , const void * b )
{
const struct jump_entry * jea = a ;
const struct jump_entry * jeb = b ;
if ( jea - > key < jeb - > key )
return - 1 ;
if ( jea - > key > jeb - > key )
return 1 ;
return 0 ;
}
static void
2011-03-17 00:29:47 +03:00
jump_label_sort_entries ( struct jump_entry * start , struct jump_entry * stop )
2010-09-17 19:09:00 +04:00
{
unsigned long size ;
size = ( ( ( unsigned long ) stop - ( unsigned long ) start )
/ sizeof ( struct jump_entry ) ) ;
sort ( start , size , sizeof ( struct jump_entry ) , jump_label_cmp , NULL ) ;
}
2015-07-24 16:06:37 +03:00
static void jump_label_update ( struct static_key * key ) ;
2015-07-24 15:55:40 +03:00
2012-02-24 11:31:31 +04:00
void static_key_slow_inc ( struct static_key * key )
2010-09-17 19:09:00 +04:00
{
2013-10-19 23:48:53 +04:00
STATIC_KEY_CHECK_USE ( ) ;
2011-03-17 00:29:47 +03:00
if ( atomic_inc_not_zero ( & key - > enabled ) )
return ;
2010-09-17 19:09:00 +04:00
2011-03-17 00:29:47 +03:00
jump_label_lock ( ) ;
2015-07-24 16:06:37 +03:00
if ( atomic_inc_return ( & key - > enabled ) = = 1 )
jump_label_update ( key ) ;
2011-03-17 00:29:47 +03:00
jump_label_unlock ( ) ;
2010-09-17 19:09:00 +04:00
}
2012-02-24 11:31:31 +04:00
EXPORT_SYMBOL_GPL ( static_key_slow_inc ) ;
2010-09-17 19:09:00 +04:00
2012-02-24 11:31:31 +04:00
static void __static_key_slow_dec ( struct static_key * key ,
2011-11-27 19:59:09 +04:00
unsigned long rate_limit , struct delayed_work * work )
2010-09-17 19:09:00 +04:00
{
2012-02-22 00:02:53 +04:00
if ( ! atomic_dec_and_mutex_lock ( & key - > enabled , & jump_label_mutex ) ) {
WARN ( atomic_read ( & key - > enabled ) < 0 ,
" jump label: negative count! \n " ) ;
2011-03-17 00:29:47 +03:00
return ;
2012-02-22 00:02:53 +04:00
}
2010-09-17 19:09:00 +04:00
2011-11-27 19:59:09 +04:00
if ( rate_limit ) {
atomic_inc ( & key - > enabled ) ;
schedule_delayed_work ( work , rate_limit ) ;
2012-02-24 11:31:31 +04:00
} else {
2015-07-24 16:06:37 +03:00
jump_label_update ( key ) ;
2012-02-24 11:31:31 +04:00
}
2010-10-02 01:23:48 +04:00
jump_label_unlock ( ) ;
2010-09-17 19:09:00 +04:00
}
2011-11-27 19:59:09 +04:00
static void jump_label_update_timeout ( struct work_struct * work )
{
2012-02-24 11:31:31 +04:00
struct static_key_deferred * key =
container_of ( work , struct static_key_deferred , work . work ) ;
__static_key_slow_dec ( & key - > key , 0 , NULL ) ;
2011-11-27 19:59:09 +04:00
}
2012-02-24 11:31:31 +04:00
void static_key_slow_dec ( struct static_key * key )
2011-11-27 19:59:09 +04:00
{
2013-10-19 23:48:53 +04:00
STATIC_KEY_CHECK_USE ( ) ;
2012-02-24 11:31:31 +04:00
__static_key_slow_dec ( key , 0 , NULL ) ;
2011-11-27 19:59:09 +04:00
}
2012-02-24 11:31:31 +04:00
EXPORT_SYMBOL_GPL ( static_key_slow_dec ) ;
2011-11-27 19:59:09 +04:00
2012-02-24 11:31:31 +04:00
void static_key_slow_dec_deferred ( struct static_key_deferred * key )
2011-11-27 19:59:09 +04:00
{
2013-10-19 23:48:53 +04:00
STATIC_KEY_CHECK_USE ( ) ;
2012-02-24 11:31:31 +04:00
__static_key_slow_dec ( & key - > key , key - > timeout , & key - > work ) ;
2011-11-27 19:59:09 +04:00
}
2012-02-24 11:31:31 +04:00
EXPORT_SYMBOL_GPL ( static_key_slow_dec_deferred ) ;
2011-11-27 19:59:09 +04:00
2012-02-24 11:31:31 +04:00
void jump_label_rate_limit ( struct static_key_deferred * key ,
2011-11-27 19:59:09 +04:00
unsigned long rl )
{
2013-10-19 23:48:53 +04:00
STATIC_KEY_CHECK_USE ( ) ;
2011-11-27 19:59:09 +04:00
key - > timeout = rl ;
INIT_DELAYED_WORK ( & key - > work , jump_label_update_timeout ) ;
}
2012-08-05 16:58:29 +04:00
EXPORT_SYMBOL_GPL ( jump_label_rate_limit ) ;
2011-11-27 19:59:09 +04:00
2010-09-17 19:09:08 +04:00
static int addr_conflict ( struct jump_entry * entry , void * start , void * end )
{
if ( entry - > code < = ( unsigned long ) end & &
entry - > code + JUMP_LABEL_NOP_SIZE > ( unsigned long ) start )
return 1 ;
return 0 ;
}
2011-03-17 00:29:47 +03:00
static int __jump_label_text_reserved ( struct jump_entry * iter_start ,
struct jump_entry * iter_stop , void * start , void * end )
2010-09-17 19:09:08 +04:00
{
struct jump_entry * iter ;
iter = iter_start ;
while ( iter < iter_stop ) {
2011-03-17 00:29:47 +03:00
if ( addr_conflict ( iter , start , end ) )
return 1 ;
2010-09-17 19:09:08 +04:00
iter + + ;
}
2011-03-17 00:29:47 +03:00
return 0 ;
}
2015-07-24 16:06:37 +03:00
/*
2011-10-03 22:01:46 +04:00
* Update code which is definitely not currently executing .
* Architectures which need heavyweight synchronization to modify
* running code can override this to make the non - live update case
* cheaper .
*/
2011-12-06 20:27:29 +04:00
void __weak __init_or_module arch_jump_label_transform_static ( struct jump_entry * entry ,
2011-10-03 22:01:46 +04:00
enum jump_label_type type )
{
2015-07-24 16:06:37 +03:00
arch_jump_label_transform ( entry , type ) ;
2011-10-03 22:01:46 +04:00
}
2015-07-24 16:06:37 +03:00
static inline struct jump_entry * static_key_entries ( struct static_key * key )
2011-03-17 00:29:47 +03:00
{
2015-07-24 16:06:37 +03:00
return ( struct jump_entry * ) ( ( unsigned long ) key - > entries & ~ JUMP_TYPE_MASK ) ;
2010-09-17 19:09:08 +04:00
}
2015-07-24 16:06:37 +03:00
static inline bool static_key_type ( struct static_key * key )
2012-02-24 11:31:31 +04:00
{
2015-07-24 16:06:37 +03:00
return ( unsigned long ) key - > entries & JUMP_TYPE_MASK ;
2015-07-24 15:55:40 +03:00
}
2012-02-24 11:31:31 +04:00
2015-07-24 16:02:27 +03:00
static inline struct static_key * jump_entry_key ( struct jump_entry * entry )
{
2015-07-24 16:09:55 +03:00
return ( struct static_key * ) ( ( unsigned long ) entry - > key & ~ 1UL ) ;
}
static bool jump_entry_branch ( struct jump_entry * entry )
{
return ( unsigned long ) entry - > key & 1UL ;
2015-07-24 16:02:27 +03:00
}
2015-07-24 16:06:37 +03:00
static enum jump_label_type jump_label_type ( struct jump_entry * entry )
2015-07-24 15:55:40 +03:00
{
2015-07-24 16:06:37 +03:00
struct static_key * key = jump_entry_key ( entry ) ;
2015-07-24 15:55:40 +03:00
bool enabled = static_key_enabled ( key ) ;
2015-07-24 16:09:55 +03:00
bool branch = jump_entry_branch ( entry ) ;
2012-02-24 11:31:31 +04:00
2015-07-24 16:09:55 +03:00
/* See the comment in linux/jump_label.h */
return enabled ^ branch ;
2012-02-24 11:31:31 +04:00
}
2015-07-24 16:06:37 +03:00
static void __jump_label_update ( struct static_key * key ,
struct jump_entry * entry ,
struct jump_entry * stop )
{
for ( ; ( entry < stop ) & & ( jump_entry_key ( entry ) = = key ) ; entry + + ) {
/*
* entry - > code set to 0 invalidates module init text sections
* kernel_text_address ( ) verifies we are not in core kernel
* init code , see jump_label_invalidate_module_init ( ) .
*/
if ( entry - > code & & kernel_text_address ( entry - > code ) )
arch_jump_label_transform ( entry , jump_label_type ( entry ) ) ;
}
}
2011-10-13 03:17:54 +04:00
/*
 * Boot-time setup: sort the core-kernel jump table, write ideal NOPs,
 * and link each static_key to its first jump_entry.
 */
void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		/* Entries are sorted; only touch each key once. */
		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		/*
		 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
		 */
		*((unsigned long *)&key->entries) += (unsigned long)iter;
#ifdef CONFIG_MODULES
		key->next = NULL;
#endif
	}
	static_key_initialized = true;
	jump_label_unlock();
}
# ifdef CONFIG_MODULES
2015-07-24 16:09:55 +03:00
/* Desired state for @entry at compile/init time, before any inc/dec. */
static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}
2012-02-24 11:31:31 +04:00
/* Per-module list node hanging off a static_key used by module code. */
struct static_key_mod {
	struct static_key_mod *next;	/* next module using this key */
	struct jump_entry *entries;	/* first of this module's entries */
	struct module *mod;		/* owning module */
};
static int __jump_label_mod_text_reserved ( void * start , void * end )
{
struct module * mod ;
mod = __module_text_address ( ( unsigned long ) start ) ;
if ( ! mod )
return 0 ;
WARN_ON_ONCE ( __module_text_address ( ( unsigned long ) end ) ! = mod ) ;
return __jump_label_text_reserved ( mod - > jump_entries ,
mod - > jump_entries + mod - > num_jump_entries ,
start , end ) ;
}
2015-07-24 16:06:37 +03:00
static void __jump_label_mod_update ( struct static_key * key )
2011-03-17 00:29:47 +03:00
{
2015-07-24 16:06:37 +03:00
struct static_key_mod * mod ;
2011-03-17 00:29:47 +03:00
2015-07-24 16:06:37 +03:00
for ( mod = key - > next ; mod ; mod = mod - > next ) {
2011-05-10 14:43:46 +04:00
struct module * m = mod - > mod ;
__jump_label_update ( key , mod - > entries ,
2015-07-24 16:06:37 +03:00
m - > jump_entries + m - > num_jump_entries ) ;
2011-03-17 00:29:47 +03:00
}
}
/***
* apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop ( )
* @ mod : module to patch
*
* Allow for run - time selection of the optimal nops . Before the module
* loads patch these with arch_get_jump_label_nop ( ) , which is specified by
* the arch specific jump label code .
*/
void jump_label_apply_nops ( struct module * mod )
2010-09-17 19:09:00 +04:00
{
2011-03-17 00:29:47 +03:00
struct jump_entry * iter_start = mod - > jump_entries ;
struct jump_entry * iter_stop = iter_start + mod - > num_jump_entries ;
struct jump_entry * iter ;
/* if the module doesn't have jump label entries, just return */
if ( iter_start = = iter_stop )
return ;
2015-07-24 16:09:55 +03:00
for ( iter = iter_start ; iter < iter_stop ; iter + + ) {
/* Only write NOPs for arch_branch_static(). */
if ( jump_label_init_type ( iter ) = = JUMP_LABEL_NOP )
arch_jump_label_transform_static ( iter , JUMP_LABEL_NOP ) ;
}
2010-09-17 19:09:00 +04:00
}
2011-03-17 00:29:47 +03:00
static int jump_label_add_module ( struct module * mod )
2010-09-17 19:09:00 +04:00
{
2011-03-17 00:29:47 +03:00
struct jump_entry * iter_start = mod - > jump_entries ;
struct jump_entry * iter_stop = iter_start + mod - > num_jump_entries ;
struct jump_entry * iter ;
2012-02-24 11:31:31 +04:00
struct static_key * key = NULL ;
struct static_key_mod * jlm ;
2010-09-17 19:09:00 +04:00
/* if the module doesn't have jump label entries, just return */
2011-03-17 00:29:47 +03:00
if ( iter_start = = iter_stop )
2010-09-17 19:09:00 +04:00
return 0 ;
2011-03-17 00:29:47 +03:00
jump_label_sort_entries ( iter_start , iter_stop ) ;
for ( iter = iter_start ; iter < iter_stop ; iter + + ) {
2012-02-24 11:31:31 +04:00
struct static_key * iterk ;
2011-03-17 00:29:47 +03:00
2015-07-24 16:02:27 +03:00
iterk = jump_entry_key ( iter ) ;
2012-02-24 11:31:31 +04:00
if ( iterk = = key )
continue ;
2011-03-17 00:29:47 +03:00
2012-02-24 11:31:31 +04:00
key = iterk ;
2015-05-27 04:39:35 +03:00
if ( within_module ( iter - > key , mod ) ) {
2012-02-24 11:31:31 +04:00
/*
* Set key - > entries to iter , but preserve JUMP_LABEL_TRUE_BRANCH .
*/
* ( ( unsigned long * ) & key - > entries ) + = ( unsigned long ) iter ;
2011-03-17 00:29:47 +03:00
key - > next = NULL ;
continue ;
2010-09-17 19:09:00 +04:00
}
2012-02-24 11:31:31 +04:00
jlm = kzalloc ( sizeof ( struct static_key_mod ) , GFP_KERNEL ) ;
2011-03-17 00:29:47 +03:00
if ( ! jlm )
return - ENOMEM ;
jlm - > mod = mod ;
jlm - > entries = iter ;
jlm - > next = key - > next ;
key - > next = jlm ;
2015-07-24 16:09:55 +03:00
/* Only update if we've changed from our initial state */
if ( jump_label_type ( iter ) ! = jump_label_init_type ( iter ) )
2015-07-24 16:06:37 +03:00
__jump_label_update ( key , iter , iter_stop ) ;
2010-09-17 19:09:00 +04:00
}
2011-03-17 00:29:47 +03:00
2010-09-17 19:09:00 +04:00
return 0 ;
}
2011-03-17 00:29:47 +03:00
static void jump_label_del_module ( struct module * mod )
2010-09-17 19:09:00 +04:00
{
2011-03-17 00:29:47 +03:00
struct jump_entry * iter_start = mod - > jump_entries ;
struct jump_entry * iter_stop = iter_start + mod - > num_jump_entries ;
struct jump_entry * iter ;
2012-02-24 11:31:31 +04:00
struct static_key * key = NULL ;
struct static_key_mod * jlm , * * prev ;
2010-09-17 19:09:00 +04:00
2011-03-17 00:29:47 +03:00
for ( iter = iter_start ; iter < iter_stop ; iter + + ) {
2015-07-24 16:02:27 +03:00
if ( jump_entry_key ( iter ) = = key )
2011-03-17 00:29:47 +03:00
continue ;
2015-07-24 16:02:27 +03:00
key = jump_entry_key ( iter ) ;
2011-03-17 00:29:47 +03:00
2015-05-27 04:39:35 +03:00
if ( within_module ( iter - > key , mod ) )
2011-03-17 00:29:47 +03:00
continue ;
prev = & key - > next ;
jlm = key - > next ;
2010-09-17 19:09:00 +04:00
2011-03-17 00:29:47 +03:00
while ( jlm & & jlm - > mod ! = mod ) {
prev = & jlm - > next ;
jlm = jlm - > next ;
}
if ( jlm ) {
* prev = jlm - > next ;
kfree ( jlm ) ;
2010-09-17 19:09:00 +04:00
}
}
}
2011-03-17 00:29:47 +03:00
static void jump_label_invalidate_module_init ( struct module * mod )
2010-10-02 01:23:41 +04:00
{
2011-03-17 00:29:47 +03:00
struct jump_entry * iter_start = mod - > jump_entries ;
struct jump_entry * iter_stop = iter_start + mod - > num_jump_entries ;
2010-10-02 01:23:41 +04:00
struct jump_entry * iter ;
2011-03-17 00:29:47 +03:00
for ( iter = iter_start ; iter < iter_stop ; iter + + ) {
if ( within_module_init ( iter - > code , mod ) )
iter - > code = 0 ;
2010-10-02 01:23:41 +04:00
}
}
2010-09-17 19:09:00 +04:00
static int
jump_label_module_notify ( struct notifier_block * self , unsigned long val ,
void * data )
{
struct module * mod = data ;
int ret = 0 ;
switch ( val ) {
case MODULE_STATE_COMING :
2010-10-02 01:23:48 +04:00
jump_label_lock ( ) ;
2011-03-17 00:29:47 +03:00
ret = jump_label_add_module ( mod ) ;
2010-09-17 19:09:00 +04:00
if ( ret )
2011-03-17 00:29:47 +03:00
jump_label_del_module ( mod ) ;
2010-10-02 01:23:48 +04:00
jump_label_unlock ( ) ;
2010-09-17 19:09:00 +04:00
break ;
case MODULE_STATE_GOING :
2010-10-02 01:23:48 +04:00
jump_label_lock ( ) ;
2011-03-17 00:29:47 +03:00
jump_label_del_module ( mod ) ;
2010-10-02 01:23:48 +04:00
jump_label_unlock ( ) ;
2010-09-17 19:09:00 +04:00
break ;
2010-10-02 01:23:41 +04:00
case MODULE_STATE_LIVE :
2010-10-02 01:23:48 +04:00
jump_label_lock ( ) ;
2011-03-17 00:29:47 +03:00
jump_label_invalidate_module_init ( mod ) ;
2010-10-02 01:23:48 +04:00
jump_label_unlock ( ) ;
2010-10-02 01:23:41 +04:00
break ;
2010-09-17 19:09:00 +04:00
}
2011-03-17 00:29:47 +03:00
return notifier_from_errno ( ret ) ;
2010-09-17 19:09:00 +04:00
}
struct notifier_block jump_label_module_nb = {
. notifier_call = jump_label_module_notify ,
2011-03-17 00:29:47 +03:00
. priority = 1 , /* higher than tracepoints */
2010-09-17 19:09:00 +04:00
} ;
2011-03-17 00:29:47 +03:00
static __init int jump_label_init_module ( void )
2010-09-17 19:09:00 +04:00
{
return register_module_notifier ( & jump_label_module_nb ) ;
}
2011-03-17 00:29:47 +03:00
early_initcall ( jump_label_init_module ) ;
2010-09-17 19:09:00 +04:00
# endif /* CONFIG_MODULES */
2011-03-17 00:29:47 +03:00
/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * returns 1 if there is an overlap, 0 otherwise
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}
2015-07-24 16:06:37 +03:00
static void jump_label_update ( struct static_key * key )
2011-03-17 00:29:47 +03:00
{
2012-02-24 11:31:31 +04:00
struct jump_entry * stop = __stop___jump_table ;
2015-07-24 15:55:40 +03:00
struct jump_entry * entry = static_key_entries ( key ) ;
2011-03-17 00:29:47 +03:00
# ifdef CONFIG_MODULES
2015-05-27 04:39:35 +03:00
struct module * mod ;
2011-06-21 06:35:55 +04:00
2015-07-24 16:06:37 +03:00
__jump_label_mod_update ( key ) ;
2011-06-21 06:35:55 +04:00
2015-05-27 04:39:35 +03:00
preempt_disable ( ) ;
mod = __module_address ( ( unsigned long ) key ) ;
2011-06-21 06:35:55 +04:00
if ( mod )
stop = mod - > jump_entries + mod - > num_jump_entries ;
2015-05-27 04:39:35 +03:00
preempt_enable ( ) ;
2011-03-17 00:29:47 +03:00
# endif
2011-06-21 06:35:55 +04:00
/* if there are no users, entry can be NULL */
if ( entry )
2015-07-24 16:06:37 +03:00
__jump_label_update ( key , entry , stop ) ;
2011-03-17 00:29:47 +03:00
}
2015-07-27 19:32:09 +03:00
#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

/* Boot-time sanity check: toggle both key flavors and verify branches. */
static __init int jump_label_test(void)
{
	int i;

	/* Two passes: verify both the toggled and the restored states. */
	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
late_initcall(jump_label_test);
#endif /* STATIC_KEYS_SELFTEST */
# endif /* HAVE_JUMP_LABEL */