/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jea->key < jeb->key)
		return -1;

	if (jea->key > jeb->key)
		return 1;

	return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !HAVE_JUMP_LABEL case, it's
 * OK to have it be a function here. Similarly, for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for HAVE_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 *  static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

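/*
 * Usage sketch (illustrative only; the key and function names below are
 * made up). DEFINE_STATIC_KEY_FALSE() and the static_branch_*() helpers
 * come from <linux/jump_label.h> and wrap the slow-path functions in
 * this file:
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_feature_key);
 *
 *	void hot_path(void)
 *	{
 *		if (static_branch_unlikely(&my_feature_key))
 *			do_my_feature();	<- site patched NOP/JMP
 *	}
 *
 *	static_branch_inc(&my_feature_key);	<- enable: patch all sites
 *	static_branch_dec(&my_feature_key);	<- disable when count hits 0
 */
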
void static_key_slow_inc_cpuslocked(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE(key);

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * Ensure that if the above cmpxchg loop observes our positive
		 * value, it must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}

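/*
 * Summary of the ->enabled values used above:
 *	-1: the first static_key_slow_inc() is patching code; concurrent
 *	    callers fall through to jump_label_lock() and wait for it
 *	 0: key disabled, no update in progress
 *	>0: reference count of enabled users; the cmpxchg loop above is
 *	    the uncontended fast path
 */
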
void static_key_slow_inc(struct static_key *key)
{
	cpus_read_lock();
	static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0))
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);

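/*
 * Note: unlike static_key_slow_inc()/_dec() above, the enable()/disable()
 * pair forces ->enabled to exactly 1 or 0 and is not reference counted;
 * the WARN_ON_ONCE() checks assume such a key only ever holds 0 or 1.
 */
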
static void __static_key_slow_dec_cpuslocked(struct static_key *key,
					     unsigned long rate_limit,
					     struct delayed_work *work)
{
	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
		WARN(atomic_read(&key->enabled) < 0,
		     "jump label: negative count!\n");
		return;
	}

	if (rate_limit) {
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else {
		jump_label_update(key);
	}
	jump_label_unlock();
}

static void __static_key_slow_dec(struct static_key *key,
				  unsigned long rate_limit,
				  struct delayed_work *work)
{
	cpus_read_lock();
	__static_key_slow_dec_cpuslocked(key, rate_limit, work);
	cpus_read_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);

	__static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec_cpuslocked(key, 0, NULL);
}

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void static_key_deferred_flush(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE(key);
	flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE(key);
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);

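/*
 * Usage sketch for rate-limited keys (illustrative; names made up).
 * Deferring the decrement batches rapid enable/disable toggles so the
 * text patcher is not invoked on every transition:
 *
 *	static struct static_key_deferred my_df_key;
 *
 *	jump_label_rate_limit(&my_df_key, HZ / 10);
 *	...
 *	static_key_slow_inc(&my_df_key.key);
 *	static_key_slow_dec_deferred(&my_df_key);	<- patch-out deferred
 */
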
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (entry->code <= (unsigned long)end &&
		entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
			return 1;

	return 0;
}

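/*
 * Report whether [start, end] overlaps the patch site of any entry in
 * [iter_start, iter_stop); text-patching callers use this to keep clear
 * of live jump label sites.
 */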
static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

static inline struct static_key *jump_entry_key(struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~1UL);
}

static bool jump_entry_branch(struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}

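/*
 * As the two helpers above show, entry->key packs two values into one
 * word: the static_key pointer in the upper bits and the branch flag in
 * bit 0, i.e. conceptually:
 *
 *	entry->key = (unsigned long)&key | branch;
 */
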
/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use an access
 * function which preserves these bits.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}

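/*
 * Layout of key->type implied by the helpers in this file (the
 * JUMP_TYPE_* masks are defined in <linux/jump_label.h>):
 *
 *	bit 0 (JUMP_TYPE_TRUE)    initial branch value of the key
 *	bit 1 (JUMP_TYPE_LINKED)  pointer refers to a static_key_mod list
 *	upper bits                jump_entry table or static_key_mod pointer
 */
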
static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}

static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		/*
		 * An entry->code of 0 indicates an entry which has been
		 * disabled because it was in an init text area.
		 */
		if (entry->code) {
			if (kernel_text_address(entry->code))
				arch_jump_label_transform(entry, jump_label_type(entry));
			else
				WARN_ONCE(1, "can't patch jump_label at %pS",
					  (void *)(unsigned long)entry->code);
		}
	}
}

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}

/* Disable any jump label entries in __init/__exit code */
void __init jump_label_invalidate_initmem(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (init_section_contains((void *)(unsigned long)iter->code, 1))
			iter->code = 0;
	}
}

#ifdef CONFIG_MODULES

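/*
 * jump_label_init_type() yields the branch direction an entry was
 * compiled with: static_key_type() reads the key's *initial* value
 * (bit 0 of key->type), so the result is the entry's built-in NOP/JMP
 * baseline rather than its current state.
 */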
static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

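/*
 * When a key is used outside its defining module, key->next heads a
 * list of these nodes, one per module referencing the key; ->mod is
 * NULL when the key is defined in the core kernel (see
 * jump_label_add_module() below):
 *
 *	key->next -> { using mod, its entries } -> { defining mod, orig entries } -> NULL
 */
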
static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!(key->type & JUMP_TYPE_LINKED));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	preempt_enable();

	if (!mod)
		return 0;

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop);
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module(iter->key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}

		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;

		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop);
	}

	return 0;
}

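/*
 * Undo jump_label_add_module(): for every key this module referenced
 * but did not define, unlink and free the module's static_key_mod node;
 * when only one node remains, fold its entries pointer back into the
 * key and clear the LINKED tag.
 */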
static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module(iter->key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

kfree ( jlm ) ;
jlm = static_key_mod ( key ) ;
/* if only one entry is left, fold it back into the static_key */
if ( jlm - > next = = NULL ) {
static_key_set_entries ( key , jlm - > entries ) ;
static_key_clear_linked ( key ) ;
2011-03-17 00:29:47 +03:00
kfree ( jlm ) ;
2010-09-17 19:09:00 +04:00
}
}
}
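The unlink above is the classic indirect-pointer list walk, with one twist: the list head is the tagged key->entries word, so updating it goes through static_key_set_mod() instead of a plain store through prev. For comparison, the untagged form of the same idiom (illustrative only):

struct node { struct node *next; };

static void unlink(struct node **head, struct node *victim)
{
	struct node **pp = head;

	while (*pp && *pp != victim)
		pp = &(*pp)->next;
	if (*pp)
		*pp = victim->next;	/* one store covers head and middle */
}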
2018-02-20 20:37:51 +03:00
/* Disable any jump label entries in module init code */
2011-03-17 00:29:47 +03:00
static void jump_label_invalidate_module_init ( struct module * mod )
2010-10-02 01:23:41 +04:00
{
2011-03-17 00:29:47 +03:00
struct jump_entry * iter_start = mod - > jump_entries ;
struct jump_entry * iter_stop = iter_start + mod - > num_jump_entries ;
2010-10-02 01:23:41 +04:00
struct jump_entry * iter ;
2011-03-17 00:29:47 +03:00
for ( iter = iter_start ; iter < iter_stop ; iter + + ) {
if ( within_module_init ( iter - > code , mod ) )
iter - > code = 0 ;
2010-10-02 01:23:41 +04:00
}
}
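Zeroing iter->code marks an entry dead: the update path skips entries whose code address is zero, so sites in freed init text are never patched again. The range test is the module core's within_module_init(); roughly (a sketch only, since the module layout fields differ across kernel versions):

/* Sketch; the real helper is within_module_init() in linux/module.h. */
static bool addr_in_mod_init(unsigned long addr, const struct module *mod)
{
	unsigned long base = (unsigned long)mod->init_layout.base;

	return addr >= base && addr < base + mod->init_layout.size;
}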
2010-09-17 19:09:00 +04:00
static int
jump_label_module_notify ( struct notifier_block * self , unsigned long val ,
void * data )
{
struct module * mod = data ;
int ret = 0 ;
2017-05-24 11:15:35 +03:00
cpus_read_lock ( ) ;
jump_label_lock ( ) ;
2010-09-17 19:09:00 +04:00
switch ( val ) {
case MODULE_STATE_COMING :
2011-03-17 00:29:47 +03:00
ret = jump_label_add_module ( mod ) ;
2017-02-03 23:42:24 +03:00
if ( ret ) {
WARN ( 1 , " Failed to allocate memory: jump_label may not work properly. \n " ) ;
2011-03-17 00:29:47 +03:00
jump_label_del_module ( mod ) ;
2017-02-03 23:42:24 +03:00
}
2010-09-17 19:09:00 +04:00
break ;
case MODULE_STATE_GOING :
2011-03-17 00:29:47 +03:00
jump_label_del_module ( mod ) ;
2010-09-17 19:09:00 +04:00
break ;
2010-10-02 01:23:41 +04:00
case MODULE_STATE_LIVE :
2011-03-17 00:29:47 +03:00
jump_label_invalidate_module_init ( mod ) ;
2010-10-02 01:23:41 +04:00
break ;
2010-09-17 19:09:00 +04:00
}
2017-05-24 11:15:35 +03:00
jump_label_unlock ( ) ;
cpus_read_unlock ( ) ;
2011-03-17 00:29:47 +03:00
return notifier_from_errno ( ret ) ;
2010-09-17 19:09:00 +04:00
}
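notifier_from_errno() folds the errno into a NOTIFY_* return value for the notifier chain; its shape in include/linux/notifier.h is essentially:

static inline int notifier_from_errno(int err)
{
	if (err)
		return NOTIFY_STOP_MASK | (NOTIFY_OK - err);

	return NOTIFY_OK;
}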
2016-06-17 20:19:40 +03:00
static struct notifier_block jump_label_module_nb = {
2010-09-17 19:09:00 +04:00
. notifier_call = jump_label_module_notify ,
2011-03-17 00:29:47 +03:00
. priority = 1 , /* higher than tracepoints */
2010-09-17 19:09:00 +04:00
} ;
2011-03-17 00:29:47 +03:00
static __init int jump_label_init_module ( void )
2010-09-17 19:09:00 +04:00
{
return register_module_notifier ( & jump_label_module_nb ) ;
}
2011-03-17 00:29:47 +03:00
early_initcall ( jump_label_init_module ) ;
2010-09-17 19:09:00 +04:00
# endif /* CONFIG_MODULES */
2011-03-17 00:29:47 +03:00
/***
* jump_label_text_reserved - check if addr range is reserved
* @ start : start text addr
* @ end : end text addr
*
* checks whether the text addresses between @ start and @ end
* overlap any of the jump label patch addresses . Code
* that wants to modify kernel text should first verify that
* it does not overlap any of the jump label addresses .
* Caller must hold jump_label_mutex .
*
* returns 1 if there is an overlap , 0 otherwise
*/
int jump_label_text_reserved ( void * start , void * end )
{
int ret = __jump_label_text_reserved ( __start___jump_table ,
__stop___jump_table , start , end ) ;
if ( ret )
return ret ;
# ifdef CONFIG_MODULES
ret = __jump_label_mod_text_reserved ( start , end ) ;
# endif
return ret ;
}
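A typical caller is a text-patching client vetting one instruction before poking it. A sketch of the calling pattern (hypothetical wrapper; kprobes performs an equivalent check while registering a probe):

static int can_patch_text(void *addr)
{
	int ret = 0;

	jump_label_lock();			/* the API requires jump_label_mutex */
	if (jump_label_text_reserved(addr, addr))
		ret = -EBUSY;			/* site belongs to a jump label */
	jump_label_unlock();

	return ret;
}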
2015-07-24 16:06:37 +03:00
static void jump_label_update ( struct static_key * key )
2011-03-17 00:29:47 +03:00
{
2012-02-24 11:31:31 +04:00
struct jump_entry * stop = __stop___jump_table ;
2017-02-03 23:42:24 +03:00
struct jump_entry * entry ;
2011-03-17 00:29:47 +03:00
# ifdef CONFIG_MODULES
2015-05-27 04:39:35 +03:00
struct module * mod ;
2011-06-21 06:35:55 +04:00
2017-02-03 23:42:24 +03:00
if ( static_key_linked ( key ) ) {
__jump_label_mod_update ( key ) ;
return ;
}
2011-06-21 06:35:55 +04:00
2015-05-27 04:39:35 +03:00
preempt_disable ( ) ;
mod = __module_address ( ( unsigned long ) key ) ;
2011-06-21 06:35:55 +04:00
if ( mod )
stop = mod - > jump_entries + mod - > num_jump_entries ;
2015-05-27 04:39:35 +03:00
preempt_enable ( ) ;
2011-03-17 00:29:47 +03:00
# endif
2017-02-03 23:42:24 +03:00
entry = static_key_entries ( key ) ;
2011-06-21 06:35:55 +04:00
/* if there are no users, entry can be NULL */
if ( entry )
2015-07-24 16:06:37 +03:00
__jump_label_update ( key , entry , stop ) ;
2011-03-17 00:29:47 +03:00
}
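For reference, the __jump_label_update() invoked above relies on jump_label_sort_entries() having grouped entries by key, so one key's sites form a contiguous run bounded by @stop. A sketch consistent with this era of the file (the linked, multi-module case runs the same walker once per static_key_mod record):

static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop)
{
	for (; entry < stop && jump_entry_key(entry) == key; entry++) {
		/* entries zapped by jump_label_invalidate_module_init()
		 * have code == 0 and must be skipped */
		if (entry->code && kernel_text_address(entry->code))
			arch_jump_label_transform(entry, jump_label_type(entry));
	}
}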
2015-07-27 19:32:09 +03:00
# ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE ( sk_true ) ;
static DEFINE_STATIC_KEY_FALSE ( sk_false ) ;
static __init int jump_label_test ( void )
{
int i ;
/* Two passes: toggle both keys, verify each state, then restore. */
for ( i = 0 ; i < 2 ; i + + ) {
WARN_ON ( static_key_enabled ( & sk_true . key ) ! = true ) ;
WARN_ON ( static_key_enabled ( & sk_false . key ) ! = false ) ;
WARN_ON ( ! static_branch_likely ( & sk_true ) ) ;
WARN_ON ( ! static_branch_unlikely ( & sk_true ) ) ;
WARN_ON ( static_branch_likely ( & sk_false ) ) ;
WARN_ON ( static_branch_unlikely ( & sk_false ) ) ;
static_branch_disable ( & sk_true ) ;
static_branch_enable ( & sk_false ) ;
WARN_ON ( static_key_enabled ( & sk_true . key ) = = true ) ;
WARN_ON ( static_key_enabled ( & sk_false . key ) = = false ) ;
WARN_ON ( static_branch_likely ( & sk_true ) ) ;
WARN_ON ( static_branch_unlikely ( & sk_true ) ) ;
WARN_ON ( ! static_branch_likely ( & sk_false ) ) ;
WARN_ON ( ! static_branch_unlikely ( & sk_false ) ) ;
static_branch_enable ( & sk_true ) ;
static_branch_disable ( & sk_false ) ;
}
return 0 ;
}
2017-11-14 00:48:47 +03:00
early_initcall ( jump_label_test ) ;
2015-07-27 19:32:09 +03:00
# endif /* CONFIG_STATIC_KEYS_SELFTEST */
# endif /* HAVE_JUMP_LABEL */