/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Module internals
 *
 * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org>
 */

#include <linux/elf.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/mm.h>

#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif

/*
 * Use highest 4 bits of sh_entsize to store the mod_mem_type of this
 * section. This leaves 28 bits for offset on 32-bit systems, which is
 * about 256 MiB (WARN_ON_ONCE if we exceed that).
*/
#define SH_ENTSIZE_TYPE_BITS	4
#define SH_ENTSIZE_TYPE_SHIFT	(BITS_PER_LONG - SH_ENTSIZE_TYPE_BITS)
#define SH_ENTSIZE_TYPE_MASK	((1UL << SH_ENTSIZE_TYPE_BITS) - 1)
#define SH_ENTSIZE_OFFSET_MASK	((1UL << (BITS_PER_LONG - SH_ENTSIZE_TYPE_BITS)) - 1)
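
/*
 * Illustrative sketch (not kernel code): how the masks above pack a
 * mod_mem_type and a section offset into sh_entsize, and how they are
 * unpacked again. The local variables here are hypothetical.
 *
 *	shdr->sh_entsize  = ((unsigned long)type & SH_ENTSIZE_TYPE_MASK)
 *				<< SH_ENTSIZE_TYPE_SHIFT;
 *	shdr->sh_entsize |= offset & SH_ENTSIZE_OFFSET_MASK;
 *
 *	type   = (shdr->sh_entsize >> SH_ENTSIZE_TYPE_SHIFT) & SH_ENTSIZE_TYPE_MASK;
 *	offset = shdr->sh_entsize & SH_ENTSIZE_OFFSET_MASK;
 */
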
/* Maximum number of characters written by module_flags() */
#define MODULE_FLAGS_BUF_SIZE (TAINT_FLAGS_COUNT + 4)

struct kernel_symbol {
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	int value_offset;
	int name_offset;
	int namespace_offset;
#else
	unsigned long value;
	const char *name;
	const char *namespace;
#endif
};
2022-03-22 14:03:32 +00:00
extern struct mutex module_mutex ;
extern struct list_head modules ;
2022-03-22 14:03:42 +00:00
extern struct module_attribute * modinfo_attrs [ ] ;
extern size_t modinfo_attrs_count ;
2022-03-22 14:03:32 +00:00

/* Provided by the linker */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const s32 __start___kcrctab[];
extern const s32 __start___kcrctab_gpl[];

struct load_info {
	const char *name;
	/* pointer to module in temporary copy, freed at end of load_module() */
	struct module *mod;
	Elf_Ehdr *hdr;
	unsigned long len;
	Elf_Shdr *sechdrs;
	char *secstrings, *strtab;
	unsigned long symoffs, stroffs, init_typeoffs, core_typeoffs;
	bool sig_ok;
#ifdef CONFIG_KALLSYMS
	unsigned long mod_kallsyms_init_off;
#endif
#ifdef CONFIG_MODULE_DECOMPRESS
#ifdef CONFIG_MODULE_STATS
	unsigned long compressed_len;
#endif
	struct page **pages;
	unsigned int max_pages;
	unsigned int used_pages;
#endif
	struct {
		unsigned int sym, str, mod, vers, info, pcpu;
	} index;
};

enum mod_license {
	NOT_GPL_ONLY,
	GPL_ONLY,
};

struct find_symbol_arg {
	/* Input */
	const char *name;
	bool gplok;
	bool warn;

	/* Output */
	struct module *owner;
	const s32 *crc;
	const struct kernel_symbol *sym;
	enum mod_license license;
};

int mod_verify_sig(const void *mod, struct load_info *info);
int try_to_force_load(struct module *mod, const char *reason);
bool find_symbol(struct find_symbol_arg *fsa);
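
/*
 * Usage sketch (illustrative, not taken verbatim from any caller): a
 * GPL-compatible symbol lookup through find_symbol(). The symbol name
 * and local variables are hypothetical.
 *
 *	struct find_symbol_arg fsa = {
 *		.name	= "some_exported_symbol",
 *		.gplok	= true,
 *		.warn	= true,
 *	};
 *
 *	if (find_symbol(&fsa))
 *		pr_debug("%s found at 0x%lx, owner %s\n", fsa.name,
 *			 kernel_symbol_value(fsa.sym),
 *			 fsa.owner ? fsa.owner->name : "vmlinux");
 */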

struct module *find_module_all(const char *name, size_t len, bool even_unformed);
int cmp_name(const void *name, const void *sym);

long module_get_offset_and_type(struct module *mod, enum mod_mem_type type,
				Elf_Shdr *sechdr, unsigned int section);
char *module_flags(struct module *mod, char *buf, bool show_state);
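
/*
 * Usage sketch (illustrative): formatting a module's flags the way
 * /proc/modules does; "mod" names a live module here:
 *
 *	char buf[MODULE_FLAGS_BUF_SIZE];
 *
 *	pr_info("%s %s\n", mod->name, module_flags(mod, buf, true));
 */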

size_t module_flags_taint(unsigned long taints, char *buf);

char *module_next_tag_pair(char *string, unsigned long *secsize);

#define for_each_modinfo_entry(entry, info, name) \
	for (entry = get_modinfo(info, name); entry; entry = get_next_modinfo(info, name, entry))
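
/*
 * Usage sketch (illustrative): walking every "import_ns" tag in a
 * module's .modinfo strings; "info" is the load_info of the module
 * being processed:
 *
 *	char *ns;
 *
 *	for_each_modinfo_entry(ns, info, "import_ns")
 *		pr_debug("imports namespace: %s\n", ns);
 */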

static inline void module_assert_mutex_or_preempt(void)
{
#ifdef CONFIG_LOCKDEP
	if (unlikely(!debug_locks))
		return;

	WARN_ON_ONCE(!rcu_read_lock_sched_held() &&
		     !lockdep_is_held(&module_mutex));
#endif
}

static inline unsigned long kernel_symbol_value(const struct kernel_symbol *sym)
{
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	return (unsigned long)offset_to_ptr(&sym->value_offset);
#else
	return sym->value;
#endif
}

#ifdef CONFIG_LIVEPATCH
int copy_module_elf(struct module *mod, struct load_info *info);
void free_module_elf(struct module *mod);
#else /* !CONFIG_LIVEPATCH */
static inline int copy_module_elf(struct module *mod, struct load_info *info)
{
	return 0;
}

static inline void free_module_elf(struct module *mod) { }
#endif /* CONFIG_LIVEPATCH */

static inline bool set_livepatch_module(struct module *mod)
{
#ifdef CONFIG_LIVEPATCH
	mod->klp = true;
	return true;
#else
	return false;
#endif
}

/**
 * enum fail_dup_mod_reason - state at which a duplicate module was detected
 *
 * @FAIL_DUP_MOD_BECOMING: the module is read properly, passes all checks but
 *	we've determined that another module with the same name is already loaded
 *	or being processed on our &modules list. This happens on early_mod_check()
 *	right before layout_and_allocate(). The kernel would have already
 *	vmalloc()'d space for the entire module through finit_module(). If
 *	decompression was used two vmap() spaces were used. These failures can
 *	happen when userspace has not seen the module present on the kernel and
 *	tries to load the module multiple times at the same time.
 * @FAIL_DUP_MOD_LOAD: the module has been read properly, passes all validation
 *	checks and the kernel determines that the module was unique and because
 *	of this allocated yet another private kernel copy of the module space in
 *	layout_and_allocate(), but after this determined in add_unformed_module()
 *	that another module with the same name is already loaded or being processed.
 *	These failures should be mitigated as much as possible and are indicative
 *	of really fast races in loading modules. Without module decompression
 *	they waste twice as much vmap space. With module decompression three
 *	times the module's size vmap space is wasted.
 */
enum fail_dup_mod_reason {
	FAIL_DUP_MOD_BECOMING = 0,
	FAIL_DUP_MOD_LOAD,
};
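
/*
 * Usage sketch (illustrative): recording a duplicate caught before
 * layout_and_allocate(); "info" would be the load_info of the losing
 * load attempt:
 *
 *	try_add_failed_module(info->name, FAIL_DUP_MOD_BECOMING);
 */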

#ifdef CONFIG_MODULE_DEBUGFS
extern struct dentry *mod_debugfs_root;
#endif

#ifdef CONFIG_MODULE_STATS
#define mod_stat_add_long(count, var) atomic_long_add(count, var)
#define mod_stat_inc(name) atomic_inc(name)

extern atomic_long_t total_mod_size;
extern atomic_long_t total_text_size;
extern atomic_long_t invalid_kread_bytes;
extern atomic_long_t invalid_decompress_bytes;

extern atomic_t modcount;
extern atomic_t failed_kreads;
extern atomic_t failed_decompress;

struct mod_fail_load {
	struct list_head list;
	char name[MODULE_NAME_LEN];
	atomic_long_t count;
	unsigned long dup_fail_mask;
};

int try_add_failed_module(const char *name, enum fail_dup_mod_reason reason);
void mod_stat_bump_invalid(struct load_info *info, int flags);
void mod_stat_bump_becoming(struct load_info *info, int flags);

#else /* !CONFIG_MODULE_STATS */
#define mod_stat_add_long(name, var)
#define mod_stat_inc(name)

static inline int try_add_failed_module(const char *name,
					enum fail_dup_mod_reason reason)
{
	return 0;
}

static inline void mod_stat_bump_invalid(struct load_info *info, int flags)
{
}

static inline void mod_stat_bump_becoming(struct load_info *info, int flags)
{
}

#endif /* CONFIG_MODULE_STATS */
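
/*
 * Usage sketch (illustrative): bumping the module stats on a successful
 * load using the counters declared above; both helpers compile away
 * without CONFIG_MODULE_STATS:
 *
 *	mod_stat_inc(&modcount);
 *	mod_stat_add_long(info->len, &total_mod_size);
 */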

#ifdef CONFIG_MODULE_DEBUG_AUTOLOAD_DUPS
bool kmod_dup_request_exists_wait(char *module_name, bool wait, int *dup_ret);
void kmod_dup_request_announce(char *module_name, int ret);
#else
static inline bool kmod_dup_request_exists_wait(char *module_name, bool wait, int *dup_ret)
{
	return false;
}

static inline void kmod_dup_request_announce(char *module_name, int ret)
{
}
#endif

#ifdef CONFIG_MODULE_UNLOAD_TAINT_TRACKING
struct mod_unload_taint {
	struct list_head list;
	char name[MODULE_NAME_LEN];
	unsigned long taints;
	u64 count;
};

int try_add_tainted_module(struct module *mod);
void print_unloaded_tainted_modules(void);
#else /* !CONFIG_MODULE_UNLOAD_TAINT_TRACKING */
static inline int try_add_tainted_module(struct module *mod)
{
	return 0;
}

static inline void print_unloaded_tainted_modules(void)
{
}
#endif /* CONFIG_MODULE_UNLOAD_TAINT_TRACKING */

#ifdef CONFIG_MODULE_DECOMPRESS
int module_decompress(struct load_info *info, const void *buf, size_t size);
void module_decompress_cleanup(struct load_info *info);
#else
static inline int module_decompress(struct load_info *info,
				    const void *buf, size_t size)
{
	return -EOPNOTSUPP;
}

static inline void module_decompress_cleanup(struct load_info *info)
{
}
#endif

struct mod_tree_root {
#ifdef CONFIG_MODULES_TREE_LOOKUP
	struct latch_tree_root root;
#endif
	unsigned long addr_min;
	unsigned long addr_max;
#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
	unsigned long data_addr_min;
	unsigned long data_addr_max;
#endif
};

extern struct mod_tree_root mod_tree;

#ifdef CONFIG_MODULES_TREE_LOOKUP
void mod_tree_insert(struct module *mod);
void mod_tree_remove_init(struct module *mod);
void mod_tree_remove(struct module *mod);
struct module *mod_find(unsigned long addr, struct mod_tree_root *tree);
#else /* !CONFIG_MODULES_TREE_LOOKUP */

static inline void mod_tree_insert(struct module *mod) { }
static inline void mod_tree_remove_init(struct module *mod) { }
static inline void mod_tree_remove(struct module *mod) { }

static inline struct module *mod_find(unsigned long addr, struct mod_tree_root *tree)
{
	struct module *mod;

	list_for_each_entry_rcu(mod, &modules, list,
				lockdep_is_held(&module_mutex)) {
		if (within_module(addr, mod))
			return mod;
	}

	return NULL;
}
#endif /* CONFIG_MODULES_TREE_LOOKUP */
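
/*
 * Usage sketch (illustrative): resolving an address to its owning module,
 * roughly what __module_address() does; the caller must hold module_mutex
 * or be in an RCU-sched read-side section:
 *
 *	struct module *mod;
 *
 *	preempt_disable();
 *	mod = mod_find(addr, &mod_tree);
 *	preempt_enable();
 */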

void module_enable_ro(const struct module *mod, bool after_init);
void module_enable_nx(const struct module *mod);
void module_enable_x(const struct module *mod);
int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
				char *secstrings, struct module *mod);

#ifdef CONFIG_MODULE_SIG
int module_sig_check(struct load_info *info, int flags);
#else /* !CONFIG_MODULE_SIG */
static inline int module_sig_check(struct load_info *info, int flags)
{
	return 0;
}
#endif /* !CONFIG_MODULE_SIG */

#ifdef CONFIG_DEBUG_KMEMLEAK
void kmemleak_load_module(const struct module *mod, const struct load_info *info);
#else /* !CONFIG_DEBUG_KMEMLEAK */
static inline void kmemleak_load_module(const struct module *mod,
					const struct load_info *info) { }
#endif /* CONFIG_DEBUG_KMEMLEAK */

#ifdef CONFIG_KALLSYMS
void init_build_id(struct module *mod, const struct load_info *info);
void layout_symtab(struct module *mod, struct load_info *info);
void add_kallsyms(struct module *mod, const struct load_info *info);

static inline bool sect_empty(const Elf_Shdr *sect)
{
	return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
}
#else /* !CONFIG_KALLSYMS */
static inline void init_build_id(struct module *mod, const struct load_info *info) { }
static inline void layout_symtab(struct module *mod, struct load_info *info) { }
static inline void add_kallsyms(struct module *mod, const struct load_info *info) { }
#endif /* CONFIG_KALLSYMS */

#ifdef CONFIG_SYSFS
int mod_sysfs_setup(struct module *mod, const struct load_info *info,
		    struct kernel_param *kparam, unsigned int num_params);
void mod_sysfs_teardown(struct module *mod);
void init_param_lock(struct module *mod);
#else /* !CONFIG_SYSFS */
static inline int mod_sysfs_setup(struct module *mod,
				  const struct load_info *info,
				  struct kernel_param *kparam,
				  unsigned int num_params)
{
	return 0;
}

static inline void mod_sysfs_teardown(struct module *mod) { }
static inline void init_param_lock(struct module *mod) { }
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_MODVERSIONS
int check_version(const struct load_info *info,
		  const char *symname, struct module *mod, const s32 *crc);
void module_layout(struct module *mod, struct modversion_info *ver, struct kernel_param *kp,
		   struct kernel_symbol *ks, struct tracepoint * const *tp);
int check_modstruct_version(const struct load_info *info, struct module *mod);
int same_magic(const char *amagic, const char *bmagic, bool has_crcs);
#else /* !CONFIG_MODVERSIONS */
static inline int check_version(const struct load_info *info,
				const char *symname,
				struct module *mod,
				const s32 *crc)
{
	return 1;
}

static inline int check_modstruct_version(const struct load_info *info,
					  struct module *mod)
{
	return 1;
}

static inline int same_magic(const char *amagic, const char *bmagic, bool has_crcs)
{
	return strcmp(amagic, bmagic) == 0;
}
#endif /* CONFIG_MODVERSIONS */