The modversion symbol CRCs are emitted as ELF symbols, which allows us
to easily populate the kcrctab sections by relying on the linker to
associate each kcrctab slot with the correct value.

This has a couple of downsides:

 - Given that the CRCs are treated as memory addresses, we waste 4
   bytes for each CRC on 64 bit architectures.

 - On architectures that support runtime relocation, an R_<arch>_RELATIVE
   relocation entry is emitted for each CRC value, which identifies it
   as a quantity that requires fixing up based on the actual runtime
   load offset of the kernel. This results in corrupted CRCs unless we
   explicitly undo the fixup (and this is currently being handled in
   the core module code).

 - Such runtime relocation entries take up 24 bytes of __init space
   each, resulting in an 8x overhead in [uncompressed] kernel size for
   CRCs.

Switching to explicit 32 bit values on 64 bit architectures fixes most
of these issues, given that 32 bit values are not treated as quantities
that require fixing up based on the actual runtime load offset. Note
that on some ELF64 architectures [such as PPC64], these 32-bit values
are still emitted as [absolute] runtime relocatable quantities, even if
the value resolves to a build time constant. Since relative relocations
are always resolved at build time, this patch enables MODULE_REL_CRCS
on powerpc when CONFIG_RELOCATABLE=y, which turns the absolute CRC
references into relative references into .rodata where the actual CRC
value is stored.

So redefine all CRC fields and variables as u32, and redefine the
__CRC_SYMBOL() macro for 64 bit builds to emit the CRC reference using
inline assembler (which is necessary since 64-bit C code cannot use
32-bit types to hold memory addresses, even if they are ultimately
resolved using values that do not exceed 0xffffffff). To avoid
potential problems with legacy 32-bit architectures using legacy
toolchains, the equivalent C definition of the kcrctab entry is
retained for 32-bit architectures.

Note that this mostly reverts commit d4703aefdbc8 ("module: handle
ppc64 relocating kcrctabs when CONFIG_RELOCATABLE=y").

Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
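For illustration, here is a rough sketch of what the __CRC_SYMBOL()
redefinition described above can look like. This is a simplified
approximation, not the exact kernel code: the "___kcrctab"/"__crc_<sym>"
section and symbol naming follows the existing convention, the CONFIG_*
symbols are assumed to come from Kconfig as usual, and kernel helpers
used in the real header (e.g. VMLINUX_SYMBOL_STR() wrapping and the
__used/__visible annotations) are omitted for brevity.

    #if defined(CONFIG_MODULE_REL_CRCS)
    /* 64-bit relocatable kernels: emit a 32-bit place-relative reference */
    #define __CRC_SYMBOL(sym, sec)                                       \
            asm("   .section \"___kcrctab" sec "+" #sym "\", \"a\"\n"    \
                "   .weak __crc_" #sym "\n"                              \
                "   .long __crc_" #sym " - .\n"                          \
                "   .previous\n");
    #elif defined(CONFIG_64BIT)
    /* other 64-bit builds: emit the CRC reference as an absolute 32-bit value */
    #define __CRC_SYMBOL(sym, sec)                                       \
            asm("   .section \"___kcrctab" sec "+" #sym "\", \"a\"\n"    \
                "   .weak __crc_" #sym "\n"                              \
                "   .long __crc_" #sym "\n"                              \
                "   .previous\n");
    #else
    /* 32-bit architectures keep the equivalent C definition */
    #define __CRC_SYMBOL(sym, sec)                                       \
            extern void *__crc_##sym __attribute__((weak));              \
            static const unsigned long __kcrctab_##sym                   \
            __attribute__((used, section("___kcrctab" sec "+" #sym)))    \
            = (unsigned long)&__crc_##sym;
    #endif

In the MODULE_REL_CRCS case the module loader resolves a kcrctab entry
back to the actual CRC by adding the stored 32-bit offset to the address
of the entry itself, rather than reading a symbol address directly.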
#ifndef _ASM_POWERPC_MODULE_H
#define _ASM_POWERPC_MODULE_H
#ifdef __KERNEL__

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/list.h>
#include <asm/bug.h>
#include <asm-generic/module.h>


#ifndef __powerpc64__
/*
 * Thanks to Paul M for explaining this.
 *
 * PPC can only do rel jumps += 32MB, and often the kernel and other
 * modules are further away than this.  So, we jump to a table of
 * trampolines attached to the module (the Procedure Linkage Table)
 * whenever that happens.
 */

struct ppc_plt_entry {
        /* 16 byte jump instruction sequence (4 instructions) */
        unsigned int jump[4];
};
#endif /* __powerpc64__ */


struct mod_arch_specific {
#ifdef __powerpc64__
        unsigned int stubs_section;     /* Index of stubs section in module */
        unsigned int toc_section;       /* What section is the TOC? */
        bool toc_fixed;                 /* Have we fixed up .TOC.? */
#ifdef CONFIG_DYNAMIC_FTRACE
        unsigned long toc;
        unsigned long tramp;
#endif

#else /* powerpc64 */
        /* Indices of PLT sections within module. */
        unsigned int core_plt_section;
        unsigned int init_plt_section;
#ifdef CONFIG_DYNAMIC_FTRACE
        unsigned long tramp;
#endif
#endif /* powerpc64 */

        /* List of BUG addresses, source line numbers and filenames */
        struct list_head bug_list;
        struct bug_entry *bug_table;
        unsigned int num_bugs;
};

/*
 * Select ELF headers.
 * Make empty section for module_frob_arch_sections to expand.
 */

#ifdef __powerpc64__
# ifdef MODULE
asm(".section .stubs,\"ax\",@nobits; .align 3; .previous");
# endif
#else
# ifdef MODULE
asm(".section .plt,\"ax\",@nobits; .align 3; .previous");
asm(".section .init.plt,\"ax\",@nobits; .align 3; .previous");
# endif /* MODULE */
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
# ifdef MODULE
asm(".section .ftrace.tramp,\"ax\",@nobits; .align 3; .previous");
# endif /* MODULE */
#endif

int module_trampoline_target(struct module *mod, unsigned long trampoline,
                             unsigned long *target);

#ifdef CONFIG_DYNAMIC_FTRACE
int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs);
#else
static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs)
{
        return 0;
}
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MODULE_H */