aacd149b62 ("arm64: head: avoid relocating the kernel twice for KASLR")
Currently, when KASLR is in effect, we set up the kernel virtual address
space twice: the first time, the KASLR seed is looked up in the device
tree, and the kernel virtual mapping is torn down and recreated, after
which the relocations are applied a second time. The latter step means
that statically initialized global pointer variables will be reset to
their initial values, and to ensure that BSS variables are not set to
values based on the initial translation, they are cleared again as well.

All of this is needed because we need the command line (taken from the
DT) to tell us whether or not to randomize the virtual address space
before entering the kernel proper. However, this code has expanded
little by little and now creates global state unrelated to the virtual
randomization of the kernel before the mapping is torn down and set up
again, and the BSS cleared for a second time. This has created some
issues in the past, and it would be better to avoid this little dance if
possible.

So instead, let's use the temporary mapping of the device tree, and
execute the bare minimum of code to decide whether or not KASLR should
be enabled, and what the seed is (a sketch of such an early seed lookup
follows below). Only then, create the virtual kernel mapping, clear BSS,
etc. and proceed as normal. This avoids the issues around inconsistent
global state due to BSS being cleared twice, and is generally more
maintainable, as it permits us to defer all the remaining DT parsing and
KASLR initialization to a later time.

This means the relocation fixup code runs only a single time as well,
allowing us to simplify the RELR handling code too, which is not
idempotent and was therefore required to keep track of the offset that
was applied the first time around (a single-pass decoder is sketched
below).

Note that this means we have to clone a pair of FDT library objects, so
that we can control how they are built: we need the stack protector and
other instrumentation disabled so that the code can tolerate being
called this early. Note that only the kernel page tables and the
temporary stack are mapped read-write at this point, which ensures that
the early code does not modify any global state inadvertently.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20220624150651.1358849-21-ardb@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
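To make the new flow concrete, here is a minimal sketch of what an early
seed lookup against the temporary FDT mapping could look like, using only
standard libfdt calls (fdt_path_offset, fdt_getprop, fdt64_to_cpu). The
function name and structure are illustrative assumptions, not necessarily
the code this patch adds:

/*
 * Illustrative sketch only: read /chosen/kaslr-seed from the temporary
 * FDT mapping, before the kernel virtual mapping exists. The function
 * name is hypothetical; the libfdt calls are the standard ones.
 */
static u64 get_kaslr_seed(const void *fdt)
{
	const fdt64_t *prop;
	int node, len;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return 0;

	prop = fdt_getprop(fdt, node, "kaslr-seed", &len);
	if (!prop || len != sizeof(*prop))
		return 0;

	return fdt64_to_cpu(*prop);
}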
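On the RELR point: the format is only straightforward to apply in a
single pass, because odd (bitmap) entries patch words relative to the
last address patched, and re-running the loop would add the offset a
second time to every target. Below is a C rendering of a single-pass
decoder; the kernel's actual fixup code is assembly in head.S, so the
names and types here are illustrative:

/*
 * Illustrative single-pass RELR decoder. Even entries hold the
 * link-time address of the next word to patch; odd entries are bitmaps
 * covering the following 63 words. 'offset' is the randomized load
 * offset added to each RELATIVE relocation target.
 */
static void apply_relr(u64 *start, u64 *end, u64 offset)
{
	u64 *place = NULL;

	for (u64 *entry = start; entry < end; entry++) {
		if (!(*entry & 1)) {
			/* address entry: patch the word it names, then advance */
			place = (u64 *)(*entry + offset);
			*place += offset;
			place++;
		} else {
			/* bitmap entry: bit i flags place[i] for patching */
			u64 bits = *entry >> 1;

			for (int i = 0; bits; i++, bits >>= 1)
				if (bits & 1)
					place[i] += offset;
			place += 63;
		}
	}
}

Running this loop a second time would add 'offset' again to every
flagged word, which is why the previous two-pass scheme had to track the
delta already applied; with a single relocation pass that bookkeeping
disappears.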
94 lines
2.7 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <linux/cache.h>
#include <linux/crc32.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/pgtable.h>
#include <linux/random.h>

#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/sections.h>
#include <asm/setup.h>

u64 __ro_after_init module_alloc_base;
u16 __initdata memstart_offset_seed;

struct arm64_ftr_override kaslr_feature_override __initdata;

static int __init kaslr_init(void)
{
	u64 module_range;
	u32 seed;

	/*
	 * Set a reasonable default for module_alloc_base in case
	 * we end up running with module randomization disabled.
	 */
	module_alloc_base = (u64)_etext - MODULES_VSIZE;

	if (kaslr_feature_override.val & kaslr_feature_override.mask & 0xf) {
		pr_info("KASLR disabled on command line\n");
		return 0;
	}

	if (!kaslr_offset()) {
		pr_warn("KASLR disabled due to lack of seed\n");
		return 0;
	}

	pr_info("KASLR enabled\n");

	/*
	 * KASAN without KASAN_VMALLOC does not expect the module region to
	 * intersect the vmalloc region, since shadow memory is allocated for
	 * each module at load time, whereas the vmalloc region will already be
	 * shadowed by KASAN zero pages.
	 */
	BUILD_BUG_ON((IS_ENABLED(CONFIG_KASAN_GENERIC) ||
		      IS_ENABLED(CONFIG_KASAN_SW_TAGS)) &&
		     !IS_ENABLED(CONFIG_KASAN_VMALLOC));

	seed = get_random_u32();

	if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
		/*
		 * Randomize the module region over a 2 GB window covering the
		 * kernel. This reduces the risk of modules leaking information
		 * about the address of the kernel itself, but results in
		 * branches between modules and the core kernel that are
		 * resolved via PLTs. (Branches between modules will be
		 * resolved normally.)
		 */
		module_range = SZ_2G - (u64)(_end - _stext);
		module_alloc_base = max((u64)_end - SZ_2G, (u64)MODULES_VADDR);
	} else {
		/*
		 * Randomize the module region by setting module_alloc_base to
		 * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
		 * _stext). This guarantees that the resulting region still
		 * covers [_stext, _etext], and that all relative branches can
		 * be resolved without veneers unless this region is exhausted
		 * and we fall back to a larger 2 GB window in module_alloc()
		 * when ARM64_MODULE_PLTS is enabled.
		 */
		module_range = MODULES_VSIZE - (u64)(_etext - _stext);
	}

	/* use the lower 21 bits to randomize the base of the module region */
	module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
	module_alloc_base &= PAGE_MASK;

	return 0;
}
subsys_initcall(kaslr_init)
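As a worked example of the seed scaling at the end of kaslr_init()
above: the expression maps the low 21 bits of the seed linearly onto
[0, module_range) without needing a division. A stand-alone rendering
with hypothetical numbers (the values are assumptions for illustration,
not real kernel constants):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-alone illustration of the scaling in kaslr_init(). */
int main(void)
{
	uint64_t module_range = 0x8000000;	/* assume 128 MiB of slack */
	uint32_t seed = 0x100000;		/* midpoint of the 21-bit range */
	uint64_t off;

	/* same expression as in kaslr_init() above */
	off = (module_range * (uint64_t)(seed & ((1 << 21) - 1))) >> 21;

	/* prints 0x4000000: the midpoint of module_range, as expected */
	printf("offset = %#llx\n", (unsigned long long)off);
	return 0;
}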