linux/arch/arm64/kernel/process.c
Linus Torvalds 268325bda5 Random number generator updates for Linux 6.2-rc1.

Merge tag 'random-6.2-rc1-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random

Pull random number generator updates from Jason Donenfeld:

 - Replace prandom_u32_max() and various open-coded variants of it;
   there is now a new family of functions that uses fast rejection
   sampling to choose properly uniformly random numbers within an
   interval:

       get_random_u32_below(ceil) - [0, ceil)
       get_random_u32_above(floor) - (floor, U32_MAX]
       get_random_u32_inclusive(floor, ceil) - [floor, ceil]

   Coccinelle was used to convert all current users of
   prandom_u32_max(), as well as many open-coded patterns, resulting in
   improvements throughout the tree.

   I'll have a "late" 6.2-rc1 pull for you that removes the now-unused
   prandom_u32_max() function, just in case any other trees add a new
   use case of it that needs to be converted. According to linux-next,
   there may be two trivial cases of prandom_u32_max() reintroductions
   that are fixable with a 's/.../.../'. So during the second week I'll
   send a final conversion patch doing that alongside the removal
   patch.

   This is a treewide change that touches many files throughout; a
   short usage sketch of the new helpers follows this list.

 - More consistent use of get_random_canary().

 - Updates to comments, documentation, tests, headers, and
   simplification in configuration.

 - The arch_get_random*_early() abstraction was only used by arm64 and
   wasn't entirely useful, so this has been replaced by code that works
   in all relevant contexts.

 - The kernel will use and manage random seeds in non-volatile EFI
   variables, refreshing a variable with a fresh seed when the RNG is
   initialized. The RNG GUID namespace is then hidden from efivarfs to
   prevent accidental leakage.

   These changes are split between the random.c infrastructure code
   used by the EFI subsystem (in this pull request) and the related
   support inside of EFISTUB (in Ard's EFI tree). The two are
   co-dependent for full functionality, but the order of merging
   doesn't matter.

 - Part of the infrastructure added for the EFI support is also used
   for an improvement to the way vsprintf initializes its siphash key,
   replacing a sleep-loop wart.

 - The hardware RNG framework now always calls its correct random.c
   input function, add_hwgenerator_randomness(), rather than sometimes
   going through helpers better suited for other cases.

 - The add_latent_entropy() function has long been called from the fork
   handler, but is a no-op when the latent entropy gcc plugin isn't
   used, which is fine for the purposes of latent entropy.

   But it was missing out on the cycle counter that was also being mixed
   in beside the latent entropy variable. So now, if the latent entropy
   gcc plugin isn't enabled, add_latent_entropy() will expand to a call
   to add_device_randomness(NULL, 0), which adds a cycle counter,
   without the absent latent entropy variable.

 - The RNG is now reseeded from a delayed worker, rather than on demand
   when used. Always running from a worker allows it to make use of the
   CPU RNG on platforms like S390x, whose instructions are too slow to
   do so from interrupts. It also has the effect of adding in new inputs
   more frequently and with more regularity, amounting to a long-term
   transcript of random values. Plus, it helps a bit with the upcoming
   vDSO implementation (which isn't yet ready for 6.2).

 - The jitter entropy algorithm now tries to execute on many different
   CPUs, round-robining, in hopes of hitting even more memory latencies
   and other unpredictable effects. It also will mix in a cycle counter
   when the entropy timer fires, in addition to being mixed in from the
   main loop, to account more explicitly for fluctuations in that timer
   firing. And the state it touches is now kept within the same cache
   line, so that it's assured that the different execution contexts will
   cause latencies.
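
   As a rough usage sketch of the new interval helpers (the callers and
   variable names below are made up for illustration; only the three
   helper names and their ranges come from this series):

       u32 idx, delay, port;

       idx   = get_random_u32_below(n);             /* [0, n)         */
       delay = get_random_u32_above(min_jiffies);   /* (min, U32_MAX] */
       port  = get_random_u32_inclusive(lo, hi);    /* [lo, hi]       */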

* tag 'random-6.2-rc1-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random: (23 commits)
  random: include <linux/once.h> in the right header
  random: align entropy_timer_state to cache line
  random: mix in cycle counter when jitter timer fires
  random: spread out jitter callback to different CPUs
  random: remove extraneous period and add a missing one in comments
  efi: random: refresh non-volatile random seed when RNG is initialized
  vsprintf: initialize siphash key using notifier
  random: add back async readiness notifier
  random: reseed in delayed work rather than on-demand
  random: always mix cycle counter in add_latent_entropy()
  hw_random: use add_hwgenerator_randomness() for early entropy
  random: modernize documentation comment on get_random_bytes()
  random: adjust comment to account for removed function
  random: remove early archrandom abstraction
  random: use random.trust_{bootloader,cpu} command line option only
  stackprotector: actually use get_random_canary()
  stackprotector: move get_random_canary() into stackprotector.h
  treewide: use get_random_u32_inclusive() when possible
  treewide: use get_random_u32_{above,below}() instead of manual loop
  treewide: use get_random_u32_below() instead of deprecated function
  ...
2022-12-12 16:22:22 -08:00

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/compat.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
#include <linux/percpu.h>
#include <linux/thread_info.h>
#include <linux/prctl.h>
#include <linux/stacktrace.h>
#include <asm/alternative.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/processor.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/system_misc.h>
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
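/*
 * Global stack-protector canary, shared by all tasks when per-task
 * canaries (CONFIG_STACKPROTECTOR_PER_TASK) are not in use; the compiler
 * compares against this value in function epilogues.
 */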
unsigned long __stack_chk_guard __ro_after_init;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif

/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in smp_shutdown_nonboot_cpus() achieves this.
 */
void machine_shutdown(void)
{
	smp_shutdown_nonboot_cpus(reboot_cpu);
}

/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
	local_irq_disable();
	smp_send_stop();
	while (1);
}

/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
	local_irq_disable();
	smp_send_stop();
	do_kernel_power_off();
}

/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
	/* Disable interrupts first */
	local_irq_disable();
	smp_send_stop();

	/*
	 * UpdateCapsule() depends on the system being reset via
	 * ResetSystem().
	 */
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		efi_reboot(reboot_mode, NULL);

	/* Now call the architecture specific reboot code. */
	do_kernel_restart(cmd);

	/*
	 * Whoops - the architecture was unable to reboot.
	 */
	printk("Reboot failed -- System halted\n");
	while (1);
}

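/*
 * Map each PSTATE.BTYPE value to the two-character string printed in the
 * BTYPE= field of the pstate dump below.
 */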
#define bstr(suffix, str) [PSR_BTYPE_ ## suffix >> PSR_BTYPE_SHIFT] = str
static const char *const btypes[] = {
	bstr(NONE, "--"),
	bstr(  JC, "jc"),
	bstr(   C, "-c"),
	bstr(  J , "j-")
};
#undef bstr

static void print_pstate(struct pt_regs *regs)
{
	u64 pstate = regs->pstate;

	if (compat_user_mode(regs)) {
		printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c %cDIT %cSSBS)\n",
		       pstate,
		       pstate & PSR_AA32_N_BIT ? 'N' : 'n',
		       pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
		       pstate & PSR_AA32_C_BIT ? 'C' : 'c',
		       pstate & PSR_AA32_V_BIT ? 'V' : 'v',
		       pstate & PSR_AA32_Q_BIT ? 'Q' : 'q',
		       pstate & PSR_AA32_T_BIT ? "T32" : "A32",
		       pstate & PSR_AA32_E_BIT ? "BE" : "LE",
		       pstate & PSR_AA32_A_BIT ? 'A' : 'a',
		       pstate & PSR_AA32_I_BIT ? 'I' : 'i',
		       pstate & PSR_AA32_F_BIT ? 'F' : 'f',
		       pstate & PSR_AA32_DIT_BIT ? '+' : '-',
		       pstate & PSR_AA32_SSBS_BIT ? '+' : '-');
	} else {
		const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >>
					       PSR_BTYPE_SHIFT];

		printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO %cDIT %cSSBS BTYPE=%s)\n",
		       pstate,
		       pstate & PSR_N_BIT ? 'N' : 'n',
		       pstate & PSR_Z_BIT ? 'Z' : 'z',
		       pstate & PSR_C_BIT ? 'C' : 'c',
		       pstate & PSR_V_BIT ? 'V' : 'v',
		       pstate & PSR_D_BIT ? 'D' : 'd',
		       pstate & PSR_A_BIT ? 'A' : 'a',
		       pstate & PSR_I_BIT ? 'I' : 'i',
		       pstate & PSR_F_BIT ? 'F' : 'f',
		       pstate & PSR_PAN_BIT ? '+' : '-',
		       pstate & PSR_UAO_BIT ? '+' : '-',
		       pstate & PSR_TCO_BIT ? '+' : '-',
		       pstate & PSR_DIT_BIT ? '+' : '-',
		       pstate & PSR_SSBS_BIT ? '+' : '-',
		       btype_str);
	}
}

void __show_regs(struct pt_regs *regs)
{
	int i, top_reg;
	u64 lr, sp;

	if (compat_user_mode(regs)) {
		lr = regs->compat_lr;
		sp = regs->compat_sp;
		top_reg = 12;
	} else {
		lr = regs->regs[30];
		sp = regs->sp;
		top_reg = 29;
	}

	show_regs_print_info(KERN_DEFAULT);
	print_pstate(regs);

	if (!user_mode(regs)) {
		printk("pc : %pS\n", (void *)regs->pc);
		printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr));
	} else {
		printk("pc : %016llx\n", regs->pc);
		printk("lr : %016llx\n", lr);
	}

	printk("sp : %016llx\n", sp);

	if (system_uses_irq_prio_masking())
		printk("pmr_save: %08llx\n", regs->pmr_save);
	i = top_reg;
	while (i >= 0) {
		printk("x%-2d: %016llx", i, regs->regs[i]);

		while (i-- % 3)
			pr_cont(" x%-2d: %016llx", i, regs->regs[i]);

		pr_cont("\n");
	}
}

void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	dump_backtrace(regs, NULL, KERN_DEFAULT);
}

static void tls_thread_flush(void)
{
	write_sysreg(0, tpidr_el0);
	if (system_supports_tpidr2())
		write_sysreg_s(0, SYS_TPIDR2_EL0);

	if (is_compat_task()) {
		current->thread.uw.tp_value = 0;

		/*
		 * We need to ensure ordering between the shadow state and the
		 * hardware state, so that we don't corrupt the hardware state
		 * with a stale shadow state during context switch.
		 */
		barrier();
		write_sysreg(0, tpidrro_el0);
	}
}

static void flush_tagged_addr_state(void)
{
	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI))
		clear_thread_flag(TIF_TAGGED_ADDR);
}

void flush_thread(void)
{
	fpsimd_flush_thread();
	tls_thread_flush();
	flush_ptrace_hw_breakpoint(current);
	flush_tagged_addr_state();
}

void arch_release_task_struct(struct task_struct *tsk)
{
	fpsimd_release_task(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	if (current->mm)
		fpsimd_preserve_current_state();
	*dst = *src;

	/* We rely on the above assignment to initialize dst's thread_flags: */
	BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));

	/*
	 * Detach src's sve_state (if any) from dst so that it does not
	 * get erroneously used or freed prematurely. dst's copies
	 * will be allocated on demand later on if dst uses SVE.
	 * For consistency, also clear TIF_SVE here: this could be done
	 * later in copy_process(), but to avoid tripping up future
	 * maintainers it is best not to leave TIF flags and buffers in
	 * an inconsistent state, even temporarily.
	 */
	dst->thread.sve_state = NULL;
	clear_tsk_thread_flag(dst, TIF_SVE);

	/*
	 * In the unlikely event that we create a new thread with ZA
	 * enabled we should retain the ZA state so duplicate it here.
	 * This may be shortly freed if we exec() or if CLONE_SETTLS
	 * but it's simpler to do it here. To avoid confusing the rest
	 * of the code ensure that we have a sve_state allocated
	 * whenever za_state is allocated.
	 */
	if (thread_za_enabled(&src->thread)) {
		dst->thread.sve_state = kzalloc(sve_state_size(src),
						GFP_KERNEL);
		if (!dst->thread.sve_state)
			return -ENOMEM;
		dst->thread.za_state = kmemdup(src->thread.za_state,
					       za_state_size(src),
					       GFP_KERNEL);
		if (!dst->thread.za_state) {
			kfree(dst->thread.sve_state);
			dst->thread.sve_state = NULL;
			return -ENOMEM;
		}
	} else {
		dst->thread.za_state = NULL;
		clear_tsk_thread_flag(dst, TIF_SME);
	}

	dst->thread.fp_type = FP_STATE_FPSIMD;

	/* clear any pending asynchronous tag fault raised by the parent */
	clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);

	return 0;
}

asmlinkage void ret_from_fork(void) asm("ret_from_fork");

int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long stack_start = args->stack;
	unsigned long tls = args->tls;
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

	/*
	 * In case p was allocated the same task_struct pointer as some
	 * other recently-exited task, make sure p is disassociated from
	 * any cpu that may have run that now-exited task recently.
	 * Otherwise we could erroneously skip reloading the FPSIMD
	 * registers for p.
	 */
	fpsimd_flush_task_state(p);

	ptrauth_thread_init_kernel(p);

	if (likely(!args->fn)) {
		*childregs = *current_pt_regs();
		childregs->regs[0] = 0;

		/*
		 * Read the current TLS pointer from tpidr_el0 as it may be
		 * out-of-sync with the saved value.
		 */
		*task_user_tls(p) = read_sysreg(tpidr_el0);
		if (system_supports_tpidr2())
			p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);

		if (stack_start) {
			if (is_compat_thread(task_thread_info(p)))
				childregs->compat_sp = stack_start;
			else
				childregs->sp = stack_start;
		}

		/*
		 * If a TLS pointer was passed to clone, use it for the new
		 * thread. We also reset TPIDR2 if it's in use.
		 */
		if (clone_flags & CLONE_SETTLS) {
			p->thread.uw.tp_value = tls;
			p->thread.tpidr2_el0 = 0;
		}
	} else {
		/*
		 * A kthread has no context to ERET to, so ensure any buggy
		 * ERET is treated as an illegal exception return.
		 *
		 * When a user task is created from a kthread, childregs will
		 * be initialized by start_thread() or start_compat_thread().
		 */
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT;
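
		/*
		 * ret_from_fork() branches to the function stashed in x19
		 * with x20 as its argument, so a new kthread starts by
		 * executing args->fn(args->fn_arg).
		 */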
		p->thread.cpu_context.x19 = (unsigned long)args->fn;
		p->thread.cpu_context.x20 = (unsigned long)args->fn_arg;
	}
	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
	p->thread.cpu_context.sp = (unsigned long)childregs;
	/*
	 * For the benefit of the unwinder, set up childregs->stackframe
	 * as the final frame for the new task.
	 */
	p->thread.cpu_context.fp = (unsigned long)childregs->stackframe;

	ptrace_hw_copy_thread(p);

	return 0;
}

void tls_preserve_current_state(void)
{
	*task_user_tls(current) = read_sysreg(tpidr_el0);
	if (system_supports_tpidr2() && !is_compat_task())
		current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
}

static void tls_thread_switch(struct task_struct *next)
{
	tls_preserve_current_state();
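
	/*
	 * 32-bit tasks read their TLS value through tpidrro_el0, so it must
	 * hold tp_value for them. For native tasks the register is zeroed,
	 * except when KPTI is enabled, in which case the exception
	 * trampoline overwrites tpidrro_el0 on every return to EL0 anyway.
	 */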
	if (is_compat_thread(task_thread_info(next)))
		write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
	else if (!arm64_kernel_unmapped_at_el0())
		write_sysreg(0, tpidrro_el0);

	write_sysreg(*task_user_tls(next), tpidr_el0);
	if (system_supports_tpidr2())
		write_sysreg_s(next->thread.tpidr2_el0, SYS_TPIDR2_EL0);
}

/*
 * Force SSBS state on context-switch, since it may be lost after migrating
 * from a CPU which treats the bit as RES0 in a heterogeneous system.
 */
static void ssbs_thread_switch(struct task_struct *next)
{
	/*
	 * Nothing to do for kernel threads, but 'regs' may be junk
	 * (e.g. idle task) so check the flags and bail early.
	 */
	if (unlikely(next->flags & PF_KTHREAD))
		return;

	/*
	 * If all CPUs implement the SSBS extension, then we just need to
	 * context-switch the PSTATE field.
	 */
	if (cpus_have_const_cap(ARM64_SSBS))
		return;

	spectre_v4_enable_task_mitigation(next);
}

/*
 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
 * shadow copy so that we can restore this upon entry from userspace.
 *
 * This is *only* for exception entry from EL0, and is not valid until we
 * __switch_to() a user task.
 */
DEFINE_PER_CPU(struct task_struct *, __entry_task);

static void entry_task_switch(struct task_struct *next)
{
	__this_cpu_write(__entry_task, next);
}

/*
 * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
 * Ensure access is disabled when switching to a 32bit task, ensure
 * access is enabled when switching to a 64bit task.
 */
static void erratum_1418040_thread_switch(struct task_struct *next)
{
	if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) ||
	    !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
		return;

	if (is_compat_thread(task_thread_info(next)))
		sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0);
	else
		sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN);
}

static void erratum_1418040_new_exec(void)
{
	preempt_disable();
	erratum_1418040_thread_switch(current);
	preempt_enable();
}

/*
 * __switch_to() checks current->thread.sctlr_user as an optimisation. Therefore
 * this function must be called with preemption disabled and the update to
 * sctlr_user must be made in the same preemption disabled block so that
 * __switch_to() does not see the variable update before the SCTLR_EL1 one.
 */
void update_sctlr_el1(u64 sctlr)
{
	/*
	 * EnIA must not be cleared while in the kernel as this is necessary for
	 * in-kernel PAC. It will be cleared on kernel exit if needed.
	 */
	sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK & ~SCTLR_ELx_ENIA, sctlr);

	/* ISB required for the kernel uaccess routines when setting TCF0. */
	isb();
}

/*
 * Thread switching.
 */
__notrace_funcgraph __sched
struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next)
{
	struct task_struct *last;

	fpsimd_thread_switch(next);
	tls_thread_switch(next);
	hw_breakpoint_thread_switch(next);
	contextidr_thread_switch(next);
	entry_task_switch(next);
	ssbs_thread_switch(next);
	erratum_1418040_thread_switch(next);
	ptrauth_thread_switch_user(next);

	/*
	 * Complete any pending TLB or cache maintenance on this CPU in case
	 * the thread migrates to a different CPU.
	 * This full barrier is also required by the membarrier system
	 * call.
	 */
	dsb(ish);

	/*
	 * MTE thread switching must happen after the DSB above to ensure that
	 * any asynchronous tag check faults have been logged in the TFSR*_EL1
	 * registers.
	 */
	mte_thread_switch(next);
	/* avoid expensive SCTLR_EL1 accesses if no change */
	if (prev->thread.sctlr_user != next->thread.sctlr_user)
		update_sctlr_el1(next->thread.sctlr_user);

	/* the actual thread switch */
	last = cpu_switch_to(prev, next);

	return last;
}

struct wchan_info {
	unsigned long pc;
	int count;
};
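
/*
 * arch_stack_walk() callback: record the first PC outside the scheduler
 * as the task's wait channel, giving up after 16 scheduler frames.
 */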
static bool get_wchan_cb(void *arg, unsigned long pc)
{
	struct wchan_info *wchan_info = arg;

	if (!in_sched_functions(pc)) {
		wchan_info->pc = pc;
		return false;
	}
	return wchan_info->count++ < 16;
}

unsigned long __get_wchan(struct task_struct *p)
{
	struct wchan_info wchan_info = {
		.pc = 0,
		.count = 0,
	};

	if (!try_get_task_stack(p))
		return 0;

	arch_stack_walk(get_wchan_cb, &wchan_info, p, NULL);

	put_task_stack(p);

	return wchan_info.pc;
}
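
/*
 * Randomize the new task's initial stack pointer by up to one page and
 * 16-byte align the result, unless address-space randomization is
 * disabled for this task or system-wide.
 */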
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_u32_below(PAGE_SIZE);
	return sp & ~0xf;
}

#ifdef CONFIG_COMPAT
int compat_elf_check_arch(const struct elf32_hdr *hdr)
{
	if (!system_supports_32bit_el0())
		return false;

	if ((hdr)->e_machine != EM_ARM)
		return false;

	if (!((hdr)->e_flags & EF_ARM_EABI_MASK))
		return false;

	/*
	 * Prevent execve() of a 32-bit program from a deadline task
	 * if the restricted affinity mask would be inadmissible on an
	 * asymmetric system.
	 */
	return !static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
	       !dl_task_check_affinity(current, system_32bit_el0_cpumask());
}
#endif

/*
 * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
 */
void arch_setup_new_exec(void)
{
	unsigned long mmflags = 0;

	if (is_compat_task()) {
		mmflags = MMCF_AARCH32;

		/*
		 * Restrict the CPU affinity mask for a 32-bit task so that
		 * it contains only 32-bit-capable CPUs.
		 *
		 * From the perspective of the task, this looks similar to
		 * what would happen if the 64-bit-only CPUs were hot-unplugged
		 * at the point of execve(), although we try a bit harder to
		 * honour the cpuset hierarchy.
		 */
		if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
			force_compatible_cpus_allowed_ptr(current);
	} else if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) {
		relax_compatible_cpus_allowed_ptr(current);
	}

	current->mm->context.flags = mmflags;
	ptrauth_thread_init_user();
	mte_thread_init_user();
	erratum_1418040_new_exec();

	if (task_spec_ssb_noexec(current)) {
		arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
					 PR_SPEC_ENABLE);
	}
}

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/*
 * Control the relaxed ABI allowing tagged user addresses into the kernel.
 */
static unsigned int tagged_addr_disabled;

long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
{
	unsigned long valid_mask = PR_TAGGED_ADDR_ENABLE;
	struct thread_info *ti = task_thread_info(task);

	if (is_compat_thread(ti))
		return -EINVAL;

	if (system_supports_mte())
		valid_mask |= PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC |
			      PR_MTE_TAG_MASK;

	if (arg & ~valid_mask)
		return -EINVAL;

	/*
	 * Do not allow the enabling of the tagged address ABI if globally
	 * disabled via sysctl abi.tagged_addr_disabled.
	 */
	if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
		return -EINVAL;

	if (set_mte_ctrl(task, arg) != 0)
		return -EINVAL;

	update_ti_thread_flag(ti, TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);

	return 0;
}

long get_tagged_addr_ctrl(struct task_struct *task)
{
	long ret = 0;
	struct thread_info *ti = task_thread_info(task);

	if (is_compat_thread(ti))
		return -EINVAL;

	if (test_ti_thread_flag(ti, TIF_TAGGED_ADDR))
		ret = PR_TAGGED_ADDR_ENABLE;

	ret |= get_mte_ctrl(task);

	return ret;
}

/*
 * Global sysctl to disable the tagged user addresses support. This control
 * only prevents the tagged address ABI enabling via prctl() and does not
 * disable it for tasks that already opted in to the relaxed ABI.
 */
static struct ctl_table tagged_addr_sysctl_table[] = {
	{
		.procname	= "tagged_addr_disabled",
		.mode		= 0644,
		.data		= &tagged_addr_disabled,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

static int __init tagged_addr_init(void)
{
	if (!register_sysctl("abi", tagged_addr_sysctl_table))
		return -EINVAL;
	return 0;
}

core_initcall(tagged_addr_init);
#endif	/* CONFIG_ARM64_TAGGED_ADDR_ABI */

#ifdef CONFIG_BINFMT_ELF
int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
			 bool has_interp, bool is_interp)
{
	/*
	 * For dynamically linked executables the interpreter is
	 * responsible for setting PROT_BTI on everything except
	 * itself.
	 */
	if (is_interp != has_interp)
		return prot;

	if (!(state->flags & ARM64_ELF_BTI))
		return prot;

	if (prot & PROT_EXEC)
		prot |= PROT_BTI;

	return prot;
}
#endif