linux/kernel/scs.c
Will Deacon 51189c7a7e arm64: scs: Store absolute SCS stack pointer value in thread_info
Storing the SCS information in thread_info as a {base,offset} pair
introduces an additional load instruction on the ret-to-user path,
since the SCS stack pointer in x18 has to be converted back to an offset
by subtracting the base.

Replace the offset with the absolute SCS stack pointer value instead
and avoid the redundant load.

Tested-by: Sami Tolvanen <samitolvanen@google.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
2020-05-18 17:47:22 +01:00
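
The gist of the change, as a rough standalone C sketch rather than the actual
arm64 code (the struct names ti_offset/ti_absolute and the save_* helpers are
made up for illustration; in the kernel the save happens in the arm64 entry
assembly and the fields live in thread_info): with a {base,offset} pair,
saving the shadow call stack pointer held in x18 first requires loading the
base so the offset can be computed, whereas storing the absolute value is a
single store.

#include <stdio.h>

struct ti_offset {		/* old scheme: {base, offset} pair */
	void *scs_base;
	unsigned long scs_offset;
};

struct ti_absolute {		/* new scheme: absolute SCS stack pointer */
	void *scs_base;
	void *scs_sp;
};

/* Saving the SCS stack pointer (x18 on arm64) on the ret-to-user path. */
static void save_offset(struct ti_offset *ti, char *x18)
{
	/* Needs ti->scs_base (an extra load) to turn x18 back into an offset. */
	ti->scs_offset = (unsigned long)(x18 - (char *)ti->scs_base);
}

static void save_absolute(struct ti_absolute *ti, char *x18)
{
	/* A single store; no dependency on scs_base. */
	ti->scs_sp = x18;
}

int main(void)
{
	unsigned long stack[128] = { 0 };
	struct ti_offset t1 = { .scs_base = stack };
	struct ti_absolute t2 = { .scs_base = stack };
	char *x18 = (char *)&stack[10];	/* pretend x18 points into the stack */

	save_offset(&t1, x18);
	save_absolute(&t2, x18);

	printf("offset scheme stores %lu, absolute scheme stores %p\n",
	       t1.scs_offset, t2.scs_sp);
	return 0;
}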

// SPDX-License-Identifier: GPL-2.0
/*
 * Shadow Call Stack support.
 *
 * Copyright (C) 2019 Google LLC
 */

#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/scs.h>
#include <linux/slab.h>
#include <linux/vmstat.h>
#include <asm/scs.h>

static struct kmem_cache *scs_cache;

static void *scs_alloc(int node)
{
	void *s;

	s = kmem_cache_alloc_node(scs_cache, GFP_SCS, node);
	if (s) {
		*__scs_magic(s) = SCS_END_MAGIC;
		/*
		 * Poison the allocation to catch unintentional accesses to
		 * the shadow stack when KASAN is enabled.
		 */
		kasan_poison_object_data(scs_cache, s);
	}

	return s;
}

static void scs_free(void *s)
{
	kasan_unpoison_object_data(scs_cache, s);
	kmem_cache_free(scs_cache, s);
}

void __init scs_init(void)
{
	scs_cache = kmem_cache_create("scs_cache", SCS_SIZE, 0, 0, NULL);
}

static struct page *__scs_page(struct task_struct *tsk)
{
	return virt_to_page(task_scs(tsk));
}

static void scs_account(struct task_struct *tsk, int account)
{
	mod_zone_page_state(page_zone(__scs_page(tsk)), NR_KERNEL_SCS_KB,
			    account * (SCS_SIZE / 1024));
}

int scs_prepare(struct task_struct *tsk, int node)
{
	void *s = scs_alloc(node);

	if (!s)
		return -ENOMEM;

	/* The shadow stack grows up, so the stack pointer starts at the base. */
	task_scs(tsk) = task_scs_sp(tsk) = s;

	scs_account(tsk, 1);
	return 0;
}

static void scs_check_usage(struct task_struct *tsk)
{
	static unsigned long highest;

	unsigned long *p, prev, curr = highest, used = 0;

	if (!IS_ENABLED(CONFIG_DEBUG_STACK_USAGE))
		return;

	/* Count the bytes used before the first empty (zero) slot. */
	for (p = task_scs(tsk); p < __scs_magic(tsk); ++p) {
		if (!READ_ONCE_NOCHECK(*p))
			break;
		used += sizeof(*p);
	}

	/* Racily raise the global high-water mark, logging each new maximum. */
	while (used > curr) {
		prev = cmpxchg_relaxed(&highest, curr, used);

		if (prev == curr) {
			pr_info("%s (%d): highest shadow stack usage: %lu bytes\n",
				tsk->comm, task_pid_nr(tsk), used);
			break;
		}

		curr = prev;
	}
}

void scs_release(struct task_struct *tsk)
{
	void *s = task_scs(tsk);

	if (!s)
		return;

	WARN(scs_corrupted(tsk), "corrupted shadow stack detected when freeing task\n");
	scs_check_usage(tsk);
	scs_account(tsk, -1);
	scs_free(s);
}