stackleak: rework stack low bound handling
In stackleak_task_init(), stackleak_track_stack(), and __stackleak_erase(), we open-code skipping the STACK_END_MAGIC at the bottom of the stack. Each case is implemented slightly differently, and only the __stackleak_erase() case is commented.

In stackleak_task_init() and stackleak_track_stack() we unconditionally add sizeof(unsigned long) to the lowest stack address. In stackleak_task_init() we use end_of_stack() for this, and in stackleak_track_stack() we use task_stack_page(). In __stackleak_erase() we handle this by detecting if `kstack_ptr` has hit the stack end boundary, and if so, conditionally moving it above the magic.

This patch adds a new stackleak_task_low_bound() helper which is used in all three cases, which unconditionally adds sizeof(unsigned long) to the lowest address on the task stack, with commentary as to why.

This uses end_of_stack() as stackleak_task_init() did prior to this patch, as this is consistent with the code in kernel/fork.c which initializes the STACK_END_MAGIC value.

In __stackleak_erase() we no longer need to check whether we've spilled into the STACK_END_MAGIC value, as stackleak_track_stack() ensures that `current->lowest_stack` stops immediately above this, and similarly the poison scan will stop immediately above this.

For stackleak_task_init() and stackleak_track_stack() this results in no change to code generation. For __stackleak_erase() the generated assembly is slightly simpler and shorter.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Alexander Popov <alex.popov@linux.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20220427173128.2603085-5-mark.rutland@arm.com
parent ac7838b4e1
commit 9ec79840d6
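As the message notes, the fork path writes STACK_END_MAGIC to the word returned by end_of_stack(), which is why the new helper skips exactly one unsigned long above the stack end. Below is a minimal, self-contained userspace model of that layout and of the low-bound calculation. It is an illustrative sketch, not code from this patch; the model_* names and MODEL_* constants are hypothetical stand-ins.

/*
 * Userspace model (hypothetical names): the lowest unsigned long of the
 * stack area holds a sentinel playing the role of STACK_END_MAGIC, so
 * the lowest address we may erase is one word above the stack end.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define MODEL_THREAD_SIZE       (16 * 1024UL)
#define MODEL_STACK_END_MAGIC   0x57AC6E9DUL

struct model_task {
        unsigned long *stack;   /* lowest address of the stack area */
};

/* Plays the role of end_of_stack(): the lowest word of the stack area. */
static unsigned long *model_end_of_stack(const struct model_task *tsk)
{
        return tsk->stack;
}

/* Plays the role of stackleak_task_low_bound(): skip the magic word. */
static unsigned long model_task_low_bound(const struct model_task *tsk)
{
        return (unsigned long)model_end_of_stack(tsk) + sizeof(unsigned long);
}

int main(void)
{
        struct model_task tsk;

        tsk.stack = malloc(MODEL_THREAD_SIZE);
        assert(tsk.stack);

        /* What the fork path does with STACK_END_MAGIC, in model form. */
        *model_end_of_stack(&tsk) = MODEL_STACK_END_MAGIC;

        printf("stack end: %p\n", (void *)model_end_of_stack(&tsk));
        printf("low bound: %#lx\n", model_task_low_bound(&tsk));

        free(tsk.stack);
        return 0;
}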
@@ -15,9 +15,22 @@
 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
 #include <asm/stacktrace.h>
 
+/*
+ * The lowest address on tsk's stack which we can plausibly erase.
+ */
+static __always_inline unsigned long
+stackleak_task_low_bound(const struct task_struct *tsk)
+{
+        /*
+         * The lowest unsigned long on the task stack contains STACK_END_MAGIC,
+         * which we must not corrupt.
+         */
+        return (unsigned long)end_of_stack(tsk) + sizeof(unsigned long);
+}
+
 static inline void stackleak_task_init(struct task_struct *t)
 {
-        t->lowest_stack = (unsigned long)end_of_stack(t) + sizeof(unsigned long);
+        t->lowest_stack = stackleak_task_low_bound(t);
 # ifdef CONFIG_STACKLEAK_METRICS
         t->prev_lowest_stack = t->lowest_stack;
 # endif
@@ -72,9 +72,11 @@ late_initcall(stackleak_sysctls_init);
 
 static __always_inline void __stackleak_erase(void)
 {
+        const unsigned long task_stack_low = stackleak_task_low_bound(current);
+
         /* It would be nice not to have 'kstack_ptr' and 'boundary' on stack */
         unsigned long kstack_ptr = current->lowest_stack;
-        unsigned long boundary = (unsigned long)end_of_stack(current);
+        unsigned long boundary = task_stack_low;
         unsigned int poison_count = 0;
         const unsigned int depth = STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long);
 
@@ -88,13 +90,6 @@ static __always_inline void __stackleak_erase(void)
                 kstack_ptr -= sizeof(unsigned long);
         }
 
-        /*
-         * One 'long int' at the bottom of the thread stack is reserved and
-         * should not be poisoned (see CONFIG_SCHED_STACK_END_CHECK=y).
-         */
-        if (kstack_ptr == boundary)
-                kstack_ptr += sizeof(unsigned long);
-
 #ifdef CONFIG_STACKLEAK_METRICS
         current->prev_lowest_stack = kstack_ptr;
 #endif
@@ -140,8 +135,7 @@ void __used __no_caller_saved_registers noinstr stackleak_track_stack(void)
         /* 'lowest_stack' should be aligned on the register width boundary */
         sp = ALIGN(sp, sizeof(unsigned long));
         if (sp < current->lowest_stack &&
-            sp >= (unsigned long)task_stack_page(current) +
-            sizeof(unsigned long)) {
+            sp >= stackleak_task_low_bound(current)) {
                 current->lowest_stack = sp;
         }
 }
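To illustrate why the removed "if (kstack_ptr == boundary)" fixup is no longer needed: once the downward scan is bounded below by the low bound, it stops one word above the magic without any special case. The following is a minimal, self-contained userspace model, not kernel code; the MODEL_* constants and the word-array layout are hypothetical, and the real scan additionally counts a search depth.

/*
 * Userspace model (hypothetical names): scan downwards over poison words,
 * bounded below by the low bound, and observe that the loop condition
 * never lets us dereference the magic word at the stack end.
 */
#include <stdio.h>
#include <stdlib.h>

#define MODEL_WORDS             64
#define MODEL_POISON            0xCCCCCCCCUL
#define MODEL_STACK_END_MAGIC   0x57AC6E9DUL

int main(void)
{
        unsigned long *stack = calloc(MODEL_WORDS, sizeof(*stack));
        unsigned long low, kstack_ptr;
        size_t i;

        if (!stack)
                return 1;

        /* Word 0 plays the role of STACK_END_MAGIC; the rest is poison. */
        stack[0] = MODEL_STACK_END_MAGIC;
        for (i = 1; i < MODEL_WORDS; i++)
                stack[i] = MODEL_POISON;

        /* The low bound sits one word above the magic, as in the patch. */
        low = (unsigned long)&stack[1];
        kstack_ptr = (unsigned long)&stack[MODEL_WORDS - 1];

        /*
         * Scan downwards while we see poison; "kstack_ptr > low" is checked
         * first, so the word holding the magic is never read.
         */
        while (kstack_ptr > low && *(unsigned long *)kstack_ptr == MODEL_POISON)
                kstack_ptr -= sizeof(unsigned long);

        printf("scan stopped at %#lx, low bound %#lx\n", kstack_ptr, low);

        free(stack);
        return 0;
}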