kfence: move saving stack trace of allocations into __kfence_alloc()
Move the saving of the stack trace of allocations into __kfence_alloc(), so
that the stack entries array can be used outside of kfence_guarded_alloc()
and we avoid potentially unwinding the stack multiple times.

Link: https://lkml.kernel.org/r/20210923104803.2620285-3-elver@google.com
Signed-off-by: Marco Elver <elver@google.com>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Acked-by: Alexander Potapenko <glider@google.com>
Cc: Aleksandr Nogikh <nogikh@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Taras Madan <tarasmadan@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit a9ab52bbcb
parent 9a19aeb566
committed by Linus Torvalds
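Before the diff itself, a minimal userspace C sketch of the pattern this change introduces: the entry point unwinds the stack once and hands the entries array down, and the metadata updater only unwinds itself as a fallback when no entries are supplied (as on the free path). The helper save_stack(), the struct names, and STACK_DEPTH are hypothetical stand-ins for the kernel's stack_trace_save() and KFENCE_STACK_DEPTH; none of this code is part of KFENCE.

/*
 * Sketch of "capture once, pass down": do_alloc() mirrors __kfence_alloc(),
 * update_track() mirrors metadata_update_state(). save_stack() is a
 * hypothetical stand-in for stack_trace_save(), built on glibc backtrace().
 */
#include <execinfo.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define STACK_DEPTH 64

struct track {
	void *stack_entries[STACK_DEPTH];
	size_t num_stack_entries;
};

static struct track alloc_track;

/* Capture up to 'max' return addresses of the current call stack. */
static size_t save_stack(void **entries, size_t max)
{
	int n = backtrace(entries, (int)max);
	return n > 0 ? (size_t)n : 0;
}

/* Copy a pre-captured trace if one was passed, otherwise unwind here. */
static void update_track(struct track *t, void **stack_entries, size_t num_stack_entries)
{
	if (stack_entries) {
		memcpy(t->stack_entries, stack_entries,
		       num_stack_entries * sizeof(stack_entries[0]));
	} else {
		num_stack_entries = save_stack(t->stack_entries, STACK_DEPTH);
	}
	t->num_stack_entries = num_stack_entries;
}

/* Entry point: unwind the stack once, then reuse the entries array. */
static void do_alloc(void)
{
	void *stack_entries[STACK_DEPTH];
	size_t num_stack_entries = save_stack(stack_entries, STACK_DEPTH);

	update_track(&alloc_track, stack_entries, num_stack_entries);
}

int main(void)
{
	do_alloc();
	printf("captured %zu stack entries once, reused for metadata\n",
	       alloc_track.num_stack_entries);
	return 0;
}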
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -187,19 +187,26 @@ static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *m
  * Update the object's metadata state, including updating the alloc/free stacks
  * depending on the state transition.
  */
-static noinline void metadata_update_state(struct kfence_metadata *meta,
-					   enum kfence_object_state next)
+static noinline void
+metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
+		      unsigned long *stack_entries, size_t num_stack_entries)
 {
 	struct kfence_track *track =
 		next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;
 
 	lockdep_assert_held(&meta->lock);
 
-	/*
-	 * Skip over 1 (this) functions; noinline ensures we do not accidentally
-	 * skip over the caller by never inlining.
-	 */
-	track->num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
+	if (stack_entries) {
+		memcpy(track->stack_entries, stack_entries,
+		       num_stack_entries * sizeof(stack_entries[0]));
+	} else {
+		/*
+		 * Skip over 1 (this) functions; noinline ensures we do not
+		 * accidentally skip over the caller by never inlining.
+		 */
+		num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
+	}
+	track->num_stack_entries = num_stack_entries;
 
 	track->pid = task_pid_nr(current);
 	track->cpu = raw_smp_processor_id();
 	track->ts_nsec = local_clock(); /* Same source as printk timestamps. */
@@ -261,7 +268,8 @@ static __always_inline void for_each_canary(const struct kfence_metadata *meta,
 	}
 }
 
-static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp)
+static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
+				  unsigned long *stack_entries, size_t num_stack_entries)
 {
 	struct kfence_metadata *meta = NULL;
 	unsigned long flags;
@@ -320,7 +328,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	addr = (void *)meta->addr;
 
 	/* Update remaining metadata. */
-	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED);
+	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
 	/* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
 	WRITE_ONCE(meta->cache, cache);
 	meta->size = size;
@@ -400,7 +408,7 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
 		memzero_explicit(addr, meta->size);
 
 	/* Mark the object as freed. */
-	metadata_update_state(meta, KFENCE_OBJECT_FREED);
+	metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
 
 	raw_spin_unlock_irqrestore(&meta->lock, flags);
 
@@ -742,6 +750,9 @@ void kfence_shutdown_cache(struct kmem_cache *s)
 
 void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
 {
+	unsigned long stack_entries[KFENCE_STACK_DEPTH];
+	size_t num_stack_entries;
+
 	/*
 	 * Perform size check before switching kfence_allocation_gate, so that
 	 * we don't disable KFENCE without making an allocation.
@@ -786,7 +797,9 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
 	if (!READ_ONCE(kfence_enabled))
 		return NULL;
 
-	return kfence_guarded_alloc(s, size, flags);
+	num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);
+
+	return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries);
 }
 
 size_t kfence_ksize(const void *addr)