mm: page_owner: detect page_owner recursion via task_struct
Before this change, page_owner recursion was detected by fetching a backtrace and scanning it for the current instruction pointer. That approach has a few problems:

- it is slightly slow, as it requires an extra backtrace and a linear scan of the result
- it happens too late to catch the case where fetching the backtrace itself requires a memory allocation (as ia64's unwinder does)

To simplify recursion tracking, let's use a page_owner recursion flag in 'struct task_struct'.

The change makes page_owner=on work on ia64 by avoiding infinite recursion in:

    kmalloc()
    -> __set_page_owner()
    -> save_stack()
    -> unwind()            [ia64-specific]
    -> build_script()
    -> kmalloc()
    -> __set_page_owner()  [we short-circuit here]
    -> save_stack()
    -> unwind()            [recursion]

Link: https://lkml.kernel.org/r/20210402115342.1463781-1-slyfox@gentoo.org
Signed-off-by: Sergei Trofimovich <slyfox@gentoo.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ben Segall <bsegall@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 8e9b16c476
parent 608b5d668c
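The mechanism the patch adopts is the classic per-task re-entrancy guard. As a minimal illustration only (a standalone userspace sketch where a C11 thread-local flag stands in for the new task_struct bit; all names below are hypothetical and not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    static _Thread_local bool in_tracker;   /* plays the role of current->in_page_owner */

    static void track_allocation(int depth)
    {
            if (in_tracker) {
                    /* Re-entered from our own bookkeeping: bail out at once,
                     * just as save_stack() now returns dummy_handle. */
                    return;
            }
            in_tracker = true;
            /* ... bookkeeping that may itself allocate and re-enter ... */
            if (depth < 3)
                    track_allocation(depth + 1);    /* simulated recursive entry */
            in_tracker = false;
    }

    int main(void)
    {
            track_allocation(0);
            puts("recursion short-circuited, no infinite loop");
            return 0;
    }

Unlike the old backtrace scan, the flag is checked before any stack walking happens, so it also covers unwinders that allocate.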
include/linux/sched.h:

@@ -841,6 +841,10 @@ struct task_struct {
 	/* Stalled due to lack of memory */
 	unsigned			in_memstall:1;
 #endif
+#ifdef CONFIG_PAGE_OWNER
+	/* Used by page_owner=on to detect recursion in page tracking. */
+	unsigned			in_page_owner:1;
+#endif
 
 	unsigned long			atomic_flags; /* Flags requiring atomic access. */
 
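Because in_page_owner is a one-bit bitfield placed alongside the existing single-bit flags such as in_memstall, it typically packs into the same storage unit and does not grow struct task_struct. A quick userspace sketch of that packing behaviour (hypothetical names, not kernel code):

    #include <stdio.h>

    struct flags_packed {
            unsigned in_memstall:1;
            unsigned in_page_owner:1;   /* shares the same 'unsigned' as the bit above */
    };

    int main(void)
    {
            /* Both one-bit fields fit in a single 'unsigned'; adding the
             * second bit does not change the struct size. */
            printf("%zu\n", sizeof(struct flags_packed));   /* typically prints 4 */
            return 0;
    }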
mm/page_owner.c:

@@ -98,42 +98,30 @@ static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
 	return (void *)page_ext + page_owner_ops.offset;
 }
 
-static inline bool check_recursive_alloc(unsigned long *entries,
-					 unsigned int nr_entries,
-					 unsigned long ip)
-{
-	unsigned int i;
-
-	for (i = 0; i < nr_entries; i++) {
-		if (entries[i] == ip)
-			return true;
-	}
-	return false;
-}
-
 static noinline depot_stack_handle_t save_stack(gfp_t flags)
 {
 	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
 	depot_stack_handle_t handle;
 	unsigned int nr_entries;
 
-	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
-
 	/*
-	 * We need to check recursion here because our request to
-	 * stackdepot could trigger memory allocation to save new
-	 * entry. New memory allocation would reach here and call
-	 * stack_depot_save_entries() again if we don't catch it. There is
-	 * still not enough memory in stackdepot so it would try to
-	 * allocate memory again and loop forever.
+	 * Avoid recursion.
+	 *
+	 * Sometimes page metadata allocation tracking requires more
+	 * memory to be allocated:
+	 * - when new stack trace is saved to stack depot
+	 * - when backtrace itself is calculated (ia64)
 	 */
-	if (check_recursive_alloc(entries, nr_entries, _RET_IP_))
+	if (current->in_page_owner)
 		return dummy_handle;
+	current->in_page_owner = 1;
 
+	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
 	handle = stack_depot_save(entries, nr_entries, flags);
 	if (!handle)
 		handle = failure_handle;
 
+	current->in_page_owner = 0;
 	return handle;
 }
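With the ia64 deadlock gone, page_owner can be exercised in the usual way: build with CONFIG_PAGE_OWNER=y, boot with the page_owner=on command line parameter, and dump the recorded allocation stacks from debugfs, e.g.:

    cat /sys/kernel/debug/page_owner > page_owner_full.txt

(see Documentation/vm/page_owner.rst; the page_owner_sort helper under tools/vm/ can aggregate the raw output). Recursive entries into save_stack() now simply record dummy_handle instead of looping.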