x86/dumpstack/64: Add noinstr version of get_stack_info()
The get_stack_info() functionality is needed in the entry code for the #VC exception handler. Provide a version of it in the .text.noinstr section which can be called safely from there.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20200907131613.12703-45-joro@8bytes.org
commit 6b27edd74a
parent 315562c9af
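Because the #VC handler runs from the entry path, any stack-walking helper it relies on must itself live in the noinstr text section. Below is a minimal sketch of the kind of check such entry code could perform with the new helper; the function vc_stack_is_valid() is a hypothetical illustration for this page, not part of this patch or of the rest of the SEV-ES series.

#include <linux/sched.h>
#include <asm/stacktrace.h>

/*
 * Hypothetical example: verify that a stack pointer seen by entry code
 * lies within one of the known kernel stacks of the current task.
 * Only the noinstr variant added by this patch may be called here.
 */
static noinstr bool vc_stack_is_valid(unsigned long sp)
{
	struct stack_info info;

	return get_stack_info_noinstr((unsigned long *)sp, current, &info);
}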
arch/x86/include/asm/stacktrace.h
@@ -35,6 +35,8 @@ bool in_entry_stack(unsigned long *stack, struct stack_info *info);
 
 int get_stack_info(unsigned long *stack, struct task_struct *task,
 		   struct stack_info *info, unsigned long *visit_mask);
+bool get_stack_info_noinstr(unsigned long *stack, struct task_struct *task,
+			    struct stack_info *info);
 
 const char *stack_type_name(enum stack_type type);
 
arch/x86/kernel/dumpstack.c
@@ -29,8 +29,8 @@ static int die_counter;
 
 static struct pt_regs exec_summary_regs;
 
-bool in_task_stack(unsigned long *stack, struct task_struct *task,
-		   struct stack_info *info)
+bool noinstr in_task_stack(unsigned long *stack, struct task_struct *task,
+			   struct stack_info *info)
 {
 	unsigned long *begin = task_stack_page(task);
 	unsigned long *end   = task_stack_page(task) + THREAD_SIZE;
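For orientation: noinstr is not introduced by this patch. It is the existing kernel annotation that places a function in the dedicated noinstr text section and disables tracing and sanitizer instrumentation for it, so the function can run before entry code has established a fully instrumentable context. The definition below is an approximation of the macro in include/linux/compiler_types.h around the time of this series; later kernels add further flags.

/* Approximate definition, shown for reference only */
#define noinstr								\
	noinline notrace __attribute((__section__(".noinstr.text")))	\
	__no_kcsan __no_sanitize_address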
@@ -46,7 +46,8 @@ bool in_task_stack(unsigned long *stack, struct task_struct *task,
 	return true;
 }
 
-bool in_entry_stack(unsigned long *stack, struct stack_info *info)
+/* Called from get_stack_info_noinstr - so must be noinstr too */
+bool noinstr in_entry_stack(unsigned long *stack, struct stack_info *info)
 {
 	struct entry_stack *ss = cpu_entry_stack(smp_processor_id());
 
arch/x86/kernel/dumpstack_64.c
@@ -85,7 +85,7 @@ struct estack_pages estack_pages[CEA_ESTACK_PAGES] ____cacheline_aligned = {
 	EPAGERANGE(VC2),
 };
 
-static bool in_exception_stack(unsigned long *stack, struct stack_info *info)
+static __always_inline bool in_exception_stack(unsigned long *stack, struct stack_info *info)
 {
 	unsigned long begin, end, stk = (unsigned long)stack;
 	const struct estack_pages *ep;
@@ -126,7 +126,7 @@ static bool in_exception_stack(unsigned long *stack, struct stack_info *info)
 	return true;
 }
 
-static bool in_irq_stack(unsigned long *stack, struct stack_info *info)
+static __always_inline bool in_irq_stack(unsigned long *stack, struct stack_info *info)
 {
 	unsigned long *end   = (unsigned long *)this_cpu_read(hardirq_stack_ptr);
 	unsigned long *begin = end - (IRQ_STACK_SIZE / sizeof(long));
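in_exception_stack() and in_irq_stack() are static helpers that will be called from the new noinstr get_stack_info_noinstr() below. Marking them __always_inline rather than noinstr keeps them out of instrumentable text by folding their bodies into the noinstr caller, so no call into regular .text is emitted. A generic sketch of the pattern (not code from this patch):

/* Generic illustration of the __always_inline-into-noinstr pattern */
static __always_inline bool helper(unsigned long *p)
{
	return p != NULL;	/* stands in for the real range check */
}

bool noinstr caller(unsigned long *p)
{
	/* helper() is inlined here, so its code lands in the noinstr section */
	return helper(p);
}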
@@ -151,32 +151,38 @@ static bool in_irq_stack(unsigned long *stack, struct stack_info *info)
 	return true;
 }
 
-int get_stack_info(unsigned long *stack, struct task_struct *task,
-		   struct stack_info *info, unsigned long *visit_mask)
+bool noinstr get_stack_info_noinstr(unsigned long *stack, struct task_struct *task,
+				    struct stack_info *info)
 {
-	if (!stack)
-		goto unknown;
+	if (in_task_stack(stack, task, info))
+		return true;
 
-	task = task ? : current;
+	if (task != current)
+		return false;
 
-	if (in_task_stack(stack, task, info))
-		goto recursion_check;
+	if (in_exception_stack(stack, info))
+		return true;
 
-	if (task != current)
-		goto unknown;
+	if (in_irq_stack(stack, info))
+		return true;
 
-	if (in_exception_stack(stack, info))
-		goto recursion_check;
+	if (in_entry_stack(stack, info))
+		return true;
 
-	if (in_irq_stack(stack, info))
-		goto recursion_check;
+	return false;
+}
 
-	if (in_entry_stack(stack, info))
-		goto recursion_check;
+int get_stack_info(unsigned long *stack, struct task_struct *task,
+		   struct stack_info *info, unsigned long *visit_mask)
+{
+	task = task ? : current;
 
-	goto unknown;
+	if (!stack)
+		goto unknown;
+
+	if (!get_stack_info_noinstr(stack, task, info))
+		goto unknown;
 
-recursion_check:
 	/*
 	 * Make sure we don't iterate through any given stack more than once.
 	 * If it comes up a second time then there's something wrong going on:
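The split keeps the two concerns apart: get_stack_info_noinstr() only classifies which stack the pointer lies on and returns a bool, while the instrumentable get_stack_info() wrapper retains the NULL check, the task defaulting and the visit_mask recursion check used by the unwinder. A sketch of an ordinary, non-entry caller; the names here are made up for illustration:

#include <linux/printk.h>
#include <linux/sched.h>
#include <asm/stacktrace.h>

/* Illustrative non-entry caller of the instrumentable wrapper */
static void show_stack_bounds(unsigned long *sp, struct task_struct *task)
{
	struct stack_info info;
	unsigned long visit_mask = 0;

	if (get_stack_info(sp, task, &info, &visit_mask) != 0)
		return;		/* unknown stack, or one visited before */

	pr_info("stack %px lies in [%px, %px)\n", sp, info.begin, info.end);
}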
arch/x86/mm/cpu_entry_area.c
@@ -21,7 +21,8 @@ DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
 DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);
 #endif
 
-struct cpu_entry_area *get_cpu_entry_area(int cpu)
+/* Is called from entry code, so must be noinstr */
+noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
 {
 	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
 	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
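get_cpu_entry_area() has to move along with the rest of the chain: the now-noinstr in_entry_stack() reaches it through cpu_entry_stack(), which in asm/cpu_entry_area.h is essentially the macro below, so an instrumentable get_cpu_entry_area() would reintroduce a call from noinstr code into regular text.

/* Shown for reference; defined in arch/x86/include/asm/cpu_entry_area.h */
#define cpu_entry_stack(cpu) \
	(&get_cpu_entry_area(cpu)->entry_stack_page.stack)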