arm64 fixes for -rc3
- Evaluate uaccess macro arguments outside of the critical section

- Tighten up VM_BUG_ON() in pmd_populate_kernel() to avoid false positive

- Fix ftrace stack unwinding using HAVE_FUNCTION_GRAPH_RET_ADDR_PTR

-----BEGIN PGP SIGNATURE-----

iQFEBAABCgAuFiEEPxTL6PPUbjXGY88ct6xw3ITBYzQFAmGgqs4QHHdpbGxAa2Vy
bmVsLm9yZwAKCRC3rHDchMFjNBKAB/4r/+K4xjuP4x1CX7Tv3VRkwLvkEiHYdm64
Ljsf0e//AnezlWRJsR+MOKlp81bLcJu7Y3U+jkDYCjbJEWWwANC6/3dGmtmW4XPc
hgQtb+ngS/HjyVD83epSgtAo85L6xfOgExThYTWmiQGpwsyBnMAD21MQmPtllHjX
xvifkIYzLIUdw4orQv+RyY262kI26y2ugj4BdZ4KSzUiCJWv3T+Tywmf8q8S6a5W
s491oB/63bR24bytL3sni7ltDdg42/24arCoYOJQ8WCbvFDY9seCH0OX8jhbZzR/
pcn61SsaPF58MKcccQjjRnWv1rvy9sXGci1QoxWjmuC2CviJxovZ
=uDnd
-----END PGP SIGNATURE-----

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "Three arm64 fixes. The main one is a fix to the way in which we
  evaluate the macro arguments to our uaccess routines, which we _think_
  might be the root cause behind some unkillable tasks we've seen in the
  Android arm64 CI farm (testing is ongoing). In any case, it's worth
  fixing.

  Other than that, we've toned down an over-zealous VM_BUG_ON() and
  fixed ftrace stack unwinding in a bunch of cases.

  Summary:

   - Evaluate uaccess macro arguments outside of the critical section

   - Tighten up VM_BUG_ON() in pmd_populate_kernel() to avoid false
     positive

   - Fix ftrace stack unwinding using HAVE_FUNCTION_GRAPH_RET_ADDR_PTR"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: uaccess: avoid blocking within critical sections
  arm64: mm: Fix VM_BUG_ON(mm != &init_mm) for trans_pgd
  arm64: ftrace: use HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
commit f17fb26d4d
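Why argument evaluation matters here: if a uaccess macro expands `x` or
`ptr` inside the TTBR0 critical section, an argument expression that can
sleep ends up running while calling into the scheduler is unsafe. A
standalone sketch of the macro pitfall and its fix (illustrative
userspace C, not kernel code; critical_enter()/may_block() are
hypothetical stand-ins):

  #include <stdio.h>

  /* Stand-ins for the real critical-section primitives. */
  static void critical_enter(void) { puts("enter critical section"); }
  static void critical_exit(void)  { puts("exit critical section"); }

  /* BAD: `ptr` is expanded, and thus evaluated, inside the section. */
  #define GET_BAD(x, ptr)                             \
          do {                                        \
                  critical_enter();                   \
                  (x) = *(ptr);                       \
                  critical_exit();                    \
          } while (0)

  /* GOOD: evaluate the argument once, outside, then use the copy. */
  #define GET_GOOD(x, ptr)                            \
          do {                                        \
                  __typeof__(ptr) __p = (ptr);        \
                  critical_enter();                   \
                  (x) = *__p;                         \
                  critical_exit();                    \
          } while (0)

  static int *may_block(int *p)
  {
          puts("argument evaluated here (imagine this could sleep)");
          return p;
  }

  int main(void)
  {
          int v, src = 42;

          GET_BAD(v, may_block(&src));  /* may_block() runs inside the section */
          GET_GOOD(v, may_block(&src)); /* may_block() runs before enter */
          printf("%d\n", v);
          return 0;
  }

The uaccess.h hunks below apply exactly this pattern to the real macros.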
arch/arm64/include/asm/ftrace.h
@@ -12,6 +12,17 @@
 
 #define HAVE_FUNCTION_GRAPH_FP_TEST
 
+/*
+ * HAVE_FUNCTION_GRAPH_RET_ADDR_PTR means that the architecture can provide a
+ * "return address pointer" which can be used to uniquely identify a return
+ * address which has been overwritten.
+ *
+ * On arm64 we use the address of the caller's frame record, which remains the
+ * same for the lifetime of the instrumented function, unlike the return
+ * address in the LR.
+ */
+#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
+
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 #define ARCH_SUPPORTS_FTRACE_OPS 1
 #else
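To see why a return address *pointer* is needed: once the graph tracer is
active, every hooked frame's saved LR holds the same value (the address
of return_to_handler), so the value alone cannot identify a frame; the
*location* that held the LR can. A self-contained userspace model of the
lookup scheme (illustrative only; shadow_entry, hook() and ret_addr() are
invented names, not kernel structures):

  #include <stdio.h>

  #define RETURN_TO_HANDLER 0xdeadbeefUL

  struct shadow_entry {
          unsigned long ret;   /* original return address */
          unsigned long *retp; /* slot that now holds the sentinel */
  };

  static struct shadow_entry shadow[16];
  static int depth;

  static void hook(unsigned long *slot)
  {
          shadow[depth].ret = *slot;     /* save the real LR value */
          shadow[depth].retp = slot;     /* remember where it lived */
          depth++;
          *slot = RETURN_TO_HANDLER;     /* all slots now look alike */
  }

  static unsigned long ret_addr(unsigned long pc, unsigned long *retp)
  {
          if (pc != RETURN_TO_HANDLER)
                  return pc;
          for (int i = depth - 1; i >= 0; i--)
                  if (shadow[i].retp == retp) /* unique per live frame */
                          return shadow[i].ret;
          return pc;
  }

  int main(void)
  {
          unsigned long frame_a = 0x1111, frame_b = 0x2222; /* fake frame records */

          hook(&frame_a);
          hook(&frame_b);
          /* Both slots hold the sentinel, yet each resolves to its own LR. */
          printf("%lx %lx\n", ret_addr(frame_a, &frame_a),
                 ret_addr(frame_b, &frame_b));
          return 0;
  }

On arm64 the frame record plays the role of the slot, which is why the
stacktrace.c hunk below passes frame->fp to ftrace_graph_ret_addr().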
arch/arm64/include/asm/pgalloc.h
@@ -76,7 +76,7 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
 
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
 {
-	VM_BUG_ON(mm != &init_mm);
+	VM_BUG_ON(mm && mm != &init_mm);
 	__pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE | PMD_TABLE_UXN);
 }
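The relaxed predicate keeps rejecting real user mms while tolerating
callers that have no mm context at all, such as the trans_pgd code that
builds page-table copies for kexec/hibernate. A hedged fragment showing
the three caller classes (illustrative, not a kernel excerpt; user_mm is
hypothetical):

  #include <asm/pgalloc.h>

  /* Illustrative only: how the relaxed assertion treats each caller. */
  static void pgalloc_check_examples(struct mm_struct *user_mm,
                                     pmd_t *pmdp, pte_t *ptep)
  {
          pmd_populate_kernel(&init_mm, pmdp, ptep); /* kernel tables: OK */
          pmd_populate_kernel(NULL, pmdp, ptep);     /* mm-less (trans_pgd): OK now */
          pmd_populate_kernel(user_mm, pmdp, ptep);  /* user mm: VM_BUG_ON fires */
  }

User page tables go through pmd_populate() instead, which (if memory
serves) applies PMD_TABLE_PXN rather than the PMD_TABLE_UXN attribute
set here.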
arch/arm64/include/asm/stacktrace.h
@@ -47,9 +47,6 @@ struct stack_info {
  * @prev_type:   The type of stack this frame record was on, or a synthetic
  *               value of STACK_TYPE_UNKNOWN. This is used to detect a
  *               transition from one stack to another.
- *
- * @graph:       When FUNCTION_GRAPH_TRACER is selected, holds the index of a
- *               replacement lr value in the ftrace graph stack.
  */
 struct stackframe {
 	unsigned long fp;
@@ -57,9 +54,6 @@ struct stackframe {
 	DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
 	unsigned long prev_fp;
 	enum stack_type prev_type;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	int graph;
-#endif
 #ifdef CONFIG_KRETPROBES
 	struct llist_node *kr_cur;
 #endif
arch/arm64/include/asm/uaccess.h
@@ -281,12 +281,22 @@ do {									\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
 } while (0)
 
+/*
+ * We must not call into the scheduler between uaccess_ttbr0_enable() and
+ * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
+ * we must evaluate these outside of the critical section.
+ */
 #define __raw_get_user(x, ptr, err)					\
 do {									\
+	__typeof__(*(ptr)) __user *__rgu_ptr = (ptr);			\
+	__typeof__(x) __rgu_val;					\
 	__chk_user_ptr(ptr);						\
+									\
 	uaccess_ttbr0_enable();						\
-	__raw_get_mem("ldtr", x, ptr, err);				\
+	__raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err);		\
 	uaccess_ttbr0_disable();					\
+									\
+	(x) = __rgu_val;						\
 } while (0)
 
 #define __get_user_error(x, ptr, err)					\
@@ -310,14 +320,22 @@ do {									\
 
 #define get_user	__get_user
 
+/*
+ * We must not call into the scheduler between __uaccess_enable_tco_async() and
+ * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
+ * functions, we must evaluate these outside of the critical section.
+ */
 #define __get_kernel_nofault(dst, src, type, err_label)			\
 do {									\
+	__typeof__(dst) __gkn_dst = (dst);				\
+	__typeof__(src) __gkn_src = (src);				\
 	int __gkn_err = 0;						\
 									\
 	__uaccess_enable_tco_async();					\
-	__raw_get_mem("ldr", *((type *)(dst)),				\
-		      (__force type *)(src), __gkn_err);		\
+	__raw_get_mem("ldr", *((type *)(__gkn_dst)),			\
+		      (__force type *)(__gkn_src), __gkn_err);		\
 	__uaccess_disable_tco_async();					\
+									\
 	if (unlikely(__gkn_err))					\
 		goto err_label;						\
 } while (0)
@@ -351,11 +369,19 @@ do {									\
 	}								\
 } while (0)
 
+/*
+ * We must not call into the scheduler between uaccess_ttbr0_enable() and
+ * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
+ * we must evaluate these outside of the critical section.
+ */
 #define __raw_put_user(x, ptr, err)					\
 do {									\
-	__chk_user_ptr(ptr);						\
+	__typeof__(*(ptr)) __user *__rpu_ptr = (ptr);			\
+	__typeof__(*(ptr)) __rpu_val = (x);				\
+	__chk_user_ptr(__rpu_ptr);					\
+									\
 	uaccess_ttbr0_enable();						\
-	__raw_put_mem("sttr", x, ptr, err);				\
+	__raw_put_mem("sttr", __rpu_val, __rpu_ptr, err);		\
 	uaccess_ttbr0_disable();					\
 } while (0)
 
@@ -380,14 +406,22 @@ do {									\
 
 #define put_user	__put_user
 
+/*
+ * We must not call into the scheduler between __uaccess_enable_tco_async() and
+ * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
+ * functions, we must evaluate these outside of the critical section.
+ */
 #define __put_kernel_nofault(dst, src, type, err_label)			\
 do {									\
+	__typeof__(dst) __pkn_dst = (dst);				\
+	__typeof__(src) __pkn_src = (src);				\
 	int __pkn_err = 0;						\
 									\
 	__uaccess_enable_tco_async();					\
-	__raw_put_mem("str", *((type *)(src)),				\
-		      (__force type *)(dst), __pkn_err);		\
+	__raw_put_mem("str", *((type *)(__pkn_src)),			\
+		      (__force type *)(__pkn_dst), __pkn_err);		\
 	__uaccess_disable_tco_async();					\
+									\
 	if (unlikely(__pkn_err))					\
 		goto err_label;						\
 } while(0)
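For reference, a hedged sketch of how the *_nofault macros are driven
(modelled on callers such as mm/maccess.c): err_label is a goto target,
and after this change the dst/src expressions are guaranteed to be
evaluated before the TCO-async window opens. peek_kernel_long() is a
hypothetical helper, not an existing kernel function:

  #include <linux/uaccess.h>
  #include <linux/errno.h>

  /* Read a long from a kernel address without faulting; on a bad
   * access the macro branches to the label instead of returning. */
  static long peek_kernel_long(const long *addr, long *out)
  {
          long val;

          __get_kernel_nofault(&val, addr, long, Efault);
          *out = val;
          return 0;
  Efault:
          return -EFAULT;
  }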
arch/arm64/kernel/ftrace.c
@@ -244,8 +244,6 @@ void arch_ftrace_update_code(int command)
  * on the way back to parent. For this purpose, this function is called
  * in _mcount() or ftrace_caller() to replace return address (*parent) on
  * the call stack to return_to_handler.
- *
- * Note that @frame_pointer is used only for sanity check later.
  */
 void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
 			   unsigned long frame_pointer)
@@ -263,8 +261,10 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
 	 */
 	old = *parent;
 
-	if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
+	if (!function_graph_enter(old, self_addr, frame_pointer,
+	    (void *)frame_pointer)) {
 		*parent = return_hooker;
+	}
 }
 
 #ifdef CONFIG_DYNAMIC_FTRACE
arch/arm64/kernel/stacktrace.c
@@ -38,9 +38,6 @@ void start_backtrace(struct stackframe *frame, unsigned long fp,
 {
 	frame->fp = fp;
 	frame->pc = pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame->graph = 0;
-#endif
 #ifdef CONFIG_KRETPROBES
 	frame->kr_cur = NULL;
 #endif
@@ -116,20 +113,23 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 	frame->prev_fp = fp;
 	frame->prev_type = info.type;
 
+	frame->pc = ptrauth_strip_insn_pac(frame->pc);
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	if (tsk->ret_stack &&
-		(ptrauth_strip_insn_pac(frame->pc) == (unsigned long)return_to_handler)) {
-		struct ftrace_ret_stack *ret_stack;
+		(frame->pc == (unsigned long)return_to_handler)) {
+		unsigned long orig_pc;
 		/*
 		 * This is a case where function graph tracer has
 		 * modified a return address (LR) in a stack frame
 		 * to hook a function return.
 		 * So replace it to an original value.
 		 */
-		ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
-		if (WARN_ON_ONCE(!ret_stack))
+		orig_pc = ftrace_graph_ret_addr(tsk, NULL, frame->pc,
+						(void *)frame->fp);
+		if (WARN_ON_ONCE(frame->pc == orig_pc))
 			return -EINVAL;
-		frame->pc = ret_stack->ret;
+		frame->pc = orig_pc;
 	}
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 #ifdef CONFIG_KRETPROBES
@@ -137,8 +137,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 	frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur);
 #endif
 
-	frame->pc = ptrauth_strip_insn_pac(frame->pc);
-
 	return 0;
 }
 NOKPROBE_SYMBOL(unwind_frame);
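Finally, a hedged sketch of a caller-side walk, assuming the then-current
arm64 unwind API (start_backtrace()/unwind_frame()); dump_pcs() is a
hypothetical helper. Each step now reports the original return address
even for frames hooked by the graph tracer, with PAC bits stripped
before the return_to_handler comparison:

  #include <linux/sched.h>
  #include <linux/printk.h>
  #include <asm/stacktrace.h>

  static void dump_pcs(struct task_struct *tsk, unsigned long fp,
                       unsigned long pc)
  {
          struct stackframe frame;

          start_backtrace(&frame, fp, pc);
          do {
                  /* frame.pc is un-hooked/PAC-stripped by unwind_frame() */
                  printk("%pS\n", (void *)frame.pc);
          } while (!unwind_frame(tsk, &frame));
  }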