rseq: Avoid infinite recursion when delivering SIGSEGV
When delivering a signal to a task that is using rseq, we call into
__rseq_handle_notify_resume() so that the registers pushed in the
sigframe are updated to reflect the state of the restartable sequence
(for example, ensuring that the signal returns to the abort handler if
necessary).

However, if the rseq management fails due to an unrecoverable fault when
accessing userspace or certain combinations of RSEQ_CS_* flags, then we
will attempt to deliver a SIGSEGV. This has the potential for infinite
recursion if the rseq code continuously fails on signal delivery.

Avoid this problem by using force_sigsegv() instead of force_sig(), which
is explicitly designed to reset the SEGV handler to SIG_DFL in the case
of a recursive fault. In doing so, remove rseq_signal_deliver() from the
internal rseq API and have an optional struct ksignal * parameter to
rseq_handle_notify_resume() instead.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: peterz@infradead.org
Cc: paulmck@linux.vnet.ibm.com
Cc: boqun.feng@gmail.com
Link: https://lkml.kernel.org/r/1529664307-983-1-git-send-email-will.deacon@arm.com
commit 784e0300fe
parent 9a789fcfe8
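
For context on "a task that is using rseq": a thread opts in by registering a
struct rseq area with the kernel, after which every signal delivered to it is
routed through rseq_signal_deliver() before the handler runs; that is the path
this commit changes. The sketch below is purely illustrative and not part of
the commit. It assumes a v4.18+ kernel with CONFIG_RSEQ, userspace headers
that define __NR_rseq, and a C library that has not already registered rseq
itself (the syscall then fails with EBUSY). MY_RSEQ_SIG is an arbitrary
signature chosen here for illustration.

    #define _GNU_SOURCE
    #include <linux/rseq.h>      /* uapi struct rseq */
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Arbitrary abort-handler signature for this sketch; real users pick
     * one value and tag every abort handler with it. */
    #define MY_RSEQ_SIG 0x53053053u

    static struct rseq rs; /* the uapi type is already 32-byte aligned */

    int main(void)
    {
            /* rseq(rseq, rseq_len, flags, sig) */
            if (syscall(__NR_rseq, &rs, sizeof(rs), 0, MY_RSEQ_SIG) != 0) {
                    perror("rseq");
                    return 1;
            }
            /* From here on, signal delivery to this thread goes through
             * the kernel's rseq fixup before the handler is entered. */
            printf("rseq registered, running on cpu %u\n", rs.cpu_id);
            return 0;
    }
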
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
@@ -544,7 +544,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
          * Increment event counter and perform fixup for the pre-signal
          * frame.
          */
-        rseq_signal_deliver(regs);
+        rseq_signal_deliver(ksig, regs);
 
         /*
          * Set up the stack frame
@@ -666,7 +666,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
                         } else {
                                 clear_thread_flag(TIF_NOTIFY_RESUME);
                                 tracehook_notify_resume(regs);
-                                rseq_handle_notify_resume(regs);
+                                rseq_handle_notify_resume(NULL, regs);
                         }
                 }
                 local_irq_disable();
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
@@ -134,7 +134,7 @@ static void do_signal(struct task_struct *tsk)
         /* Re-enable the breakpoints for the signal stack */
         thread_change_pc(tsk, tsk->thread.regs);
 
-        rseq_signal_deliver(tsk->thread.regs);
+        rseq_signal_deliver(&ksig, tsk->thread.regs);
 
         if (is32) {
                 if (ksig.ka.sa.sa_flags & SA_SIGINFO)
@@ -170,7 +170,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
         if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                 clear_thread_flag(TIF_NOTIFY_RESUME);
                 tracehook_notify_resume(regs);
-                rseq_handle_notify_resume(regs);
+                rseq_handle_notify_resume(NULL, regs);
         }
 
         user_enter();
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
@@ -164,7 +164,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
                 if (cached_flags & _TIF_NOTIFY_RESUME) {
                         clear_thread_flag(TIF_NOTIFY_RESUME);
                         tracehook_notify_resume(regs);
-                        rseq_handle_notify_resume(regs);
+                        rseq_handle_notify_resume(NULL, regs);
                 }
 
                 if (cached_flags & _TIF_USER_RETURN_NOTIFY)
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
@@ -692,7 +692,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
          * Increment event counter and perform fixup for the pre-signal
          * frame.
          */
-        rseq_signal_deliver(regs);
+        rseq_signal_deliver(ksig, regs);
 
         /* Set up the stack frame */
         if (is_ia32_frame(ksig)) {
diff --git a/include/linux/sched.h b/include/linux/sched.h
@@ -1799,20 +1799,22 @@ static inline void rseq_set_notify_resume(struct task_struct *t)
                 set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
 }
 
-void __rseq_handle_notify_resume(struct pt_regs *regs);
+void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
 
-static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+                                             struct pt_regs *regs)
 {
         if (current->rseq)
-                __rseq_handle_notify_resume(regs);
+                __rseq_handle_notify_resume(ksig, regs);
 }
 
-static inline void rseq_signal_deliver(struct pt_regs *regs)
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+                                       struct pt_regs *regs)
 {
         preempt_disable();
         __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
         preempt_enable();
-        rseq_handle_notify_resume(regs);
+        rseq_handle_notify_resume(ksig, regs);
 }
 
 /* rseq_preempt() requires preemption to be disabled. */
@@ -1861,10 +1863,12 @@ static inline void rseq_execve(struct task_struct *t)
 static inline void rseq_set_notify_resume(struct task_struct *t)
 {
 }
-static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+                                             struct pt_regs *regs)
 {
 }
-static inline void rseq_signal_deliver(struct pt_regs *regs)
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+                                       struct pt_regs *regs)
 {
 }
 static inline void rseq_preempt(struct task_struct *t)
diff --git a/kernel/rseq.c b/kernel/rseq.c
@@ -251,10 +251,10 @@ static int rseq_ip_fixup(struct pt_regs *regs)
  * respect to other threads scheduled on the same CPU, and with respect
  * to signal handlers.
  */
-void __rseq_handle_notify_resume(struct pt_regs *regs)
+void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs)
 {
         struct task_struct *t = current;
-        int ret;
+        int ret, sig;
 
         if (unlikely(t->flags & PF_EXITING))
                 return;
@@ -268,7 +268,8 @@ void __rseq_handle_notify_resume(struct pt_regs *regs)
                 return;
 
 error:
-        force_sig(SIGSEGV, t);
+        sig = ksig ? ksig->sig : 0;
+        force_sigsegv(sig, t);
 }
 
 #ifdef CONFIG_DEBUG_RSEQ
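
Why force_sigsegv() breaks the recursion: the sketch below paraphrases its
behaviour in kernels of this era (see kernel/signal.c for the authoritative
version; this is not a verbatim copy). If the signal whose delivery just
failed is itself SIGSEGV, the user's handler is reset to SIG_DFL before the
signal is forced, so the next failed delivery kills the task instead of
re-entering the handler, and thus the rseq fixup, forever.

    /* Paraphrase of force_sigsegv() circa v4.18 (kernel/signal.c). */
    int force_sigsegv(int sig, struct task_struct *p)
    {
            if (sig == SIGSEGV) {
                    unsigned long flags;

                    /* Reset the user's SEGV handler so a recursive fault
                     * takes the default (fatal) action next time around. */
                    spin_lock_irqsave(&p->sighand->siglock, flags);
                    p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
                    spin_unlock_irqrestore(&p->sighand->siglock, flags);
            }
            force_sig(SIGSEGV, p);
            return 0;
    }

This is also why the hunk above computes sig = ksig ? ksig->sig : 0: on the
TIF_NOTIFY_RESUME path there is no signal in flight, so sig is 0 and
force_sigsegv() degenerates to plain force_sig(SIGSEGV). The handler reset
only triggers in the recursive case, where the delivery that failed was
itself a SIGSEGV.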