Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 FPU updates from Ingo Molnar:
 "x86 FPU handling fixes, cleanups and enhancements from Oleg.

  The signal handling race fix and the __restore_xstate_sig() preemption
  fix for eager-mode are marked for -stable as well."

* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86: copy_thread: Don't nullify ->ptrace_bps twice
  x86, fpu: Shift "fpu_counter = 0" from copy_thread() to arch_dup_task_struct()
  x86, fpu: copy_process: Sanitize fpu->last_cpu initialization
  x86, fpu: copy_process: Avoid fpu_alloc/copy if !used_math()
  x86, fpu: Change __thread_fpu_begin() to use use_eager_fpu()
  x86, fpu: __restore_xstate_sig()->math_state_restore() needs preempt_disable()
  x86, fpu: shift drop_init_fpu() from save_xstate_sig() to handle_signal()
commit c7b228adca
@@ -344,7 +344,7 @@ static inline void __thread_fpu_end(struct task_struct *tsk)
 
 static inline void __thread_fpu_begin(struct task_struct *tsk)
 {
-        if (!static_cpu_has_safe(X86_FEATURE_EAGER_FPU))
+        if (!use_eager_fpu())
                 clts();
         __thread_set_has_fpu(tsk);
 }
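
For context on the hunk above: in kernels of this generation, use_eager_fpu() is, as far as I recall, just a thin wrapper around the same cpufeature test, so switching away from the open-coded static_cpu_has_safe(X86_FEATURE_EAGER_FPU) is a readability cleanup rather than a behavioural change. Treat the definition below as an assumption about arch/x86/include/asm/fpu-internal.h of roughly this era, not a quote from this tree:

    /*
     * Assumed definition of the helper the hunk switches to.
     * Returns true when the kernel keeps the FPU state loaded eagerly.
     */
    static __always_inline __pure bool use_eager_fpu(void)
    {
            return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
    }
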
@@ -64,14 +64,16 @@ EXPORT_SYMBOL_GPL(task_xstate_cachep);
  */
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
-        int ret;
-
         *dst = *src;
-        if (fpu_allocated(&src->thread.fpu)) {
-                memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
-                ret = fpu_alloc(&dst->thread.fpu);
-                if (ret)
-                        return ret;
+
+        dst->thread.fpu_counter = 0;
+        dst->thread.fpu.has_fpu = 0;
+        dst->thread.fpu.last_cpu = ~0;
+        dst->thread.fpu.state = NULL;
+        if (tsk_used_math(src)) {
+                int err = fpu_alloc(&dst->thread.fpu);
+                if (err)
+                        return err;
                 fpu_copy(dst, src);
         }
         return 0;
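
Pieced together from the context and '+' lines of the hunk above (which appears to be arch/x86/kernel/process.c, given arch_dup_task_struct()), the function ends up reading roughly as follows; the layout and comments are mine:

    int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
    {
            *dst = *src;

            /* Give the child clean per-task FPU bookkeeping ... */
            dst->thread.fpu_counter = 0;
            dst->thread.fpu.has_fpu = 0;
            dst->thread.fpu.last_cpu = ~0;
            dst->thread.fpu.state = NULL;

            /* ... and allocate/copy FPU state only if the parent ever used math. */
            if (tsk_used_math(src)) {
                    int err = fpu_alloc(&dst->thread.fpu);
                    if (err)
                            return err;
                    fpu_copy(dst, src);
            }
            return 0;
    }
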
@@ -138,6 +138,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 
         p->thread.sp = (unsigned long) childregs;
         p->thread.sp0 = (unsigned long) (childregs+1);
+        memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
 
         if (unlikely(p->flags & PF_KTHREAD)) {
                 /* kernel thread */
@@ -152,9 +153,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
                 childregs->orig_ax = -1;
                 childregs->cs = __KERNEL_CS | get_kernel_rpl();
                 childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
-                p->thread.fpu_counter = 0;
                 p->thread.io_bitmap_ptr = NULL;
-                memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
                 return 0;
         }
         *childregs = *current_pt_regs();
@@ -165,13 +164,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
         p->thread.ip = (unsigned long) ret_from_fork;
         task_user_gs(p) = get_user_gs(current_pt_regs());
 
-        p->thread.fpu_counter = 0;
         p->thread.io_bitmap_ptr = NULL;
         tsk = current;
         err = -ENOMEM;
 
-        memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
-
         if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
                 p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
                                                   IO_BITMAP_BYTES, GFP_KERNEL);
@@ -163,7 +163,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
         p->thread.sp = (unsigned long) childregs;
         p->thread.usersp = me->thread.usersp;
         set_tsk_thread_flag(p, TIF_FORK);
-        p->thread.fpu_counter = 0;
         p->thread.io_bitmap_ptr = NULL;
 
         savesegment(gs, p->thread.gsindex);
@@ -193,8 +192,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
                 childregs->sp = sp;
 
         err = -ENOMEM;
-        memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
-
         if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
                 p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
                                                   IO_BITMAP_BYTES, GFP_KERNEL);
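
Taken together with the arch_dup_task_struct() hunk earlier, the four copy_thread() hunks above (which read like the 32-bit and 64-bit variants of the function; the exact file split is my inference) match the first two commits in the pull list: ->fpu_counter moves to arch_dup_task_struct(), and ->ptrace_bps is no longer nullified twice. A minimal summary sketch, comments mine:

    /* Runs for every fork, before copy_thread(): */
    dst->thread.fpu_counter = 0;    /* previously cleared again in both copy_thread() variants */

    /* In copy_thread(), the breakpoint array is now cleared only once:
     * 32-bit: by the memset() added ahead of the PF_KTHREAD/user split;
     * 64-bit: the duplicate after "err = -ENOMEM;" is simply dropped. */
    memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
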
@@ -675,6 +675,11 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
                  * handler too.
                  */
                 regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF);
+                /*
+                 * Ensure the signal handler starts with the new fpu state.
+                 */
+                if (used_math())
+                        drop_init_fpu(current);
         }
         signal_setup_done(failed, ksig, test_thread_flag(TIF_SINGLESTEP));
 }
@@ -271,8 +271,6 @@ int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
         if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
                 return -1;
 
-        drop_init_fpu(tsk);     /* trigger finit */
-
         return 0;
 }
 
@@ -402,8 +400,11 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
                         set_used_math();
                 }
 
-                if (use_eager_fpu())
+                if (use_eager_fpu()) {
+                        preempt_disable();
                         math_state_restore();
+                        preempt_enable();
+                }
 
                 return err;
         } else {
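
As for the final hunk: math_state_restore() does __thread_fpu_begin() and then restores the FPU registers, and, as I understand the "__restore_xstate_sig()->math_state_restore() needs preempt_disable()" commit in the list above, a preemption between those two steps can lose or corrupt the just-restored state in eager-FPU mode; hence the explicit preempt_disable()/preempt_enable() pair. Reconstructed from the hunk, the eager-mode tail of the ia32_fxstate path now reads roughly as below (a sketch, not a quote from the tree; the comment is mine):

                if (use_eager_fpu()) {
                        /*
                         * Don't let a preemption hit between __thread_fpu_begin()
                         * and the register restore inside math_state_restore().
                         */
                        preempt_disable();
                        math_state_restore();
                        preempt_enable();
                }

                return err;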