arch: setup PF_IO_WORKER threads like PF_KTHREAD
PF_IO_WORKER threads are kernel threads too, but they aren't PF_KTHREAD in the sense that we don't assign ->set_child_tid to our own structure the way kthreads do. Just ensure that every arch sets up PF_IO_WORKER threads like kthreads in its implementation of copy_thread().

Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 4727dc20e0
parent 958234d5ec
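The same one-line change repeats in every hunk below: the kernel-thread branch of each arch's copy_thread() now also matches PF_IO_WORKER, so io_uring worker threads get a zeroed register frame instead of trying to inherit user register state. A minimal user-space sketch of that decision; the flag values and the child_setup() helper are illustrative stand-ins, not the kernel's real definitions:

    #include <stdio.h>

    /* Illustrative stand-ins; the real PF_* values live in <linux/sched.h>. */
    #define PF_KTHREAD   (1u << 0)
    #define PF_IO_WORKER (1u << 1)

    /* Sketch of the branch every copy_thread() takes after this patch:
     * kthreads and io_uring workers share the "no user state" path. */
    static const char *child_setup(unsigned int flags)
    {
            if (flags & (PF_KTHREAD | PF_IO_WORKER))
                    return "zero the child's pt_regs, run a kernel function";
            return "copy the parent's pt_regs, return 0 in the child";
    }

    int main(void)
    {
            printf("user thread: %s\n", child_setup(0));
            printf("kthread:     %s\n", child_setup(PF_KTHREAD));
            printf("io worker:   %s\n", child_setup(PF_IO_WORKER));
            return 0;
    }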
@@ -249,7 +249,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
         childti->pcb.ksp = (unsigned long) childstack;
         childti->pcb.flags = 1;        /* set FEN, clear everything else */
 
-        if (unlikely(p->flags & PF_KTHREAD)) {
+        if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
                 /* kernel thread */
                 memset(childstack, 0,
                         sizeof(struct switch_stack) + sizeof(struct pt_regs));
@@ -191,7 +191,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
         childksp[0] = 0;                        /* fp */
         childksp[1] = (unsigned long)ret_from_fork; /* blink */
 
-        if (unlikely(p->flags & PF_KTHREAD)) {
+        if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
                 memset(c_regs, 0, sizeof(struct pt_regs));
 
                 c_callee->r13 = kthread_arg;
@@ -243,7 +243,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
         thread->cpu_domain = get_domain();
 #endif
 
-        if (likely(!(p->flags & PF_KTHREAD))) {
+        if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) {
                 *childregs = *current_pt_regs();
                 childregs->ARM_r0 = 0;
                 if (stack_start)
@@ -398,7 +398,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 
         ptrauth_thread_init_kernel(p);
 
-        if (likely(!(p->flags & PF_KTHREAD))) {
+        if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) {
                 *childregs = *current_pt_regs();
                 childregs->regs[0] = 0;
 
@@ -49,7 +49,7 @@ int copy_thread(unsigned long clone_flags,
         /* setup thread.sp for switch_to !!! */
         p->thread.sp = (unsigned long)childstack;
 
-        if (unlikely(p->flags & PF_KTHREAD)) {
+        if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
                 memset(childregs, 0, sizeof(struct pt_regs));
                 childstack->r15 = (unsigned long) ret_from_kernel_thread;
                 childstack->r10 = kthread_arg;
@@ -112,7 +112,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 
         childregs = (struct pt_regs *) (THREAD_SIZE + task_stack_page(p)) - 1;
 
-        if (unlikely(p->flags & PF_KTHREAD)) {
+        if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
                 memset(childregs, 0, sizeof(struct pt_regs));
                 childregs->retpc = (unsigned long) ret_from_kernel_thread;
                 childregs->er4 = topstk; /* arg */
@@ -73,7 +73,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
                                                     sizeof(*ss));
         ss->lr = (unsigned long)ret_from_fork;
         p->thread.switch_sp = ss;
-        if (unlikely(p->flags & PF_KTHREAD)) {
+        if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
                 memset(childregs, 0, sizeof(struct pt_regs));
                 /* r24 <- fn, r25 <- arg */
                 ss->r24 = usp;
@@ -338,7 +338,7 @@ copy_thread(unsigned long clone_flags, unsigned long user_stack_base,
 
         ia64_drop_fpu(p);        /* don't pick up stale state from a CPU's fph */
 
-        if (unlikely(p->flags & PF_KTHREAD)) {
+        if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
                 if (unlikely(!user_stack_base)) {
                         /* fork_idle() called us */
                         return 0;
@@ -157,7 +157,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
          */
         p->thread.fs = get_fs().seg;
 
-        if (unlikely(p->flags & PF_KTHREAD)) {
+        if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
                 /* kernel thread */
                 memset(frame, 0, sizeof(struct fork_frame));
                 frame->regs.sr = PS_S;
@@ -59,7 +59,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
         struct pt_regs *childregs = task_pt_regs(p);
         struct thread_info *ti = task_thread_info(p);
 
-        if (unlikely(p->flags & PF_KTHREAD)) {
+        if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
                 /* if we're creating a new kernel thread then just zeroing all
                  * the registers. That's OK for a brand new thread.*/
                 memset(childregs, 0, sizeof(struct pt_regs));
@@ -120,7 +120,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
         /* Put the stack after the struct pt_regs. */
         childksp = (unsigned long) childregs;
         p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
-        if (unlikely(p->flags & PF_KTHREAD)) {
+        if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
                 /* kernel thread */
                 unsigned long status = p->thread.cp0_status;
                 memset(childregs, 0, sizeof(struct pt_regs));
@@ -156,7 +156,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 
         memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
 
-        if (unlikely(p->flags & PF_KTHREAD)) {
+        if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
                 memset(childregs, 0, sizeof(struct pt_regs));
                 /* kernel thread fn */
                 p->thread.cpu_context.r6 = stack_start;
@@ -109,7 +109,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
         struct switch_stack *childstack =
                 ((struct switch_stack *)childregs) - 1;
 
-        if (unlikely(p->flags & PF_KTHREAD)) {
+        if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
                 memset(childstack, 0,
                         sizeof(struct switch_stack) + sizeof(struct pt_regs));
 
@@ -167,7 +167,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
         sp -= sizeof(struct pt_regs);
         kregs = (struct pt_regs *)sp;
 
-        if (unlikely(p->flags & PF_KTHREAD)) {
+        if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
                 memset(kregs, 0, sizeof(struct pt_regs));
                 kregs->gpr[20] = usp; /* fn, kernel thread */
                 kregs->gpr[22] = arg;
@@ -112,7 +112,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
         struct pt_regs *childregs = task_pt_regs(p);
 
         /* p->thread holds context to be restored by __switch_to() */
-        if (unlikely(p->flags & PF_KTHREAD)) {
+        if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
                 /* Kernel thread */
                 memset(childregs, 0, sizeof(struct pt_regs));
                 childregs->gp = gp_in_global;
@@ -130,7 +130,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
         frame->sf.gprs[9] = (unsigned long)frame;
 
         /* Store access registers to kernel stack of new process. */
-        if (unlikely(p->flags & PF_KTHREAD)) {
+        if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
                 /* kernel thread */
                 memset(&frame->childregs, 0, sizeof(struct pt_regs));
                 frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
@@ -114,7 +114,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
 
         childregs = task_pt_regs(p);
         p->thread.sp = (unsigned long) childregs;
-        if (unlikely(p->flags & PF_KTHREAD)) {
+        if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
                 memset(childregs, 0, sizeof(struct pt_regs));
                 p->thread.pc = (unsigned long) ret_from_kernel_thread;
                 childregs->regs[4] = arg;
@@ -309,7 +309,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
         ti->ksp = (unsigned long) new_stack;
         p->thread.kregs = childregs;
 
-        if (unlikely(p->flags & PF_KTHREAD)) {
+        if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
                 extern int nwindows;
                 unsigned long psr;
                 memset(new_stack, 0, STACKFRAME_SZ + TRACEREG_SZ);
@@ -597,7 +597,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
                                        sizeof(struct sparc_stackf));
         t->fpsaved[0] = 0;
 
-        if (unlikely(p->flags & PF_KTHREAD)) {
+        if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
                 memset(child_trap_frame, 0, child_stack_sz);
                 __thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
                         (current_pt_regs()->tstate + 1) & TSTATE_CWP;
@@ -157,7 +157,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
                 unsigned long arg, struct task_struct * p, unsigned long tls)
 {
         void (*handler)(void);
-        int kthread = current->flags & PF_KTHREAD;
+        int kthread = current->flags & (PF_KTHREAD | PF_IO_WORKER);
         int ret = 0;
 
         p->thread = (struct thread_struct) INIT_THREAD;
@@ -161,7 +161,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
 #endif
 
         /* Kernel thread ? */
-        if (unlikely(p->flags & PF_KTHREAD)) {
+        if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
                 memset(childregs, 0, sizeof(struct pt_regs));
                 kthread_frame_init(frame, sp, arg);
                 return 0;
@@ -217,7 +217,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
 
         p->thread.sp = (unsigned long)childregs;
 
-        if (!(p->flags & PF_KTHREAD)) {
+        if (!(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
                 struct pt_regs *regs = current_pt_regs();
                 unsigned long usp = usp_thread_fn ?
                         usp_thread_fn : regs->areg[1];