fork: move copy_io to block/blk-ioc.c
Move the copying of the I/O context to the block layer as that is where we can use the proper low-level interfaces.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20211126115817.2087431-3-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 88c9a2ce52
parent e92a559e6c
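For readers skimming the diff below, the shape of the refactor is: the cheap `!current->io_context` check stays behind as an inline `copy_io()` wrapper in the header, while the CLONE_IO / ioprio logic moves out of line into `__copy_io()` in block/blk-ioc.c, where it can call `get_io_context_active()` and bump `nr_tasks` directly instead of going through the now-removed `ioc_task_link()` helper. A minimal userspace sketch of that wrapper-plus-slow-path pattern follows; all `*_model` names and `CLONE_IO_MODEL` are hypothetical stand-ins for illustration, not kernel APIs.

/* Standalone model of the copy_io()/__copy_io() split; illustrative only. */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct ioc_model { int ioprio; int nr_tasks; };
struct task_model { struct ioc_model *io_context; };

#define CLONE_IO_MODEL 0x1

/* Out-of-line "slow path": only runs when the parent has an io_context,
 * mirroring what __copy_io() does in block/blk-ioc.c. */
static int copy_io_slowpath_model(unsigned long clone_flags,
				  struct task_model *parent,
				  struct task_model *child)
{
	if (clone_flags & CLONE_IO_MODEL) {
		/* share the parent's context and account the extra task */
		parent->io_context->nr_tasks++;
		child->io_context = parent->io_context;
	} else if (parent->io_context->ioprio != 0) {
		/* give the child its own context carrying only the ioprio */
		struct ioc_model *new_ioc = malloc(sizeof(*new_ioc));

		if (!new_ioc)
			return -ENOMEM;
		new_ioc->ioprio = parent->io_context->ioprio;
		new_ioc->nr_tasks = 1;
		child->io_context = new_ioc;
	}
	return 0;
}

/* Inline "fast path" wrapper, mirroring copy_io() in the header:
 * most tasks have no io_context, so most forks return right here. */
static inline int copy_io_model(unsigned long clone_flags,
				struct task_model *parent,
				struct task_model *child)
{
	if (!parent->io_context)
		return 0;
	return copy_io_slowpath_model(clone_flags, parent, child);
}

int main(void)
{
	struct ioc_model ioc = { .ioprio = 4, .nr_tasks = 1 };
	struct task_model parent = { .io_context = &ioc };
	struct task_model shared = { 0 }, copied = { 0 };

	copy_io_model(CLONE_IO_MODEL, &parent, &shared); /* shares the context */
	copy_io_model(0, &parent, &copied);              /* copies only ioprio */

	printf("shared: same ioc=%d, copied: ioprio=%d\n",
	       shared.io_context == &ioc, copied.io_context->ioprio);
	return 0;
}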
block/blk-ioc.c
@@ -322,6 +322,33 @@ struct io_context *get_task_io_context(struct task_struct *task,
 	return NULL;
 }
 
+int __copy_io(unsigned long clone_flags, struct task_struct *tsk)
+{
+	struct io_context *ioc = current->io_context;
+	struct io_context *new_ioc;
+
+	/*
+	 * Share io context with parent, if CLONE_IO is set
+	 */
+	if (clone_flags & CLONE_IO) {
+		get_io_context_active(ioc);
+
+		WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
+		atomic_inc(&ioc->nr_tasks);
+
+		tsk->io_context = ioc;
+	} else if (ioprio_valid(ioc->ioprio)) {
+		new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
+		if (unlikely(!new_ioc))
+			return -ENOMEM;
+
+		new_ioc->ioprio = ioc->ioprio;
+		put_io_context(new_ioc);
+	}
+
+	return 0;
+}
+
 /**
  * ioc_lookup_icq - lookup io_cq from ioc
  * @ioc: the associated io_context
include/linux/iocontext.h
@@ -129,14 +129,6 @@ static inline void get_io_context_active(struct io_context *ioc)
 	atomic_inc(&ioc->active_ref);
 }
 
-static inline void ioc_task_link(struct io_context *ioc)
-{
-	get_io_context_active(ioc);
-
-	WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
-	atomic_inc(&ioc->nr_tasks);
-}
-
 struct task_struct;
 #ifdef CONFIG_BLOCK
 void put_io_context(struct io_context *ioc);
@@ -144,10 +136,21 @@ void put_io_context_active(struct io_context *ioc);
 void exit_io_context(struct task_struct *task);
 struct io_context *get_task_io_context(struct task_struct *task,
 				       gfp_t gfp_flags, int node);
+int __copy_io(unsigned long clone_flags, struct task_struct *tsk);
+static inline int copy_io(unsigned long clone_flags, struct task_struct *tsk)
+{
+	if (!current->io_context)
+		return 0;
+	return __copy_io(clone_flags, tsk);
+}
 #else
 struct io_context;
 static inline void put_io_context(struct io_context *ioc) { }
 static inline void exit_io_context(struct task_struct *task) { }
-#endif
+static inline int copy_io(unsigned long clone_flags, struct task_struct *tsk)
+{
+	return 0;
+}
+#endif /* CONFIG_BLOCK */
 
-#endif
+#endif /* IOCONTEXT_H */
kernel/fork.c
@@ -1556,32 +1556,6 @@ out:
 	return error;
 }
 
-static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
-{
-#ifdef CONFIG_BLOCK
-	struct io_context *ioc = current->io_context;
-	struct io_context *new_ioc;
-
-	if (!ioc)
-		return 0;
-	/*
-	 * Share io context with parent, if CLONE_IO is set
-	 */
-	if (clone_flags & CLONE_IO) {
-		ioc_task_link(ioc);
-		tsk->io_context = ioc;
-	} else if (ioprio_valid(ioc->ioprio)) {
-		new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
-		if (unlikely(!new_ioc))
-			return -ENOMEM;
-
-		new_ioc->ioprio = ioc->ioprio;
-		put_io_context(new_ioc);
-	}
-#endif
-	return 0;
-}
-
 static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
 {
 	struct sighand_struct *sig;