block: add io_context->active_ref
Currently ioc->nr_tasks is used to decide two things - whether an ioc
is done issuing IOs and whether it's shared by multiple tasks.  This
patch separates out the first into ioc->active_ref, which is acquired
and released using {get|put}_io_context_active() respectively.

This will be used to associate bio's with a given task.

This patch doesn't introduce any visible behavior change.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit f6e8d01bee
parent 3d48749d93
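Note (illustrative, not part of the commit): a minimal sketch of the kind of caller the description anticipates. Anything that wants to issue IOs on behalf of a task pins the "still issuing IOs" half (active_ref) without claiming task ownership (nr_tasks); the helper names below other than the patch's own {get|put}_io_context_active() are hypothetical.

/* Sketch only -- uses just the helpers this commit introduces. */
#include <linux/iocontext.h>
#include <linux/sched.h>

static struct io_context *pin_current_ioc(void)
{
	struct io_context *ioc = current->io_context;

	if (!ioc)
		return NULL;
	get_io_context_active(ioc);	/* refcount++ and active_ref++ */
	return ioc;
}

static void unpin_ioc(struct io_context *ioc)
{
	/*
	 * active_ref--; on reaching zero the ioscheds are notified and
	 * the matching plain reference is dropped via put_io_context().
	 */
	put_io_context_active(ioc);
}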
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -149,20 +149,20 @@ void put_io_context(struct io_context *ioc)
 }
 EXPORT_SYMBOL(put_io_context);
 
-/* Called by the exiting task */
-void exit_io_context(struct task_struct *task)
+/**
+ * put_io_context_active - put active reference on ioc
+ * @ioc: ioc of interest
+ *
+ * Undo get_io_context_active().  If active reference reaches zero after
+ * put, @ioc can never issue further IOs and ioscheds are notified.
+ */
+void put_io_context_active(struct io_context *ioc)
 {
-	struct io_context *ioc;
-	struct io_cq *icq;
 	struct hlist_node *n;
 	unsigned long flags;
+	struct io_cq *icq;
 
-	task_lock(task);
-	ioc = task->io_context;
-	task->io_context = NULL;
-	task_unlock(task);
-
-	if (!atomic_dec_and_test(&ioc->nr_tasks)) {
+	if (!atomic_dec_and_test(&ioc->active_ref)) {
 		put_io_context(ioc);
 		return;
 	}
@@ -191,6 +191,20 @@ retry:
 	put_io_context(ioc);
 }
 
+/* Called by the exiting task */
+void exit_io_context(struct task_struct *task)
+{
+	struct io_context *ioc;
+
+	task_lock(task);
+	ioc = task->io_context;
+	task->io_context = NULL;
+	task_unlock(task);
+
+	atomic_dec(&ioc->nr_tasks);
+	put_io_context_active(ioc);
+}
+
 /**
  * ioc_clear_queue - break any ioc association with the specified queue
  * @q: request_queue being cleared
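With exit_io_context() reduced to the above, the counters balance as in this hand-worked trace (illustrative, assuming no icq pins an extra plain reference):

/*
 *                                       refcount  active_ref  nr_tasks
 * create_task_io_context(parent)            1          1          1
 * fork with CLONE_IO (ioc_task_link)        2          2          2
 * child:  exit_io_context()                 1          1          1
 * parent: exit_io_context()                 0          0          0
 *
 * Only the final put_io_context_active() walks icq_list and notifies
 * the ioscheds that no further IOs can come from this ioc.
 */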
@@ -223,7 +237,8 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
 
 	/* initialize */
 	atomic_long_set(&ioc->refcount, 1);
 	atomic_set(&ioc->nr_tasks, 1);
+	atomic_set(&ioc->active_ref, 1);
 	spin_lock_init(&ioc->lock);
 	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
 	INIT_HLIST_HEAD(&ioc->icq_list);
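A note on the initialization above (commentary, not part of the patch):

/*
 * A freshly created ioc starts with one active reference owned by the
 * creating task; it is the reference dropped by exit_io_context() via
 * put_io_context_active() in the hunk above.
 */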
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1865,7 +1865,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	 * task has exited, don't wait
 	 */
 	cic = cfqd->active_cic;
-	if (!cic || !atomic_read(&cic->icq.ioc->nr_tasks))
+	if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
 		return;
 
 	/*
@@ -2841,7 +2841,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
 	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
 		enable_idle = 0;
-	else if (!atomic_read(&cic->icq.ioc->nr_tasks) ||
+	else if (!atomic_read(&cic->icq.ioc->active_ref) ||
 		 !cfqd->cfq_slice_idle ||
 		 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
 		enable_idle = 0;
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -100,6 +100,7 @@ struct io_cq {
  */
 struct io_context {
 	atomic_long_t refcount;
+	atomic_t active_ref;
 	atomic_t nr_tasks;
 
 	/* all the fields below are protected by this lock */
@@ -120,17 +121,34 @@ struct io_context {
 	struct work_struct release_work;
 };
 
-static inline void ioc_task_link(struct io_context *ioc)
+/**
+ * get_io_context_active - get active reference on ioc
+ * @ioc: ioc of interest
+ *
+ * Only iocs with active reference can issue new IOs.  This function
+ * acquires an active reference on @ioc.  The caller must already have an
+ * active reference on @ioc.
+ */
+static inline void get_io_context_active(struct io_context *ioc)
 {
 	WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0);
-	WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
+	WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0);
 	atomic_long_inc(&ioc->refcount);
+	atomic_inc(&ioc->active_ref);
+}
+
+static inline void ioc_task_link(struct io_context *ioc)
+{
+	get_io_context_active(ioc);
+
+	WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
 	atomic_inc(&ioc->nr_tasks);
 }
 
 struct task_struct;
 #ifdef CONFIG_BLOCK
 void put_io_context(struct io_context *ioc);
+void put_io_context_active(struct io_context *ioc);
 void exit_io_context(struct task_struct *task);
 struct io_context *get_task_io_context(struct task_struct *task,
 				       gfp_t gfp_flags, int node);
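For context, the sole caller of ioc_task_link() is the fork path, which this patch leaves untouched. Approximately, from kernel/fork.c of this era (trimmed; the ioprio-copy branch is omitted):

static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
#ifdef CONFIG_BLOCK
	struct io_context *ioc = current->io_context;

	if (!ioc)
		return 0;
	/* share io_context with parent if CLONE_IO is set */
	if (clone_flags & CLONE_IO) {
		ioc_task_link(ioc);	/* bumps refcount, active_ref and nr_tasks */
		tsk->io_context = ioc;
	}
#endif
	return 0;
}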