workqueue: Reap workers via kthread_stop() and remove detach_completion
The code to kick off the destruction of workers is now in process context (idle_cull_fn()), so kthread_stop() can be called from process context to replace the work of pool->detach_completion.

The wakeup in wake_dying_workers() is unneeded after this change, but it is harmless; just keep it here until the next patch renames wake_dying_workers(), rather than renaming it again and again.

Cc: Valentin Schneider <vschneid@redhat.com>
Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
commit 68f83057b9
parent b56c720718
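For context, here is a minimal, self-contained sketch of the kthread_stop() contract this patch leans on; the demo_* names are hypothetical and not part of the patch. kthread_stop() sleeps in process context until the target thread observes kthread_should_stop() and returns, so the caller needs no completion to learn that the thread has exited; kthread_stop_put() additionally drops the task struct reference the caller pinned earlier, mirroring the get_task_struct() added in set_worker_dying().

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sched/task.h>	/* get_task_struct() */
#include <linux/err.h>

/* Stand-in for a worker loop: park until someone calls kthread_stop(). */
static int demo_worker_fn(void *data)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();		/* kthread_stop() wakes us back up */
	}
	__set_current_state(TASK_RUNNING);
	return 0;			/* return value is reported by kthread_stop() */
}

/* Spawn side: pin the task struct so the pointer stays valid for reaping. */
static struct task_struct *demo_spawn(void)
{
	struct task_struct *task = kthread_create(demo_worker_fn, NULL, "demo");

	if (!IS_ERR(task)) {
		get_task_struct(task);	/* extra ref, as set_worker_dying() takes */
		wake_up_process(task);
	}
	return task;
}

/* Reap side: blocks until demo_worker_fn() returns, then drops the ref. */
static void demo_reap(struct task_struct *task)
{
	kthread_stop_put(task);		/* kthread_stop() + put_task_struct() */
}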
@@ -216,7 +216,6 @@ struct worker_pool {
 	struct worker		*manager;	/* L: purely informational */
 	struct list_head	workers;	/* A: attached workers */
 	struct list_head	dying_workers;	/* A: workers about to die */
-	struct completion	*detach_completion; /* all workers detached */
 
 	struct ida		worker_ida;	/* worker IDs for task name */
 
@@ -2696,7 +2695,6 @@ static void worker_attach_to_pool(struct worker *worker,
 static void worker_detach_from_pool(struct worker *worker)
 {
 	struct worker_pool *pool = worker->pool;
-	struct completion *detach_completion = NULL;
 
 	/* there is one permanent BH worker per CPU which should never detach */
 	WARN_ON_ONCE(pool->flags & POOL_BH);
@@ -2707,15 +2705,10 @@ static void worker_detach_from_pool(struct worker *worker)
 	list_del(&worker->node);
 	worker->pool = NULL;
 
-	if (list_empty(&pool->workers) && list_empty(&pool->dying_workers))
-		detach_completion = pool->detach_completion;
 	mutex_unlock(&wq_pool_attach_mutex);
 
 	/* clear leftover flags without pool->lock after it is detached */
 	worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
-
-	if (detach_completion)
-		complete(detach_completion);
 }
 
 /**
@@ -2816,10 +2809,9 @@ static void unbind_worker(struct worker *worker)
 
 static void wake_dying_workers(struct list_head *cull_list)
 {
-	struct worker *worker, *tmp;
+	struct worker *worker;
 
-	list_for_each_entry_safe(worker, tmp, cull_list, entry) {
-		list_del_init(&worker->entry);
+	list_for_each_entry(worker, cull_list, entry) {
 		unbind_worker(worker);
 		/*
 		 * If the worker was somehow already running, then it had to be
@@ -2835,6 +2827,17 @@ static void wake_dying_workers(struct list_head *cull_list)
 	}
 }
 
+static void reap_dying_workers(struct list_head *cull_list)
+{
+	struct worker *worker, *tmp;
+
+	list_for_each_entry_safe(worker, tmp, cull_list, entry) {
+		list_del_init(&worker->entry);
+		kthread_stop_put(worker->task);
+		kfree(worker);
+	}
+}
+
 /**
  * set_worker_dying - Tag a worker for destruction
  * @worker: worker to be destroyed
@@ -2866,6 +2869,9 @@ static void set_worker_dying(struct worker *worker, struct list_head *list)
 
 	list_move(&worker->entry, list);
 	list_move(&worker->node, &pool->dying_workers);
+
+	/* get an extra task struct reference for later kthread_stop_put() */
+	get_task_struct(worker->task);
 }
 
 /**
@@ -2949,6 +2955,8 @@ static void idle_cull_fn(struct work_struct *work)
 	raw_spin_unlock_irq(&pool->lock);
 	wake_dying_workers(&cull_list);
 	mutex_unlock(&wq_pool_attach_mutex);
+
+	reap_dying_workers(&cull_list);
 }
 
 static void send_mayday(struct work_struct *work)
@@ -3330,7 +3338,6 @@ woke_up:
 		ida_free(&pool->worker_ida, worker->id);
 		worker_detach_from_pool(worker);
 		WARN_ON_ONCE(!list_empty(&worker->entry));
-		kfree(worker);
 		return 0;
 	}
 
@@ -4863,7 +4870,6 @@ static void rcu_free_pool(struct rcu_head *rcu)
  */
 static void put_unbound_pool(struct worker_pool *pool)
 {
-	DECLARE_COMPLETION_ONSTACK(detach_completion);
 	struct worker *worker;
 	LIST_HEAD(cull_list);
 
@@ -4917,12 +4923,9 @@ static void put_unbound_pool(struct worker_pool *pool)
 
 	wake_dying_workers(&cull_list);
 
-	if (!list_empty(&pool->workers) || !list_empty(&pool->dying_workers))
-		pool->detach_completion = &detach_completion;
 	mutex_unlock(&wq_pool_attach_mutex);
 
-	if (pool->detach_completion)
-		wait_for_completion(pool->detach_completion);
+	reap_dying_workers(&cull_list);
 
 	/* shut down the timers */
 	del_timer_sync(&pool->idle_timer);