workqueue: Move pwq->max_active to wq->max_active
[ Upstream commit a045a272d887575da17ad86d6573e82871b50c27 ]

max_active is a workqueue-wide setting and the configured value is stored in
wq->saved_max_active; however, the effective value was stored in
pwq->max_active. While this is harmless, it makes the max_active update
process more complicated and gets in the way of the planned max_active
semantic updates for unbound workqueues.

This patch moves pwq->max_active to wq->max_active. This simplifies the code
and makes freezing and noop max_active updates cheaper too. No user-visible
behavior change is intended.

As wq->max_active is updated while holding wq mutex but read without any
locking, it now uses WRITE/READ_ONCE(). A new locking rule, WO, is added
for it.

v2: wq->max_active now uses WRITE/READ_ONCE() as suggested by Lai.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <jiangshanlai@gmail.com>
Stable-dep-of: 5797b1c18919 ("workqueue: Implement system-wide nr_active enforcement for unbound workqueues")
Signed-off-by: Sasha Levin <sashal@kernel.org>
commit 82e098f5be
parent 43a181f8f4
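For orientation, max_active is the knob this patch reworks: the per-workqueue limit on concurrently active work items, set at allocation time and adjustable afterwards. A minimal usage sketch of the public API (the workqueue name, flags, and values are illustrative, not taken from the patch):

	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;	/* hypothetical wq */

	static int __init example_init(void)
	{
		/* At most 4 work items from this wq execute concurrently;
		 * surplus items wait on pwq->inactive_works. */
		example_wq = alloc_workqueue("example", WQ_FREEZABLE, 4);
		if (!example_wq)
			return -ENOMEM;

		/* The new limit lands in wq->saved_max_active; with this
		 * patch, wq_adjust_max_active() copies it to wq->max_active
		 * and activates any queued inactive items. */
		workqueue_set_max_active(example_wq, 16);
		return 0;
	}

Nothing in this flow changes from a caller's perspective; the patch only moves where the effective limit is stored.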
@@ -143,6 +143,9 @@ enum {
  *
  * WR: wq->mutex protected for writes. RCU protected for reads.
  *
+ * WO: wq->mutex protected for writes. Updated with WRITE_ONCE() and can be read
+ *     with READ_ONCE() without locking.
+ *
  * MD: wq_mayday_lock protected.
  *
  * WD: Used internally by the watchdog.
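The WO rule added above is the usual mutex-guarded-writer / lockless-reader pairing. A minimal sketch of the pattern as this patch applies it (new_max is an illustrative value, not from the diff):

	/* Writer: wq->max_active changes only under wq->mutex; WRITE_ONCE()
	 * keeps the store from tearing for lockless readers. */
	mutex_lock(&wq->mutex);
	WRITE_ONCE(wq->max_active, new_max);
	mutex_unlock(&wq->mutex);

	/* Reader: hot paths check the limit without taking wq->mutex;
	 * READ_ONCE() pairs with the WRITE_ONCE() above. */
	if (pwq->nr_active < READ_ONCE(pwq->wq->max_active))
		pwq_activate_first_inactive(pwq);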
@@ -250,7 +253,6 @@ struct pool_workqueue {
 	 * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
 	 */
 	int			nr_active;	/* L: nr of active works */
-	int			max_active;	/* L: max active works */
 	struct list_head	inactive_works;	/* L: inactive works */
 	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
 	struct list_head	mayday_node;	/* MD: node on wq->maydays */
@@ -298,7 +300,8 @@ struct workqueue_struct {
 	struct worker		*rescuer;	/* MD: rescue worker */

 	int			nr_drainers;	/* WQ: drain in progress */
-	int			saved_max_active; /* WQ: saved pwq max_active */
+	int			max_active;	/* WO: max active works */
+	int			saved_max_active; /* WQ: saved max_active */

 	struct workqueue_attrs	*unbound_attrs;	/* PW: only for unbound wqs */
 	struct pool_workqueue	*dfl_pwq;	/* PW: only for unbound wqs */
@@ -1486,7 +1489,7 @@ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_
 	pwq->nr_active--;
 	if (!list_empty(&pwq->inactive_works)) {
 		/* one down, submit an inactive one */
-		if (pwq->nr_active < pwq->max_active)
+		if (pwq->nr_active < READ_ONCE(pwq->wq->max_active))
 			pwq_activate_first_inactive(pwq);
 	}
 }
@@ -1787,7 +1790,13 @@ retry:
 	pwq->nr_in_flight[pwq->work_color]++;
 	work_flags = work_color_to_flags(pwq->work_color);

-	if (likely(pwq->nr_active < pwq->max_active)) {
+	/*
+	 * Limit the number of concurrently active work items to max_active.
+	 * @work must also queue behind existing inactive work items to maintain
+	 * ordering when max_active changes. See wq_adjust_max_active().
+	 */
+	if (list_empty(&pwq->inactive_works) &&
+	    pwq->nr_active < READ_ONCE(pwq->wq->max_active)) {
 		if (list_empty(&pool->worklist))
 			pool->watchdog_ts = jiffies;

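The list_empty() check added in the hunk above is what keeps execution order intact across a max_active change. A hypothetical sequence (w1..w3 are illustrative work items, not from the patch):

	/* max_active == 1: w1 is active, w2 waits on pwq->inactive_works. */
	workqueue_set_max_active(wq, 2);	/* wq_adjust_max_active() activates w2 */
	queue_work(wq, &w3);

	/* Even if w3 is queued before pwq->inactive_works has drained, the
	 * list_empty() check parks w3 behind w2 on inactive_works instead of
	 * activating it immediately, so w2 still runs first. */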
@@ -4136,50 +4145,6 @@ static void pwq_release_workfn(struct kthread_work *work)
 	}
 }

-/**
- * pwq_adjust_max_active - update a pwq's max_active to the current setting
- * @pwq: target pool_workqueue
- *
- * If @pwq isn't freezing, set @pwq->max_active to the associated
- * workqueue's saved_max_active and activate inactive work items
- * accordingly. If @pwq is freezing, clear @pwq->max_active to zero.
- */
-static void pwq_adjust_max_active(struct pool_workqueue *pwq)
-{
-	struct workqueue_struct *wq = pwq->wq;
-	bool freezable = wq->flags & WQ_FREEZABLE;
-	unsigned long flags;
-
-	/* for @wq->saved_max_active */
-	lockdep_assert_held(&wq->mutex);
-
-	/* fast exit for non-freezable wqs */
-	if (!freezable && pwq->max_active == wq->saved_max_active)
-		return;
-
-	/* this function can be called during early boot w/ irq disabled */
-	raw_spin_lock_irqsave(&pwq->pool->lock, flags);
-
-	/*
-	 * During [un]freezing, the caller is responsible for ensuring that
-	 * this function is called at least once after @workqueue_freezing
-	 * is updated and visible.
-	 */
-	if (!freezable || !workqueue_freezing) {
-		pwq->max_active = wq->saved_max_active;
-
-		while (!list_empty(&pwq->inactive_works) &&
-		       pwq->nr_active < pwq->max_active)
-			pwq_activate_first_inactive(pwq);
-
-		kick_pool(pwq->pool);
-	} else {
-		pwq->max_active = 0;
-	}
-
-	raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
-}
-
 /* initialize newly allocated @pwq which is associated with @wq and @pool */
 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
 		     struct worker_pool *pool)
@@ -4212,9 +4177,6 @@ static void link_pwq(struct pool_workqueue *pwq)
 	/* set the matching work_color */
 	pwq->work_color = wq->work_color;

-	/* sync max_active to the current setting */
-	pwq_adjust_max_active(pwq);
-
 	/* link in @pwq */
 	list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
 }
@@ -4665,6 +4627,52 @@ static int init_rescuer(struct workqueue_struct *wq)
 	return 0;
 }

+/**
+ * wq_adjust_max_active - update a wq's max_active to the current setting
+ * @wq: target workqueue
+ *
+ * If @wq isn't freezing, set @wq->max_active to the saved_max_active and
+ * activate inactive work items accordingly. If @wq is freezing, clear
+ * @wq->max_active to zero.
+ */
+static void wq_adjust_max_active(struct workqueue_struct *wq)
+{
+	struct pool_workqueue *pwq;
+
+	lockdep_assert_held(&wq->mutex);
+
+	if ((wq->flags & WQ_FREEZABLE) && workqueue_freezing) {
+		WRITE_ONCE(wq->max_active, 0);
+		return;
+	}
+
+	if (wq->max_active == wq->saved_max_active)
+		return;
+
+	/*
+	 * Update @wq->max_active and then kick inactive work items if more
+	 * active work items are allowed. This doesn't break work item ordering
+	 * because new work items are always queued behind existing inactive
+	 * work items if there are any.
+	 */
+	WRITE_ONCE(wq->max_active, wq->saved_max_active);
+
+	for_each_pwq(pwq, wq) {
+		unsigned long flags;
+
+		/* this function can be called during early boot w/ irq disabled */
+		raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+
+		while (!list_empty(&pwq->inactive_works) &&
+		       pwq->nr_active < wq->max_active)
+			pwq_activate_first_inactive(pwq);
+
+		kick_pool(pwq->pool);
+
+		raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+	}
+}
+
 __printf(1, 4)
 struct workqueue_struct *alloc_workqueue(const char *fmt,
 					 unsigned int flags,
@@ -4672,7 +4680,6 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
 {
 	va_list args;
 	struct workqueue_struct *wq;
-	struct pool_workqueue *pwq;
 	int len;

 	/*
@@ -4711,6 +4718,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,

 	/* init wq */
 	wq->flags = flags;
+	wq->max_active = max_active;
 	wq->saved_max_active = max_active;
 	mutex_init(&wq->mutex);
 	atomic_set(&wq->nr_pwqs_to_flush, 0);
@@ -4739,8 +4747,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
 	mutex_lock(&wq_pool_mutex);

 	mutex_lock(&wq->mutex);
-	for_each_pwq(pwq, wq)
-		pwq_adjust_max_active(pwq);
+	wq_adjust_max_active(wq);
 	mutex_unlock(&wq->mutex);

 	list_add_tail_rcu(&wq->list, &workqueues);
@@ -4878,8 +4885,6 @@ EXPORT_SYMBOL_GPL(destroy_workqueue);
  */
 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 {
-	struct pool_workqueue *pwq;
-
 	/* disallow meddling with max_active for ordered workqueues */
 	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
 		return;
@@ -4890,9 +4895,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)

 	wq->flags &= ~__WQ_ORDERED;
 	wq->saved_max_active = max_active;
-
-	for_each_pwq(pwq, wq)
-		pwq_adjust_max_active(pwq);
+	wq_adjust_max_active(wq);

 	mutex_unlock(&wq->mutex);
 }
@@ -5139,8 +5142,8 @@ static void show_pwq(struct pool_workqueue *pwq)
 	pr_info("  pwq %d:", pool->id);
 	pr_cont_pool_info(pool);

-	pr_cont(" active=%d/%d refcnt=%d%s\n",
-		pwq->nr_active, pwq->max_active, pwq->refcnt,
+	pr_cont(" active=%d refcnt=%d%s\n",
+		pwq->nr_active, pwq->refcnt,
 		!list_empty(&pwq->mayday_node) ? " MAYDAY" : "");

 	hash_for_each(pool->busy_hash, bkt, worker, hentry) {
@@ -5688,7 +5691,6 @@ EXPORT_SYMBOL_GPL(work_on_cpu_safe_key);
 void freeze_workqueues_begin(void)
 {
 	struct workqueue_struct *wq;
-	struct pool_workqueue *pwq;

 	mutex_lock(&wq_pool_mutex);

@@ -5697,8 +5699,7 @@ void freeze_workqueues_begin(void)

 	list_for_each_entry(wq, &workqueues, list) {
 		mutex_lock(&wq->mutex);
-		for_each_pwq(pwq, wq)
-			pwq_adjust_max_active(pwq);
+		wq_adjust_max_active(wq);
 		mutex_unlock(&wq->mutex);
 	}

@@ -5763,7 +5764,6 @@ out_unlock:
 void thaw_workqueues(void)
 {
 	struct workqueue_struct *wq;
-	struct pool_workqueue *pwq;

 	mutex_lock(&wq_pool_mutex);

@@ -5775,8 +5775,7 @@ void thaw_workqueues(void)
 	/* restore max_active and repopulate worklist */
 	list_for_each_entry(wq, &workqueues, list) {
 		mutex_lock(&wq->mutex);
-		for_each_pwq(pwq, wq)
-			pwq_adjust_max_active(pwq);
+		wq_adjust_max_active(wq);
 		mutex_unlock(&wq->mutex);
 	}
