Merge branch 'for-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue updates from Tejun Heo:
 "All trivial. Two comment updates and one more initialization sanity
  check in flush_work()"

* 'for-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: Fix spelling in source code comments
  workqueue: fix typo in comment
  workqueue: Try to catch flush_work() without INIT_WORK().
commit abf7c3d8dd
@@ -648,7 +648,7 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
 	 * The following mb guarantees that previous clear of a PENDING bit
 	 * will not be reordered with any speculative LOADS or STORES from
 	 * work->current_func, which is executed afterwards. This possible
-	 * reordering can lead to a missed execution on attempt to qeueue
+	 * reordering can lead to a missed execution on attempt to queue
 	 * the same @work. E.g. consider this case:
 	 *
 	 *   CPU#0                         CPU#1
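The comment above describes a producer that publishes an event and then queues the same @work, relying on the documented ordering so the handler cannot miss the update. A minimal, hypothetical sketch of that caller-side pattern (event_indicated, event_work and event_work_fn are illustrative names, not part of this patch):

#include <linux/workqueue.h>

static int event_indicated;
static struct work_struct event_work;	/* INIT_WORK(&event_work, event_work_fn) at init time */

static void event_work_fn(struct work_struct *work)
{
	/* Relies on the ordering documented above: either the producer's
	 * store is visible here, or the producer re-queues the work. */
	if (READ_ONCE(event_indicated))
		;	/* handle the event */
}

static void notify_event(void)
{
	WRITE_ONCE(event_indicated, 1);		/* STORE the event */
	queue_work(system_wq, &event_work);	/* may find PENDING still set */
}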
@@ -1343,7 +1343,7 @@ static bool is_chained_work(struct workqueue_struct *wq)
 
 	worker = current_wq_worker();
 	/*
-	 * Return %true iff I'm a worker execuing a work item on @wq. If
+	 * Return %true iff I'm a worker executing a work item on @wq. If
 	 * I'm @worker, it's safe to dereference it without locking.
 	 */
 	return worker && worker->current_pwq->wq == wq;
@@ -1725,7 +1725,7 @@ static void rcu_work_rcufn(struct rcu_head *rcu)
  *
  * Return: %false if @rwork was already pending, %true otherwise. Note
  * that a full RCU grace period is guaranteed only after a %true return.
- * While @rwork is guarnateed to be executed after a %false return, the
+ * While @rwork is guaranteed to be executed after a %false return, the
  * execution may happen before a full RCU grace period has passed.
  */
 bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
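A short, hypothetical usage sketch of the return-value semantics documented above (my_wq, cleanup_rwork and cleanup_fn are assumed names, not part of this patch):

#include <linux/workqueue.h>

static struct rcu_work cleanup_rwork;

static void cleanup_fn(struct work_struct *work)
{
	/* Runs from a worker; when queue_rcu_work() returned %true, a full
	 * RCU grace period has elapsed since the call. */
}

static void schedule_cleanup(struct workqueue_struct *my_wq)
{
	INIT_RCU_WORK(&cleanup_rwork, cleanup_fn);

	if (!queue_rcu_work(my_wq, &cleanup_rwork)) {
		/* Already pending: it will still run, but possibly before a
		 * full grace period has passed, as the comment notes. */
	}
}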
@@ -3017,6 +3017,9 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
 	if (WARN_ON(!wq_online))
 		return false;
 
+	if (WARN_ON(!work->func))
+		return false;
+
 	if (!from_cancel) {
 		lock_map_acquire(&work->lockdep_map);
 		lock_map_release(&work->lockdep_map);
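The added WARN_ON(!work->func) catches callers that flush a work item which was never initialized. A minimal sketch of the buggy pattern it is meant to flag (hypothetical driver code; my_work is an assumed name):

#include <linux/workqueue.h>

/* Buggy: the work item is declared but never passed through INIT_WORK(),
 * so work->func is still NULL. */
static struct work_struct my_work;	/* missing INIT_WORK(&my_work, my_fn) */

static void teardown(void)
{
	/* With the check above, this now warns and returns false instead of
	 * flushing an uninitialized work item. */
	flush_work(&my_work);
}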