workqueue: introduce cpu_singlethread_map
The code like

        if (is_single_threaded(wq))
                do_something(singlethread_cpu);
        else {
                for_each_cpu_mask(cpu, cpu_populated_map)
                        do_something(cpu);
        }

is annoying to repeat at every call site. Adding a "static cpumask_t cpu_singlethread_map" that contains only singlethread_cpu lets each of these branches collapse into a single for_each_cpu_mask() loop. Lessens .text a bit, and imho makes the code more readable.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
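For readers skimming the diff, here is a minimal userspace sketch of the pattern, not kernel code: cpumask_t, for_each_cpu_mask() and do_something() below are simplified stand-ins for the kernel's own definitions. It shows how precomputing a one-CPU mask lets every call site iterate a mask unconditionally instead of branching on is_single_threaded():

    /*
     * Toy model, not kernel code: cpumask_t here is a plain bitmask,
     * for_each_cpu_mask() a naive bit loop, and do_something() stands
     * in for flush_cpu_workqueue() and friends.
     */
    #include <stdio.h>

    typedef unsigned long cpumask_t;        /* toy: one bit per CPU */

    #define for_each_cpu_mask(cpu, mask)                                \
        for ((cpu) = 0; (cpu) < (int)(8 * sizeof(mask)); (cpu)++)       \
            if ((mask) & (1UL << (cpu)))

    static int singlethread_cpu;            /* CPU hosting single-threaded wqs */
    static cpumask_t cpu_singlethread_map;  /* mask containing only that CPU */
    static cpumask_t cpu_populated_map;     /* CPUs whose workers were created */

    struct workqueue_struct {
        int single_threaded;
    };

    static int is_single_threaded(struct workqueue_struct *wq)
    {
        return wq->single_threaded;
    }

    /* The helper this commit introduces: pick the right mask once. */
    static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
    {
        return is_single_threaded(wq)
            ? &cpu_singlethread_map : &cpu_populated_map;
    }

    static void do_something(int cpu)
    {
        printf("acting on cpu %d\n", cpu);
    }

    int main(void)
    {
        struct workqueue_struct multi = { 0 }, single = { 1 };
        int cpu;

        singlethread_cpu = 0;
        cpu_singlethread_map = 1UL << singlethread_cpu; /* cpumask_of_cpu() analogue */
        cpu_populated_map = 0x0b;                       /* CPUs 0, 1 and 3 */

        /* One loop serves both cases; no if/else at the call site. */
        for_each_cpu_mask(cpu, *wq_cpu_map(&multi))
            do_something(cpu);
        for_each_cpu_mask(cpu, *wq_cpu_map(&single))
            do_something(cpu);
        return 0;
    }

With the helper in place, flush_workqueue(), flush_work() and destroy_workqueue() all reduce to one for_each_cpu_mask() loop over *wq_cpu_map(wq), which is exactly what the hunks below do.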
parent dfb4b82e1c
commit b1f4ec172f
@@ -69,6 +69,7 @@ static DEFINE_MUTEX(workqueue_mutex);
 static LIST_HEAD(workqueues);
 
 static int singlethread_cpu __read_mostly;
+static cpumask_t cpu_singlethread_map __read_mostly;
 /* optimization, we could use cpu_possible_map */
 static cpumask_t cpu_populated_map __read_mostly;
 
@@ -78,6 +79,12 @@ static inline int is_single_threaded(struct workqueue_struct *wq)
 	return list_empty(&wq->list);
 }
 
+static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
+{
+	return is_single_threaded(wq)
+		? &cpu_singlethread_map : &cpu_populated_map;
+}
+
 /*
  * Set the workqueue on which a work item is to be run
  * - Must *only* be called if the pending flag is set
@@ -393,16 +400,12 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
  */
 void fastcall flush_workqueue(struct workqueue_struct *wq)
 {
+	const cpumask_t *cpu_map = wq_cpu_map(wq);
+	int cpu;
 
 	might_sleep();
-	if (is_single_threaded(wq))
-		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
-	else {
-		int cpu;
-
-		for_each_cpu_mask(cpu, cpu_populated_map)
-			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
-	}
+	for_each_cpu_mask(cpu, *cpu_map)
+		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
@@ -439,7 +442,9 @@ static void wait_on_work(struct cpu_workqueue_struct *cwq,
  */
 void flush_work(struct workqueue_struct *wq, struct work_struct *work)
 {
+	const cpumask_t *cpu_map = wq_cpu_map(wq);
 	struct cpu_workqueue_struct *cwq;
+	int cpu;
 
 	might_sleep();
 
@@ -457,14 +462,8 @@ void flush_work(struct workqueue_struct *wq, struct work_struct *work)
 	work_release(work);
 	spin_unlock_irq(&cwq->lock);
 
-	if (is_single_threaded(wq))
-		wait_on_work(per_cpu_ptr(wq->cpu_wq, singlethread_cpu), work);
-	else {
-		int cpu;
-
-		for_each_cpu_mask(cpu, cpu_populated_map)
-			wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
-	}
+	for_each_cpu_mask(cpu, *cpu_map)
+		wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
@@ -757,22 +756,17 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
  */
 void destroy_workqueue(struct workqueue_struct *wq)
 {
+	const cpumask_t *cpu_map = wq_cpu_map(wq);
 	struct cpu_workqueue_struct *cwq;
+	int cpu;
 
-	if (is_single_threaded(wq)) {
-		cwq = per_cpu_ptr(wq->cpu_wq, singlethread_cpu);
-		cleanup_workqueue_thread(cwq, singlethread_cpu);
-	} else {
-		int cpu;
-
-		mutex_lock(&workqueue_mutex);
-		list_del(&wq->list);
-		mutex_unlock(&workqueue_mutex);
-
-		for_each_cpu_mask(cpu, cpu_populated_map) {
-			cwq = per_cpu_ptr(wq->cpu_wq, cpu);
-			cleanup_workqueue_thread(cwq, cpu);
-		}
+	mutex_lock(&workqueue_mutex);
+	list_del(&wq->list);
+	mutex_unlock(&workqueue_mutex);
+
+	for_each_cpu_mask(cpu, *cpu_map) {
+		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
+		cleanup_workqueue_thread(cwq, cpu);
 	}
 
 	free_percpu(wq->cpu_wq);
@@ -831,6 +825,7 @@ void init_workqueues(void)
 {
 	cpu_populated_map = cpu_online_map;
 	singlethread_cpu = first_cpu(cpu_possible_map);
+	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
 	hotcpu_notifier(workqueue_cpu_callback, 0);
 	keventd_wq = create_workqueue("events");
 	BUG_ON(!keventd_wq);