workqueue: replace WORK_CPU_NONE/LAST with WORK_CPU_END

Now that workqueue has moved away from gcwqs, there is no longer any
need for a CPU identifier meaning "no CPU associated" - we now use
WORK_OFFQ_POOL_NONE for that instead - and most uses of WORK_CPU_NONE
are gone.

The only remaining use is as the end marker for the for_each_*wq*()
iterators, where the name WORK_CPU_NONE is confusing now that there is
no actual "no CPU" usage left.  Similarly, WORK_CPU_LAST, which equals
WORK_CPU_NONE, no longer makes sense.

Replace both WORK_CPU_NONE and WORK_CPU_LAST with WORK_CPU_END.  This
patch doesn't introduce any functional difference.
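
For reference, the end marker is only ever used as a loop bound.  A
minimal sketch of the pattern the for_each_*wq*() iterators expand to
(names as in the hunks below, simplified for illustration):

	int cpu;

	/* walk possible CPUs plus WORK_CPU_UNBOUND, stop at WORK_CPU_END */
	for (cpu = __next_wq_cpu(-1, cpu_possible_mask, 3);
	     cpu < WORK_CPU_END;
	     cpu = __next_wq_cpu(cpu, cpu_possible_mask, 3)) {
		/* per-CPU handling here */
	}

Since WORK_CPU_END == NR_CPUS + 1, it compares greater than every valid
CPU id and than WORK_CPU_UNBOUND (== NR_CPUS), so the walk terminates
without ever needing a "no CPU" sentinel.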

tj: s/WORK_CPU_LAST/WORK_CPU_END/ and rewrote the description.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
---
commit 6be195886a (parent 706026c214)
Author:    Lai Jiangshan <laijs@cn.fujitsu.com>
Date:      2013-02-06 18:04:53 -08:00
Committer: Tejun Heo <tj@kernel.org>

 2 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -57,8 +57,7 @@ enum {
 
 	/* special cpu IDs */
 	WORK_CPU_UNBOUND	= NR_CPUS,
-	WORK_CPU_NONE		= NR_CPUS + 1,
-	WORK_CPU_LAST		= WORK_CPU_NONE,
+	WORK_CPU_END		= NR_CPUS + 1,
 
 	/*
 	 * Reserve 7 bits off of cwq pointer w/ debugobjects turned

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -258,7 +258,7 @@ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
 		if (sw & 2)
 			return WORK_CPU_UNBOUND;
 	}
-	return WORK_CPU_NONE;
+	return WORK_CPU_END;
 }
 
 static inline int __next_cwq_cpu(int cpu, const struct cpumask *mask,
@@ -282,17 +282,17 @@ static inline int __next_cwq_cpu(int cpu, const struct cpumask *mask,
  */
 #define for_each_wq_cpu(cpu)						\
 	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, 3);		\
-	     (cpu) < WORK_CPU_NONE;					\
+	     (cpu) < WORK_CPU_END;					\
 	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, 3))
 
 #define for_each_online_wq_cpu(cpu)					\
 	for ((cpu) = __next_wq_cpu(-1, cpu_online_mask, 3);		\
-	     (cpu) < WORK_CPU_NONE;					\
+	     (cpu) < WORK_CPU_END;					\
 	     (cpu) = __next_wq_cpu((cpu), cpu_online_mask, 3))
 
 #define for_each_cwq_cpu(cpu, wq)					\
 	for ((cpu) = __next_cwq_cpu(-1, cpu_possible_mask, (wq));	\
-	     (cpu) < WORK_CPU_NONE;					\
+	     (cpu) < WORK_CPU_END;					\
 	     (cpu) = __next_cwq_cpu((cpu), cpu_possible_mask, (wq)))
 
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
@@ -3796,7 +3796,7 @@ static int __init init_workqueues(void)
 
 	/* make sure we have enough bits for OFFQ pool ID */
 	BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <
-		     WORK_CPU_LAST * NR_STD_WORKER_POOLS);
+		     WORK_CPU_END * NR_STD_WORKER_POOLS);
 
 	cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
 	hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
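
The BUILD_BUG_ON above caps the standard worker pool ID space: every
CPU slot up to and including WORK_CPU_UNBOUND (WORK_CPU_END ids in
total) can host NR_STD_WORKER_POOLS pools, and each pool ID has to fit
into the bits an off-queue work->data word has left above
WORK_OFFQ_POOL_SHIFT.  A purely illustrative sketch of the same check
with assumed values (the real NR_CPUS and shift are configuration
dependent):

	/* illustrative sketch only -- mirrors the BUILD_BUG_ON above */
	#include <assert.h>

	#define NR_CPUS			4096	/* assumed config value */
	#define WORK_CPU_END		(NR_CPUS + 1)
	#define NR_STD_WORKER_POOLS	2	/* assumed: normal + highpri */
	#define POOL_ID_BITS		32	/* assumed BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT */

	static_assert((1ULL << POOL_ID_BITS) >=
		      (unsigned long long)WORK_CPU_END * NR_STD_WORKER_POOLS,
		      "not enough bits to encode every std worker pool ID");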