Merge branch 'for-3.10-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull workqueue fixes from Tejun Heo:
 "Three more workqueue regression fixes.

   - Fix an unbalanced unlock in the trylock-failure path of
     manage_workers().  This shouldn't happen often in the wild but is
     possible (see the locking sketch after the manage_workers() hunk
     below).

   - When schedule_work() and friends were made inline, they became
     unavailable to !GPL modules.  Allow !GPL modules to access the
     basics - system_wq and queue_*work_on() - so that schedule_work()
     and friends can be used (a module sketch follows the export hunks
     below).

   - During boot, the unbound NUMA support code allocates a cpumask for
     each possible node using alloc_cpumask_var_node(), which ends up
     trying to allocate node-specific memory even for offline nodes,
     triggering a BUG in the memory allocator.  Use NUMA_NO_NODE for
     offline nodes (sketched after the wq_numa_init() hunk below)."

* 'for-3.10-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: don't perform NUMA-aware allocations on offline nodes in wq_numa_init()
  workqueue: Make schedule_work() available again to non GPL modules
  workqueue: correct handling of the pool spin_lock
Linus Torvalds 2013-05-16 12:03:28 -07:00
commit 4a007ed926

--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -296,7 +296,7 @@ static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
 
 struct workqueue_struct *system_wq __read_mostly;
-EXPORT_SYMBOL_GPL(system_wq);
+EXPORT_SYMBOL(system_wq);
 struct workqueue_struct *system_highpri_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_highpri_wq);
 struct workqueue_struct *system_long_wq __read_mostly;
@@ -1411,7 +1411,7 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
 	local_irq_restore(flags);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(queue_work_on);
+EXPORT_SYMBOL(queue_work_on);
 
 void delayed_work_timer_fn(unsigned long __data)
 {
@@ -1485,7 +1485,7 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	local_irq_restore(flags);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(queue_delayed_work_on);
+EXPORT_SYMBOL(queue_delayed_work_on);
 
 /**
  * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
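
The three EXPORT_SYMBOL relaxations above are the whole fix:
schedule_work() is a static inline that expands to
queue_work(system_wq, ...) and ultimately queue_work_on(), so system_wq
and queue_*work_on() are the symbols a module actually links against.
A minimal sketch of a hypothetical non-GPL module this re-enables (all
demo_* names are illustrative):

#include <linux/module.h>
#include <linux/workqueue.h>

static void demo_fn(struct work_struct *work)
{
	pr_info("demo work ran\n");
}

static DECLARE_WORK(demo_work, demo_fn);

static int __init demo_init(void)
{
	/*
	 * schedule_work() is inline; it resolves to system_wq and
	 * queue_work_on(), both plain EXPORT_SYMBOL after this commit.
	 */
	schedule_work(&demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	/*
	 * A real module must make sure demo_work has finished before
	 * unloading; that cleanup is omitted from this sketch.
	 */
}

module_init(demo_init);
module_exit(demo_exit);

/* Not GPL-compatible, so GPL-only exports stay invisible to us. */
MODULE_LICENSE("Proprietary");
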
@@ -2059,6 +2059,7 @@ static bool manage_workers(struct worker *worker)
 	if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
 		spin_unlock_irq(&pool->lock);
 		mutex_lock(&pool->manager_mutex);
+		spin_lock_irq(&pool->lock);
 		ret = true;
 	}
 
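
The added spin_lock_irq() restores the function's locking contract:
manage_workers() is entered with pool->lock held and must return with
it held, but mutex_lock() can sleep, so the contended path has to drop
the spinlock first; before this fix it simply never re-took it.  A
sketch of the pattern, using the names from the hunk above:

if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
	spin_unlock_irq(&pool->lock);	  /* can't sleep holding a spinlock */
	mutex_lock(&pool->manager_mutex); /* may sleep */
	spin_lock_irq(&pool->lock);	  /* restore the "lock held on return" invariant */
	ret = true;	/* the lock was dropped, so pool state may have changed */
}
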
@@ -4904,7 +4905,8 @@ static void __init wq_numa_init(void)
 	BUG_ON(!tbl);
 
 	for_each_node(node)
-		BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, node));
+		BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
+				node_online(node) ? node : NUMA_NO_NODE));
 
 	for_each_possible_cpu(cpu) {
 		node = cpu_to_node(cpu);
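
For context: wq_numa_init() walks every possible node, and a node that
is possible but offline has no node-local memory, so passing its id
down to the slab allocator trips the allocator's sanity checks.
NUMA_NO_NODE means "no placement preference" and lets the allocator
pick any online node.  A standalone sketch of the same fallback (the
helper name and error handling are illustrative; wq_numa_init() itself
BUG()s on failure):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/numa.h>
#include <linux/slab.h>

/* Allocate one scratch cpumask near @node, or anywhere if it is offline. */
static int __init demo_alloc_node_mask(cpumask_var_t *tbl, int node)
{
	int nid = node_online(node) ? node : NUMA_NO_NODE;

	if (!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, nid))
		return -ENOMEM;
	return 0;
}
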