linux/lib/percpu_counter.c

/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>

static LIST_HEAD(percpu_counters);
static DEFINE_MUTEX(percpu_counters_lock);

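/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): a typical caller embeds a percpu_counter in its own structure and
 * drives it through init/add/sum/destroy.  The helpers used below
 * (percpu_counter_init(), percpu_counter_add(), percpu_counter_sum(),
 * percpu_counter_destroy()) come from <linux/percpu_counter.h>; the struct
 * "my_stats" and the my_stats_* function names are made up for the example.
 *
 *      struct my_stats {
 *              struct percpu_counter nr_events;
 *      };
 *
 *      static int my_stats_setup(struct my_stats *s)
 *      {
 *              return percpu_counter_init(&s->nr_events, 0);
 *      }
 *
 *      static void my_stats_hit(struct my_stats *s)
 *      {
 *              percpu_counter_add(&s->nr_events, 1);   cheap per-cpu fast path
 *      }
 *
 *      static s64 my_stats_report(struct my_stats *s)
 *      {
 *              return percpu_counter_sum(&s->nr_events);   slow but precise
 *      }
 *
 *      static void my_stats_teardown(struct my_stats *s)
 *      {
 *              percpu_counter_destroy(&s->nr_events);
 *      }
 */
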
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
        int cpu;

        spin_lock(&fbc->lock);
        for_each_possible_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                *pcount = 0;
        }
        fbc->count = amount;
        spin_unlock(&fbc->lock);
}
EXPORT_SYMBOL(percpu_counter_set);

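/*
 * Batching in __percpu_counter_add(), illustrated with made-up numbers
 * (editorial comment derived from the code below): with batch == 32, each
 * CPU accumulates deltas in its local s32 until the local total reaches
 * +32 or -32; only then is fbc->lock taken and the local total folded into
 * fbc->count.  So 31 consecutive percpu_counter_add(fbc, 1) calls on one
 * CPU touch only the per-cpu slot, and the 32nd call flushes +32 into the
 * shared count.  The price is that percpu_counter_read() can lag the true
 * value by almost batch * num_online_cpus().
 */
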
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
        s64 count;
        s32 *pcount;
        int cpu = get_cpu();

        pcount = per_cpu_ptr(fbc->counters, cpu);
        count = *pcount + amount;
        if (count >= batch || count <= -batch) {
                spin_lock(&fbc->lock);
                fbc->count += count;
                *pcount = 0;
                spin_unlock(&fbc->lock);
        } else {
                *pcount = count;
        }
        put_cpu();
}
EXPORT_SYMBOL(__percpu_counter_add);

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive()
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
        s64 ret;
        int cpu;

        spin_lock(&fbc->lock);
        ret = fbc->count;
        for_each_online_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                ret += *pcount;
        }
        spin_unlock(&fbc->lock);
        return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);

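/*
 * Illustrative example (editorial addition, numbers made up): if fbc->count
 * is 100 and four online CPUs each hold +10 in their local slots,
 * percpu_counter_read() reports 100, while __percpu_counter_sum() takes the
 * lock, walks the online CPUs and reports the precise 140.
 */
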
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
                          struct lock_class_key *key)
{
        spin_lock_init(&fbc->lock);
        lockdep_set_class(&fbc->lock, key);
        fbc->count = amount;
        fbc->counters = alloc_percpu(s32);
        if (!fbc->counters)
                return -ENOMEM;
#ifdef CONFIG_HOTPLUG_CPU
        INIT_LIST_HEAD(&fbc->list);
        mutex_lock(&percpu_counters_lock);
        list_add(&fbc->list, &percpu_counters);
        mutex_unlock(&percpu_counters_lock);
#endif
        return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);
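
/*
 * Note (editorial addition): callers normally do not invoke
 * __percpu_counter_init() directly.  In kernels of this vintage,
 * <linux/percpu_counter.h> provides a percpu_counter_init(fbc, value)
 * wrapper that supplies a static struct lock_class_key, so lockdep can
 * give each counter class its own lock class.
 */
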
void percpu_counter_destroy(struct percpu_counter *fbc)
{
        if (!fbc->counters)
                return;

#ifdef CONFIG_HOTPLUG_CPU
        mutex_lock(&percpu_counters_lock);
        list_del(&fbc->list);
        mutex_unlock(&percpu_counters_lock);
#endif
        free_percpu(fbc->counters);
        fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);

int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

static void compute_batch_value(void)
{
        int nr = num_online_cpus();

        percpu_counter_batch = max(32, nr*2);
}

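/*
 * Worked example (editorial addition): with up to 16 online CPUs the batch
 * stays at the floor of 32 (nr*2 <= 32); with 64 online CPUs it becomes
 * max(32, 128) = 128, so larger machines flush their per-cpu deltas into
 * the shared count less often.
 */
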
static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
                                        unsigned long action, void *hcpu)
{
#ifdef CONFIG_HOTPLUG_CPU
        unsigned int cpu;
        struct percpu_counter *fbc;

        compute_batch_value();
        if (action != CPU_DEAD)
                return NOTIFY_OK;

        cpu = (unsigned long)hcpu;
        mutex_lock(&percpu_counters_lock);
        list_for_each_entry(fbc, &percpu_counters, list) {
                s32 *pcount;
                unsigned long flags;

                spin_lock_irqsave(&fbc->lock, flags);
                pcount = per_cpu_ptr(fbc->counters, cpu);
                fbc->count += *pcount;
                *pcount = 0;
                spin_unlock_irqrestore(&fbc->lock, flags);
        }
        mutex_unlock(&percpu_counters_lock);
#endif
        return NOTIFY_OK;
}

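/*
 * Worked example for the comparison below (editorial addition):
 * percpu_counter_read() can deviate from the true value by at most about
 * percpu_counter_batch per online CPU, so with batch == 32 and 8 CPUs the
 * slack is 256.  If the rough count differs from rhs by more than that,
 * the sign of the comparison is already certain and the expensive
 * percpu_counter_sum() is skipped.
 */
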
/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less.
 */
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
        s64 count;

        count = percpu_counter_read(fbc);
        /* Check to see if rough count will be sufficient for comparison */
        if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) {
                if (count > rhs)
                        return 1;
                else
                        return -1;
        }
        /* Need to use precise count */
        count = percpu_counter_sum(fbc);
        if (count > rhs)
                return 1;
        else if (count < rhs)
                return -1;
        else
                return 0;
}
EXPORT_SYMBOL(percpu_counter_compare);

static int __init percpu_counter_startup(void)
{
        compute_batch_value();
        hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
        return 0;
}
module_init(percpu_counter_startup);