/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Every live counter is linked on this list (under percpu_counters_lock) so
 * the cpu-hotplug callback at the bottom of this file can fold a dead cpu's
 * buffered delta back into fbc->count.
 */
static LIST_HEAD(percpu_counters);
static DEFINE_MUTEX(percpu_counters_lock);
#endif

void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;

	spin_lock(&fbc->lock);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	spin_unlock(&fbc->lock);
}
EXPORT_SYMBOL(percpu_counter_set);
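
/*
 * Usage sketch (not part of this file; the helpers are assumed from
 * <linux/percpu_counter.h>): percpu_counter_set() zeroes every cpu's
 * buffered delta and installs @amount as the new global value.  It takes
 * only fbc->lock, while __percpu_counter_add() updates *pcount locklessly,
 * so a racing add on another cpu can leave a stale delta behind; quiesce
 * writers if an exact handover is needed.
 *
 *	percpu_counter_set(&fbc, 0);
 *	percpu_counter_add(&fbc, 3);	// buffered in this cpu's s32
 *	// percpu_counter_read(&fbc) can still return 0 here,
 *	// percpu_counter_sum(&fbc) returns 3
 */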

void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;
	s32 *pcount;
	int cpu = get_cpu();

	pcount = per_cpu_ptr(fbc->counters, cpu);
	count = *pcount + amount;
	if (count >= batch || count <= -batch) {
		spin_lock(&fbc->lock);
		fbc->count += count;
		*pcount = 0;
		spin_unlock(&fbc->lock);
	} else {
		*pcount = count;
	}
	put_cpu();
}
EXPORT_SYMBOL(__percpu_counter_add);
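
/*
 * Batching sketch, assuming the percpu_counter_add() wrapper from
 * <linux/percpu_counter.h> that passes FBC_BATCH for @batch:
 *
 *	percpu_counter_add(&fbc, 1);	// *pcount 0 -> 1, no lock taken
 *	...				// FBC_BATCH - 1 more on the same cpu
 *	percpu_counter_add(&fbc, 1);	// *pcount hits FBC_BATCH, so
 *					// fbc->count += FBC_BATCH under
 *					// fbc->lock and *pcount resets to 0
 *
 * The spinlock is thus taken once per @batch local events, at the price of
 * fbc->count lagging the true value by up to batch - 1 per cpu.
 */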

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive()
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;

	spin_lock(&fbc->lock);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	spin_unlock(&fbc->lock);
	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
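
/*
 * Read-side sketch, assuming the wrappers from <linux/percpu_counter.h>:
 *
 *	s64 fast  = percpu_counter_read(&fbc);	// fbc->count only, no lock;
 *						// off by at most
 *						// (batch - 1) * num_online_cpus()
 *	s64 exact = percpu_counter_sum(&fbc);	// adds each online cpu's
 *						// delta under fbc->lock
 *
 * Prefer the _read variants on hot paths; reserve _sum for where accuracy
 * matters, e.g. comparisons against a limit close to zero.
 */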

int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
	spin_lock_init(&fbc->lock);
	fbc->count = amount;
	fbc->counters = alloc_percpu(s32);
	if (!fbc->counters)
		return -ENOMEM;
#ifdef CONFIG_HOTPLUG_CPU
	mutex_lock(&percpu_counters_lock);
	list_add(&fbc->list, &percpu_counters);
	mutex_unlock(&percpu_counters_lock);
#endif
	return 0;
}
EXPORT_SYMBOL(percpu_counter_init);

void percpu_counter_destroy(struct percpu_counter *fbc)
{
	if (!fbc->counters)
		return;

	free_percpu(fbc->counters);
#ifdef CONFIG_HOTPLUG_CPU
	mutex_lock(&percpu_counters_lock);
	list_del(&fbc->list);
	mutex_unlock(&percpu_counters_lock);
#endif
}
EXPORT_SYMBOL(percpu_counter_destroy);
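
/*
 * Typical lifecycle (a sketch; 'nr_things' is a made-up example and the
 * inc/dec helpers are assumed from <linux/percpu_counter.h>):
 *
 *	struct percpu_counter nr_things;
 *
 *	if (percpu_counter_init(&nr_things, 0))
 *		return -ENOMEM;			// allocation failed
 *	percpu_counter_inc(&nr_things);
 *	percpu_counter_dec(&nr_things);
 *	percpu_counter_destroy(&nr_things);	// tolerates a NULL counters
 *						// pointer, see above
 */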

#ifdef CONFIG_HOTPLUG_CPU
static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu;
	struct percpu_counter *fbc;

	if (action != CPU_DEAD)
		return NOTIFY_OK;

	cpu = (unsigned long)hcpu;
	mutex_lock(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;

		spin_lock(&fbc->lock);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		spin_unlock(&fbc->lock);
	}
	mutex_unlock(&percpu_counters_lock);
	return NOTIFY_OK;
}
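
/*
 * Because __percpu_counter_sum() walks only online cpus, the CPU_DEAD fold
 * above is what keeps a dead cpu's buffered delta from being lost: it is
 * moved into fbc->count under fbc->lock.  The initcall below registers the
 * callback with the hotplug notifier chain.
 */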
static int __init percpu_counter_startup(void)
{
	hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
	return 0;
}
module_init(percpu_counter_startup);
#endif