/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>

static LIST_HEAD(percpu_counters);
static DEFINE_MUTEX(percpu_counters_lock);
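
/*
 * Illustrative usage sketch (hypothetical names, not part of this file):
 * a counter is initialized once, updated cheaply on the fast path, and
 * summed precisely only when an accurate value is needed.
 *
 *	static struct percpu_counter nr_events;
 *
 *	int __init my_stats_init(void)
 *	{
 *		int err = percpu_counter_init(&nr_events, 0);
 *
 *		if (err)
 *			return err;
 *		percpu_counter_add(&nr_events, 1);
 *		pr_info("events: %lld\n", percpu_counter_sum(&nr_events));
 *		percpu_counter_destroy(&nr_events);
 *		return 0;
 *	}
 */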
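
/*
 * Set the counter to @amount, zeroing every per-cpu slot under the lock.
 * Note this is inherently racy against concurrent __percpu_counter_add()
 * callers, which update their per-cpu slot without taking fbc->lock.
 */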
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;

	spin_lock(&fbc->lock);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	spin_unlock(&fbc->lock);
}
EXPORT_SYMBOL(percpu_counter_set);
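
/*
 * Add @amount to the counter. Updates accumulate in the caller's per-cpu
 * slot and are folded into the global fbc->count only once the slot
 * reaches +/-@batch, so fbc->lock is taken on roughly one call in @batch.
 * The cost is drift: percpu_counter_read() can be off by just under
 * batch * num_online_cpus() (e.g. up to 124 with batch 32 on 4 CPUs).
 */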
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;
	s32 *pcount;
	int cpu = get_cpu();

	pcount = per_cpu_ptr(fbc->counters, cpu);
	count = *pcount + amount;
	if (count >= batch || count <= -batch) {
		spin_lock(&fbc->lock);
		fbc->count += count;
		*pcount = 0;
		spin_unlock(&fbc->lock);
	} else {
		*pcount = count;
	}
	put_cpu();
}
EXPORT_SYMBOL(__percpu_counter_add);
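
/*
 * Most callers use the percpu_counter_add() wrapper from
 * <linux/percpu_counter.h>, which passes the global percpu_counter_batch
 * computed below rather than an explicit batch.
 */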
/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;

	spin_lock(&fbc->lock);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	spin_unlock(&fbc->lock);
	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
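
/*
 * Initialize the counter to @amount and allocate its per-cpu slots.
 * Returns 0 on success or -ENOMEM if the per-cpu allocation fails; the
 * @key argument gives each init call site its own lockdep class.
 */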
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
			  struct lock_class_key *key)
{
	spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu(s32);
	if (!fbc->counters)
		return -ENOMEM;

#ifdef CONFIG_HOTPLUG_CPU
	INIT_LIST_HEAD(&fbc->list);
	mutex_lock(&percpu_counters_lock);
	list_add(&fbc->list, &percpu_counters);
	mutex_unlock(&percpu_counters_lock);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);
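
/*
 * Tear down a counter: drop it from the hotplug list and free its
 * per-cpu storage. Safe to call even if initialization failed, since
 * fbc->counters is NULL in that case.
 */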
void percpu_counter_destroy(struct percpu_counter *fbc)
{
	if (!fbc->counters)
		return;

#ifdef CONFIG_HOTPLUG_CPU
	mutex_lock(&percpu_counters_lock);
	list_del(&fbc->list);
	mutex_unlock(&percpu_counters_lock);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);
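
/*
 * Default batch for the percpu_counter_add() fast path: at least 32,
 * and larger on bigger machines to keep lock contention down. It is
 * recomputed from the hotplug notifier as CPUs come and go.
 */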
int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

static void compute_batch_value(void)
{
	int nr = num_online_cpus();

	percpu_counter_batch = max(32, nr * 2);
}
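
/*
 * CPU hotplug callback: recompute the batch for the new CPU count and,
 * on CPU_DEAD, fold the dead CPU's slots back into each counter's global
 * count, since __percpu_counter_sum() only visits online CPUs and would
 * otherwise lose those updates.
 */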
static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
					unsigned long action, void *hcpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	unsigned int cpu;
	struct percpu_counter *fbc;

	compute_batch_value();
	if (action != CPU_DEAD)
		return NOTIFY_OK;

	cpu = (unsigned long)hcpu;
	mutex_lock(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;
		unsigned long flags;

		spin_lock_irqsave(&fbc->lock, flags);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		spin_unlock_irqrestore(&fbc->lock, flags);
	}
	mutex_unlock(&percpu_counters_lock);
#endif
	return NOTIFY_OK;
}
/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less.
 */
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	s64 count;

	count = percpu_counter_read(fbc);
	/* Check to see if rough count will be sufficient for comparison */
	if (abs(count - rhs) > (percpu_counter_batch * num_online_cpus())) {
		if (count > rhs)
			return 1;
		else
			return -1;
	}
	/* Need to use precise count */
	count = percpu_counter_sum(fbc);
	if (count > rhs)
		return 1;
	else if (count < rhs)
		return -1;
	else
		return 0;
}
EXPORT_SYMBOL(percpu_counter_compare);
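
/*
 * Illustrative caller (hypothetical names): a filesystem checking block
 * usage against a limit can rely on the cheap read most of the time and
 * only pay for the precise sum near the boundary:
 *
 *	if (percpu_counter_compare(&sbi->used_blocks, limit) >= 0)
 *		return -ENOSPC;
 */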
static int __init percpu_counter_startup(void)
{
	compute_batch_value();
	hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
	return 0;
}
module_init(percpu_counter_startup);