/*
 *	linux/mm/allocpercpu.c
 *
 *	Separated from slab.c August 11, 2006 Christoph Lameter
 */
# include <linux/mm.h>
# include <linux/module.h>
/* Fallback for architectures that do not provide cache_line_size(). */
#ifndef cache_line_size
#define cache_line_size()	L1_CACHE_BYTES
#endif
2006-09-26 10:31:50 +04:00
/**
* percpu_depopulate - depopulate per - cpu data for given cpu
* @ __pdata : per - cpu data to depopulate
* @ cpu : depopulate per - cpu data for this cpu
*
* Depopulating per - cpu data for a cpu going offline would be a typical
* use case . You need to register a cpu hotplug handler for that purpose .
*/
2008-07-26 06:46:23 +04:00
static void percpu_depopulate ( void * __pdata , int cpu )
2006-09-26 10:31:50 +04:00
{
struct percpu_data * pdata = __percpu_disguise ( __pdata ) ;
2006-12-07 07:32:37 +03:00
kfree ( pdata - > ptrs [ cpu ] ) ;
pdata - > ptrs [ cpu ] = NULL ;
2006-09-26 10:31:50 +04:00
}
/**
* percpu_depopulate_mask - depopulate per - cpu data for some cpu ' s
* @ __pdata : per - cpu data to depopulate
* @ mask : depopulate per - cpu data for cpu ' s selected through mask bits
*/
2008-07-26 06:46:23 +04:00
static void __percpu_depopulate_mask ( void * __pdata , cpumask_t * mask )
2006-09-26 10:31:50 +04:00
{
int cpu ;
2008-05-12 23:21:13 +04:00
for_each_cpu_mask_nr ( cpu , * mask )
2006-09-26 10:31:50 +04:00
percpu_depopulate ( __pdata , cpu ) ;
}
2008-07-26 06:46:23 +04:00
# define percpu_depopulate_mask(__pdata, mask) \
__percpu_depopulate_mask ( ( __pdata ) , & ( mask ) )
2006-09-26 10:31:50 +04:00
/**
* percpu_populate - populate per - cpu data for given cpu
* @ __pdata : per - cpu data to populate further
* @ size : size of per - cpu object
* @ gfp : may sleep or not etc .
* @ cpu : populate per - data for this cpu
*
* Populating per - cpu data for a cpu coming online would be a typical
* use case . You need to register a cpu hotplug handler for that purpose .
* Per - cpu object is populated with zeroed buffer .
*/
2008-07-26 06:46:23 +04:00
static void * percpu_populate ( void * __pdata , size_t size , gfp_t gfp , int cpu )
2006-09-26 10:31:50 +04:00
{
struct percpu_data * pdata = __percpu_disguise ( __pdata ) ;
int node = cpu_to_node ( cpu ) ;
2008-03-05 01:28:35 +03:00
/*
* We should make sure each CPU gets private memory .
*/
size = roundup ( size , cache_line_size ( ) ) ;
2006-09-26 10:31:50 +04:00
BUG_ON ( pdata - > ptrs [ cpu ] ) ;
2007-07-17 15:03:29 +04:00
if ( node_online ( node ) )
pdata - > ptrs [ cpu ] = kmalloc_node ( size , gfp | __GFP_ZERO , node ) ;
else
2006-09-26 10:31:50 +04:00
pdata - > ptrs [ cpu ] = kzalloc ( size , gfp ) ;
return pdata - > ptrs [ cpu ] ;
}
/**
* percpu_populate_mask - populate per - cpu data for more cpu ' s
* @ __pdata : per - cpu data to populate further
* @ size : size of per - cpu object
* @ gfp : may sleep or not etc .
* @ mask : populate per - cpu data for cpu ' s selected through mask bits
*
* Per - cpu objects are populated with zeroed buffers .
*/
2008-07-26 06:46:23 +04:00
static int __percpu_populate_mask ( void * __pdata , size_t size , gfp_t gfp ,
cpumask_t * mask )
2006-09-26 10:31:50 +04:00
{
2008-04-05 05:11:12 +04:00
cpumask_t populated ;
2006-09-26 10:31:50 +04:00
int cpu ;
2008-04-05 05:11:12 +04:00
cpus_clear ( populated ) ;
2008-05-12 23:21:13 +04:00
for_each_cpu_mask_nr ( cpu , * mask )
2006-09-26 10:31:50 +04:00
if ( unlikely ( ! percpu_populate ( __pdata , size , gfp , cpu ) ) ) {
__percpu_depopulate_mask ( __pdata , & populated ) ;
return - ENOMEM ;
} else
cpu_set ( cpu , populated ) ;
return 0 ;
}
2008-07-26 06:46:23 +04:00
# define percpu_populate_mask(__pdata, size, gfp, mask) \
__percpu_populate_mask ( ( __pdata ) , ( size ) , ( gfp ) , & ( mask ) )
2006-09-26 10:31:50 +04:00
/**
* percpu_alloc_mask - initial setup of per - cpu data
* @ size : size of per - cpu object
* @ gfp : may sleep or not etc .
* @ mask : populate per - data for cpu ' s selected through mask bits
*
* Populating per - cpu data for all online cpu ' s would be a typical use case ,
* which is simplified by the percpu_alloc ( ) wrapper .
* Per - cpu objects are populated with zeroed buffers .
*/
void * __percpu_alloc_mask ( size_t size , gfp_t gfp , cpumask_t * mask )
{
2008-03-05 01:28:35 +03:00
/*
* We allocate whole cache lines to avoid false sharing
*/
size_t sz = roundup ( nr_cpu_ids * sizeof ( void * ) , cache_line_size ( ) ) ;
void * pdata = kzalloc ( sz , gfp ) ;
2006-09-26 10:31:50 +04:00
void * __pdata = __percpu_disguise ( pdata ) ;
if ( unlikely ( ! pdata ) )
return NULL ;
if ( likely ( ! __percpu_populate_mask ( __pdata , size , gfp , mask ) ) )
return __pdata ;
kfree ( pdata ) ;
return NULL ;
}
EXPORT_SYMBOL_GPL ( __percpu_alloc_mask ) ;
/**
* percpu_free - final cleanup of per - cpu data
* @ __pdata : object to clean up
*
* We simply clean up any per - cpu object left . No need for the client to
* track and specify through a bis mask which per - cpu objects are to free .
*/
void percpu_free ( void * __pdata )
{
2006-12-07 07:32:37 +03:00
if ( unlikely ( ! __pdata ) )
return ;
2006-09-26 10:31:50 +04:00
__percpu_depopulate_mask ( __pdata , & cpu_possible_map ) ;
kfree ( __percpu_disguise ( __pdata ) ) ;
}
EXPORT_SYMBOL_GPL ( percpu_free ) ;