/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>

static void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool)
{
	BUG_ON(pool->curr_nr <= 0);
	return pool->elements[--pool->curr_nr];
}

static void free_pool(mempool_t *pool)
{
	while (pool->curr_nr) {
		void *element = remove_element(pool);
		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	kfree(pool);
}

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * this function creates and allocates a guaranteed size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and
 * mempool_free() functions. This function might sleep. Both the alloc_fn()
 * and the free_fn() functions might sleep - as long as the mempool_alloc()
 * function is not called from IRQ contexts.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
			  mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data, -1);
}
EXPORT_SYMBOL(mempool_create);

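/*
 * Illustrative usage sketch (an assumption added here, not part of the
 * original file): a pool guaranteeing at least two elements, backed by a
 * slab cache via the helpers defined at the bottom of this file.  The
 * names io_req_cache and struct io_req are hypothetical.
 *
 *	static struct kmem_cache *io_req_cache;
 *	static mempool_t *io_req_pool;
 *
 *	io_req_cache = kmem_cache_create("io_req", sizeof(struct io_req),
 *					 0, 0, NULL);
 *	io_req_pool = mempool_create(2, mempool_alloc_slab,
 *				     mempool_free_slab, io_req_cache);
 *	if (!io_req_pool)
 *		goto fail;
 */
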
mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			       mempool_free_t *free_fn, void *pool_data,
			       int node_id)
{
	mempool_t *pool;

	pool = kmalloc_node(sizeof(*pool), GFP_KERNEL | __GFP_ZERO, node_id);
	if (!pool)
		return NULL;
	pool->elements = kmalloc_node(min_nr * sizeof(void *),
				      GFP_KERNEL, node_id);
	if (!pool->elements) {
		kfree(pool);
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	init_waitqueue_head(&pool->wait);
	pool->alloc = alloc_fn;
	pool->free = free_fn;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (unlikely(!element)) {
			free_pool(pool);
			return NULL;
		}
		add_element(pool, element);
	}
	return pool;
}
EXPORT_SYMBOL(mempool_create_node);

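/*
 * Illustrative sketch (assumed call site, not from the original file): the
 * same pool as above, but with its bookkeeping allocated on the local NUMA
 * node.  A node_id of -1, as passed by mempool_create() above, means no
 * node preference.
 *
 *	pool = mempool_create_node(2, mempool_alloc_slab, mempool_free_slab,
 *				   io_req_cache, numa_node_id());
 */
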
/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 * @gfp_mask:   the usual allocation bitmask.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 *
 * Note, the caller must guarantee that no mempool_destroy() is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (eg. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
			pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(gfp_mask, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);

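/*
 * Illustrative sketch (assumed call site, not part of the original file):
 * growing the hypothetical pool from the example above so that 16 elements
 * are guaranteed; shrinking works the same way with a smaller new_min_nr.
 *
 *	if (mempool_resize(io_req_pool, 16, GFP_KERNEL))
 *		printk(KERN_WARNING "io_req pool resize failed\n");
 */
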
/**
 * mempool_destroy - deallocate a memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 *
 * this function only sleeps if the free_fn() function sleeps. The caller
 * has to guarantee that all elements have been returned to the pool (ie:
 * freed) prior to calling mempool_destroy().
 */
void mempool_destroy(mempool_t *pool)
{
	/* Check for outstanding elements */
	BUG_ON(pool->curr_nr != pool->min_nr);
	free_pool(pool);
}
EXPORT_SYMBOL(mempool_destroy);

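/*
 * Illustrative teardown sketch (assumed, matching the hypothetical creation
 * example earlier in this file): the pool must be destroyed only after every
 * element has been returned via mempool_free(), and before the backing cache
 * goes away.
 *
 *	mempool_destroy(io_req_pool);
 *	kmem_cache_destroy(io_req_cache);
 */
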
/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:     pointer to the memory pool which was allocated via
 *            mempool_create().
 * @gfp_mask: the usual allocation bitmask.
 *
 * this function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_t wait;
	gfp_t gfp_temp;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		return element;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* We must not sleep in the GFP_ATOMIC case */
	if (!(gfp_mask & __GFP_WAIT))
		return NULL;

	/* Now start performing page reclaim */
	gfp_temp = gfp_mask;

	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
	smp_mb();
	if (!pool->curr_nr) {
		/*
		 * FIXME: this should be io_schedule().  The timeout is there
		 * as a workaround for some DM problems in 2.6.18.
		 */
		io_schedule_timeout(5*HZ);
	}
	finish_wait(&pool->wait, &wait);

	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);

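/*
 * Illustrative sketch (assumed call site, not part of the original file):
 * a typical I/O-path allocation.  With __GFP_WAIT set (as in GFP_NOIO) the
 * call cannot fail from process context, so no NULL check is needed; the
 * element goes back via mempool_free().
 *
 *	struct io_req *req;
 *
 *	req = mempool_alloc(io_req_pool, GFP_NOIO);
 *	... use req ...
 *	mempool_free(req, io_req_pool);
 */
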
/**
 * mempool_free - return an element to the pool.
 * @element: pool element pointer.
 * @pool:    pointer to the memory pool which was allocated via
 *           mempool_create().
 *
 * this function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	smp_mb();
	if (pool->curr_nr < pool->min_nr) {
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);

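/*
 * Illustrative sketch (assumed, not part of the original file): a custom
 * alloc/free pair has the same shape as the slab helpers above.  Here
 * pool_data carries a hypothetical per-device context struct.
 *
 *	static void *my_alloc(gfp_t gfp_mask, void *pool_data)
 *	{
 *		struct my_dev *dev = pool_data;
 *		return kmalloc(dev->elem_size, gfp_mask);
 *	}
 *
 *	static void my_free(void *element, void *pool_data)
 *	{
 *		kfree(element);
 *	}
 */
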
/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;
	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);

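/*
 * Illustrative sketch (assumed call site): with these helpers, pool_data is
 * the element size itself, cast to a pointer.
 *
 *	pool = mempool_create(4, mempool_kmalloc, mempool_kfree,
 *			      (void *)(size_t)512);
 */
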
/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;
	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;
	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);

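/*
 * Illustrative sketch (assumed call site): here pool_data is the page order,
 * so this reserves four order-1 (two-page) allocations.
 *
 *	pool = mempool_create(4, mempool_alloc_pages, mempool_free_pages,
 *			      (void *)(long)1);
 */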