/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>

/*
 * Internal helpers: push an element onto / pop an element off the
 * pool's reserve stack (pool->elements).
 */
static void add_element(mempool_t *pool, void *element)
{
        BUG_ON(pool->curr_nr >= pool->min_nr);
        pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool)
{
        BUG_ON(pool->curr_nr <= 0);
        return pool->elements[--pool->curr_nr];
}

static void free_pool(mempool_t *pool)
{
        /* Drain the reserve, then free the pool bookkeeping itself */
        while (pool->curr_nr) {
                void *element = remove_element(pool);
                pool->free(element, pool->pool_data);
        }
        kfree(pool->elements);
        kfree(pool);
}

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * this function creates and allocates a guaranteed size, preallocated
 * memory pool. The pool can be used from the mempool_alloc and mempool_free
 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
 * functions might sleep - as long as the mempool_alloc function is not called
 * from IRQ contexts.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
                                mempool_free_t *free_fn, void *pool_data)
{
        return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data, -1);
}
EXPORT_SYMBOL(mempool_create);
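
/*
 * Example (a minimal sketch, not part of this file): a typical user pairs
 * mempool_create() with the slab helpers defined at the bottom of this
 * file.  The cache name "io_unit", the element size and the pool depth
 * below are illustrative assumptions, not taken from any real driver.
 *
 *      static kmem_cache_t *io_cachep;
 *      static mempool_t *io_pool;
 *
 *      io_cachep = kmem_cache_create("io_unit", 256, 0, 0, NULL, NULL);
 *      if (!io_cachep)
 *              return -ENOMEM;
 *      io_pool = mempool_create(16, mempool_alloc_slab,
 *                               mempool_free_slab, io_cachep);
 *      if (!io_pool)
 *              return -ENOMEM;
 */
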
mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
                        mempool_free_t *free_fn, void *pool_data, int node_id)
{
        mempool_t *pool;

        pool = kmalloc_node(sizeof(*pool), GFP_KERNEL, node_id);
        if (!pool)
                return NULL;
        memset(pool, 0, sizeof(*pool));
        pool->elements = kmalloc_node(min_nr * sizeof(void *),
                                        GFP_KERNEL, node_id);
        if (!pool->elements) {
                kfree(pool);
                return NULL;
        }
        spin_lock_init(&pool->lock);
        pool->min_nr = min_nr;
        pool->pool_data = pool_data;
        init_waitqueue_head(&pool->wait);
        pool->alloc = alloc_fn;
        pool->free = free_fn;

        /*
         * First pre-allocate the guaranteed number of buffers.
         */
        while (pool->curr_nr < pool->min_nr) {
                void *element;

                element = pool->alloc(GFP_KERNEL, pool->pool_data);
                if (unlikely(!element)) {
                        free_pool(pool);
                        return NULL;
                }
                add_element(pool, element);
        }
        return pool;
}
EXPORT_SYMBOL(mempool_create_node);
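
/*
 * Example (illustrative): a NUMA-aware caller could keep the pool's
 * bookkeeping on a particular node; a node_id of -1 means "any node",
 * which is what mempool_create() above passes.
 *
 *      pool = mempool_create_node(16, mempool_alloc_slab,
 *                                 mempool_free_slab, io_cachep,
 *                                 numa_node_id());
 */
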
/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 * @gfp_mask:   the usual allocation bitmask.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 *
 * Note, the caller must guarantee that no mempool_destroy() is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (eg. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
{
        void *element;
        void **new_elements;
        unsigned long flags;

        BUG_ON(new_min_nr <= 0);

        spin_lock_irqsave(&pool->lock, flags);
        if (new_min_nr <= pool->min_nr) {
                while (new_min_nr < pool->curr_nr) {
                        element = remove_element(pool);
                        spin_unlock_irqrestore(&pool->lock, flags);
                        pool->free(element, pool->pool_data);
                        spin_lock_irqsave(&pool->lock, flags);
                }
                pool->min_nr = new_min_nr;
                goto out_unlock;
        }
        spin_unlock_irqrestore(&pool->lock, flags);

        /* Grow the pool */
        new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
        if (!new_elements)
                return -ENOMEM;

        spin_lock_irqsave(&pool->lock, flags);
        if (unlikely(new_min_nr <= pool->min_nr)) {
                /* Raced, other resize will do our work */
                spin_unlock_irqrestore(&pool->lock, flags);
                kfree(new_elements);
                goto out;
        }
        memcpy(new_elements, pool->elements,
                        pool->curr_nr * sizeof(*new_elements));
        kfree(pool->elements);
        pool->elements = new_elements;
        pool->min_nr = new_min_nr;

        while (pool->curr_nr < pool->min_nr) {
                spin_unlock_irqrestore(&pool->lock, flags);
                element = pool->alloc(gfp_mask, pool->pool_data);
                if (!element)
                        goto out;
                spin_lock_irqsave(&pool->lock, flags);
                if (pool->curr_nr < pool->min_nr) {
                        add_element(pool, element);
                } else {
                        spin_unlock_irqrestore(&pool->lock, flags);
                        pool->free(element, pool->pool_data);   /* Raced */
                        goto out;
                }
        }
out_unlock:
        spin_unlock_irqrestore(&pool->lock, flags);
out:
        return 0;
}
EXPORT_SYMBOL(mempool_resize);
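
/*
 * Example (a sketch under the assumed names from above): a caller that
 * learns it needs deeper reserves can grow an existing pool; growing may
 * complete lazily via later mempool_free() calls if memory is tight.
 *
 *      if (mempool_resize(io_pool, 32, GFP_KERNEL) < 0)
 *              printk(KERN_WARNING "io_pool: resize failed\n");
 */
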
/**
 * mempool_destroy - deallocate a memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 *
 * this function only sleeps if the free_fn() function sleeps. The caller
 * has to guarantee that all elements have been returned to the pool (ie:
 * freed) prior to calling mempool_destroy().
 */
void mempool_destroy(mempool_t *pool)
{
        if (pool->curr_nr != pool->min_nr)
                BUG();          /* There were outstanding elements */
        free_pool(pool);
}
EXPORT_SYMBOL(mempool_destroy);
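
/*
 * Example (illustrative teardown for the sketch above): the pool must be
 * destroyed before the cache backing it, and only once every element has
 * been returned via mempool_free().
 *
 *      mempool_destroy(io_pool);
 *      kmem_cache_destroy(io_cachep);
 */
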
/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:     pointer to the memory pool which was allocated via
 *            mempool_create().
 * @gfp_mask: the usual allocation bitmask.
 *
 * this function only sleeps if the alloc_fn function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
        void *element;
        unsigned long flags;
        wait_queue_t wait;
        unsigned int gfp_temp;

        might_sleep_if(gfp_mask & __GFP_WAIT);

        gfp_mask |= __GFP_NOMEMALLOC;   /* don't allocate emergency reserves */
        gfp_mask |= __GFP_NORETRY;      /* don't loop in __alloc_pages */
        gfp_mask |= __GFP_NOWARN;       /* failures are OK */

        /*
         * The first attempt strips __GFP_WAIT and __GFP_IO so that it
         * never blocks; reclaim is only tried once the pool's own
         * reserve has been found empty.
         */
        gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);

repeat_alloc:

        element = pool->alloc(gfp_temp, pool->pool_data);
        if (likely(element != NULL))
                return element;

        spin_lock_irqsave(&pool->lock, flags);
        if (likely(pool->curr_nr)) {
                element = remove_element(pool);
                spin_unlock_irqrestore(&pool->lock, flags);
                return element;
        }
        spin_unlock_irqrestore(&pool->lock, flags);

        /* We must not sleep in the GFP_ATOMIC case */
        if (!(gfp_mask & __GFP_WAIT))
                return NULL;

        /* Now start performing page reclaim */
        gfp_temp = gfp_mask;

        init_wait(&wait);
        prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
        smp_mb();       /* pairs with the barrier in mempool_free() */
        if (!pool->curr_nr)
                io_schedule();
        finish_wait(&pool->wait, &wait);

        goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
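
/*
 * Example (illustrative; io_pool and struct io_unit are assumed names):
 * with __GFP_WAIT set the allocation cannot fail, so process-context
 * callers need no NULL check, while atomic-context callers still do.
 *
 *      struct io_unit *io = mempool_alloc(io_pool, GFP_NOIO);
 *
 *      struct io_unit *io = mempool_alloc(io_pool, GFP_ATOMIC);
 *      if (!io)
 *              return -ENOMEM;
 */
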
/**
 * mempool_free - return an element to the pool.
 * @element: pool element pointer.
 * @pool:    pointer to the memory pool which was allocated via
 *           mempool_create().
 *
 * this function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
        unsigned long flags;

        smp_mb();       /* pairs with the barrier in mempool_alloc() */
        /* Lockless check first; rechecked under the lock below */
        if (pool->curr_nr < pool->min_nr) {
                spin_lock_irqsave(&pool->lock, flags);
                if (pool->curr_nr < pool->min_nr) {
                        add_element(pool, element);
                        spin_unlock_irqrestore(&pool->lock, flags);
                        wake_up(&pool->wait);
                        return;
                }
                spin_unlock_irqrestore(&pool->lock, flags);
        }
        pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);
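
/*
 * Example (continuing the sketch above): every successful mempool_alloc()
 * is balanced by a mempool_free() on the same pool once the element is
 * done with; this is also what refills the reserve and wakes sleepers.
 *
 *      mempool_free(io, io_pool);
 */
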
/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
        kmem_cache_t *mem = (kmem_cache_t *) pool_data;
        return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
        kmem_cache_t *mem = (kmem_cache_t *) pool_data;
        kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);
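
/*
 * Example (a sketch, not part of this file): pools are not limited to the
 * slab helpers above; any alloc/free pair with matching signatures works.
 * A whole-page-backed pool could look like this (names are illustrative):
 *
 *      static void *mempool_alloc_page(gfp_t gfp_mask, void *pool_data)
 *      {
 *              return (void *)__get_free_page(gfp_mask);
 *      }
 *
 *      static void mempool_free_page(void *element, void *pool_data)
 *      {
 *              free_page((unsigned long)element);
 *      }
 *
 *      pool = mempool_create(4, mempool_alloc_page, mempool_free_page, NULL);
 */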