/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and to associate a pointer,
 * or whatever (we treat it as a (void *)), with that id.  You can pass
 * this id to a user for him to pass back at a later time.  You then
 * pass that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
 * don't need to go to the memory "store" during an id allocate, just
 * so you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */

#ifndef TEST			// to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>

static struct kmem_cache *idr_layer_cache;

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return p;
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr_layer *p)
{
	call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, &p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), &p->bitmap);
	}
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp:	idr handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * idr_get_new* functions.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return 0;
		move_to_free_list(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);
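
/*
 * Example (illustrative sketch, not part of the original file): callers
 * typically loop over idr_pre_get()/idr_get_new() so that a racing
 * allocation which consumed the preallocated layers simply retries.
 * my_idr, my_lock and obj are hypothetical names.
 *
 *	int id, err;
 *
 *	do {
 *		if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = idr_get_new(&my_idr, obj, &id);
 *		spin_unlock(&my_lock);
 *	} while (err == -EAGAIN);
 *
 *	if (err)
 *		return err;	(-ENOSPC: the idr is full)
 */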

static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;
	unsigned long bm;

	id = *starting_id;
restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS * l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (!(p = pa[l])) {
				*starting_id = id;
				return IDR_NEED_TO_GROW;
			}

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS * l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return IDR_NOMORE_SPACE;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = get_from_free_list(idp);
			if (!new)
				return -1;
			new->layer = l - 1;
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = get_from_free_list(idp)))
			return -1;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers * IDR_BITS)))) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			continue;
		}
		if (!(new = get_from_free_list(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers - 1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa);
	if (v == IDR_NEED_TO_GROW)
		goto build_up;
	return v;
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	int id;

	id = idr_get_empty_slot(idp, starting_id, pa);
	if (id >= 0) {
		/*
		 * Successfully found an empty slot.  Install the user
		 * pointer and mark the slot full.
		 */
		rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
				(struct idr_layer *)ptr);
		pa[0]->count++;
		idr_mark_full(pa, id);
	}

	return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && !--((**paa)->count)) {
			if (to_free)
				free_layer(to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->bitmap = to_free->count = 0;
		free_layer(to_free);
	}
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_remove);
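
/*
 * Example (illustrative sketch): since idr_find() may run under
 * rcu_read_lock(), a removed object should only be freed once an RCU
 * grace period has elapsed.  synchronize_rcu() is one way to wait for
 * that; my_idr, my_lock and obj are hypothetical names.
 *
 *	spin_lock(&my_lock);
 *	idr_remove(&my_idr, id);
 *	spin_unlock(&my_lock);
 *	synchronize_rcu();
 *	kfree(obj);
 */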

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idp_layers, but this
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.  (See the sketch below.)
 */
void idr_remove_all(struct idr *idp)
{
	int n, id, max;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		id += 1 << n;
		while (n < fls(id)) {
			if (p)
				free_layer(p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	rcu_assign_pointer(idp->top, NULL);
	idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
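
/*
 * Example (illustrative sketch) of the clean-up sequence described
 * above, assuming a hypothetical free_one() callback like the one
 * sketched after idr_for_each() below:
 *
 *	idr_for_each(&my_idr, free_one, NULL);
 *	idr_remove_all(&my_idr);
 *	idr_destroy(&my_idr);
 */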

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	p = rcu_dereference(idp->top);
	if (!p)
		return NULL;
	n = (p->layer + 1) * IDR_BITS;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer * IDR_BITS);
		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
	}
	return (void *)p;
}
EXPORT_SYMBOL(idr_find);
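
/*
 * Example (illustrative sketch): an RCU-protected lookup.  The caller
 * must not sleep inside the read-side critical section, and the object
 * must not be freed until a grace period has elapsed after its removal
 * from the tree.  my_idr and struct my_obj are hypothetical names.
 *
 *	struct my_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, id);
 *	if (obj)
 *		use obj here, taking a reference if it must
 *		outlive the critical section;
 *	rcu_read_unlock();
 */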

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than 0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference(idp->top);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
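
/*
 * Example (illustrative sketch): a minimal callback that counts the
 * registered pointers; count_obj and my_idr are hypothetical names,
 * and a free_one() callback as used in the idr_remove_all() sketch
 * above would simply kfree() its @p argument and return 0.
 *
 *	static int count_obj(int id, void *p, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	idr_for_each(&my_idr, count_obj, &n);
 */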

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer + 1) * IDR_BITS;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
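
/*
 * Example (illustrative sketch): swapping in a new object for an
 * existing id.  The return value doubles as an error code, so it must
 * be checked with IS_ERR() before use; my_idr, new_obj and old are
 * hypothetical names.
 *
 *	old = idr_replace(&my_idr, new_obj, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 *	dispose of old only after an RCU grace period, since
 *	readers may still see it via idr_find();
 */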

void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);

/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida:	ida handle
 * @gfp_mask:	memory allocation flag
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida:	ida handle
 * @starting_id: id to start search at
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be
 * called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
	if (t < 0)
		return _idr_rc_to_errno(t);

	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* look for an empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_ID_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have a small memory footprint.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

/**
 * ida_get_new - allocate new ID
 * @ida:	ida handle
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);
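
/*
 * Example (illustrative sketch): the ida variant of the allocation
 * loop, paired with ida_remove() on release.  my_ida and my_lock are
 * hypothetical names.
 *
 *	int id, err;
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = ida_get_new(&my_ida, &id);
 *		spin_unlock(&my_lock);
 *	} while (err == -EAGAIN);
 *
 *	...
 *
 *	spin_lock(&my_lock);
 *	ida_remove(&my_ida, id);
 *	spin_unlock(&my_lock);
 */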

/**
 * ida_remove - remove the given ID
 * @ida:	ida handle
 * @id:		ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, &p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);