/*
 * A Remote Heap.  Remote means that we don't touch the memory that the
 * heap points to.  Normal heap implementations use the memory they manage
 * to place their list.  We cannot do that because the memory we manage may
 * have special properties, for example it is uncachable or of different
 * endianess.
 *
 * Author: Pantelis Antoniou <panto@intracom.gr>
 *
 * 2004 (c) INTRACOM S.A. Greece.  This file is licensed under
 * the terms of the GNU General Public License version 2.  This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
# include <linux/types.h>
# include <linux/errno.h>
2007-02-05 16:14:09 -08:00
# include <linux/kernel.h>
2011-07-22 18:24:23 -04:00
# include <linux/export.h>
2005-09-26 16:04:21 +10:00
# include <linux/mm.h>
2007-07-30 02:36:13 +04:00
# include <linux/err.h>
2005-09-26 16:04:21 +10:00
# include <linux/slab.h>
# include <asm/rheap.h>
/*
* Fixup a list_head , needed when copying lists . If the pointers fall
* between s and e , apply the delta . This assumes that
* sizeof ( struct list_head * ) = = sizeof ( unsigned long * ) .
*/
static inline void fixup ( unsigned long s , unsigned long e , int d ,
struct list_head * l )
{
unsigned long * pp ;
pp = ( unsigned long * ) & l - > next ;
if ( * pp > = s & & * pp < e )
* pp + = d ;
pp = ( unsigned long * ) & l - > prev ;
if ( * pp > = s & & * pp < e )
* pp + = d ;
}
/*
 * Grow the block-descriptor array to max_blocks entries.
 *
 * Allocates a new array, copies the old descriptors over, and relocates
 * every list pointer that pointed into the old array (the descriptors are
 * linked into the empty/free/taken lists in place, so a move invalidates
 * those links).  All new descriptors are put on the empty list.
 *
 * Returns 0 on success, -EINVAL if max_blocks does not grow the array,
 * -ENOMEM on allocation failure.
 */
static int grow(rh_info_t *info, int max_blocks)
{
	rh_block_t *block, *blk;
	int i, new_blocks;
	int delta;
	unsigned long blks, blke;

	if (max_blocks <= info->max_blocks)
		return -EINVAL;

	new_blocks = max_blocks - info->max_blocks;

	/* NOTE(review): sizeof(rh_block_t) * max_blocks is an unchecked
	 * multiplication; callers are trusted to keep max_blocks small
	 * (assure_empty() grows in steps of 16) — confirm. */
	block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_ATOMIC);
	if (block == NULL)
		return -ENOMEM;

	if (info->max_blocks > 0) {

		/* copy old block area */
		memcpy(block, info->block,
		       sizeof(rh_block_t) * info->max_blocks);

		delta = (char *)block - (char *)info->block;

		/* and fixup list pointers */
		blks = (unsigned long)info->block;
		blke = (unsigned long)(info->block + info->max_blocks);

		/* fix links stored inside each moved descriptor ... */
		for (i = 0, blk = block; i < info->max_blocks; i++, blk++)
			fixup(blks, blke, delta, &blk->list);

		/* ... and the list heads that point at moved descriptors */
		fixup(blks, blke, delta, &info->empty_list);
		fixup(blks, blke, delta, &info->free_list);
		fixup(blks, blke, delta, &info->taken_list);

		/* free the old allocated memory */
		if ((info->flags & RHIF_STATIC_BLOCK) == 0)
			kfree(info->block);
	}

	info->block = block;
	info->empty_slots += new_blocks;
	info->max_blocks = max_blocks;
	/* The array is now dynamically allocated, even if it started static. */
	info->flags &= ~RHIF_STATIC_BLOCK;

	/* add all new blocks to the free list */
	blk = block + info->max_blocks - new_blocks;
	for (i = 0; i < new_blocks; i++, blk++)
		list_add(&blk->list, &info->empty_list);

	return 0;
}
/*
 * Assure at least the required amount of empty slots.  If this function
 * causes a grow in the block area then all pointers kept to the block
 * area are invalid!
 */
static int assure_empty(rh_info_t *info, int slots)
{
	int wanted;

	/* This function is not meant to be used to grow uncontrollably */
	if (slots >= 4)
		return -EINVAL;

	/* Already enough spare descriptors? */
	if (info->empty_slots >= slots)
		return 0;

	/* Grow to the next multiple of 16 descriptors. */
	wanted = ((info->max_blocks + slots) + 15) & ~15;
	return grow(info, wanted);
}
/*
 * Take one descriptor off the empty list and reset it.
 * Callers must have called assure_empty() first; returns NULL if the
 * empty list has been exhausted anyway.
 */
static rh_block_t *get_slot(rh_info_t *info)
{
	rh_block_t *slot;

	if (info->empty_slots == 0) {
		printk(KERN_ERR "rh: out of slots; crash is imminent.\n");
		return NULL;
	}

	/* Detach the first empty descriptor. */
	slot = list_entry(info->empty_list.next, rh_block_t, list);
	list_del_init(&slot->list);
	info->empty_slots--;

	/* Hand it back fully reset. */
	slot->start = 0;
	slot->size = 0;
	slot->owner = NULL;

	return slot;
}
/* Return a descriptor to the empty list for reuse. */
static inline void release_slot(rh_info_t *info, rh_block_t *blk)
{
	info->empty_slots++;
	list_add(&blk->list, &info->empty_list);
}
/*
 * Insert the free range described by blkn into the free list,
 * coalescing with an adjacent predecessor and/or successor block when
 * their address ranges touch.  The descriptor of any block merged away
 * is returned to the empty list.
 */
static void attach_free_block(rh_info_t *info, rh_block_t *blkn)
{
	rh_block_t *blk;
	rh_block_t *before;
	rh_block_t *after;
	rh_block_t *next;
	int size;
	unsigned long s, e, bs, be;
	struct list_head *l;

	/* We assume that they are aligned properly */
	size = blkn->size;
	s = blkn->start;
	e = s + size;

	/* Find the blocks immediately before and after the given one
	 * (if any) */
	before = NULL;
	after = NULL;
	next = NULL;

	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);

		bs = blk->start;
		be = bs + blk->size;

		/* First block whose start is at or below s: remember it as
		 * the insertion anchor for the non-coalescing case.
		 * NOTE(review): this relies on the free-list ordering that
		 * these list_add() calls themselves produce — confirm. */
		if (next == NULL && s >= bs)
			next = blk;

		if (be == s)
			before = blk;

		if (e == bs)
			after = blk;

		/* If both are not null, break now */
		if (before != NULL && after != NULL)
			break;
	}

	/* Now check if they are really adjacent */
	if (before && s != (before->start + before->size))
		before = NULL;

	if (after && e != after->start)
		after = NULL;

	/* No coalescing; list insert and return */
	if (before == NULL && after == NULL) {

		if (next != NULL)
			list_add(&blkn->list, &next->list);
		else
			list_add(&blkn->list, &info->free_list);

		return;
	}

	/* We don't need it anymore */
	release_slot(info, blkn);

	/* Grow the before block */
	if (before != NULL && after == NULL) {
		before->size += size;
		return;
	}

	/* Grow the after block backwards */
	if (before == NULL && after != NULL) {
		after->start -= size;
		after->size += size;
		return;
	}

	/* Grow the before block, and release the after block */
	before->size += size + after->size;
	list_del(&after->list);
	release_slot(info, after);
}
/*
 * Insert blkn into the taken list, keeping the list sorted by
 * ascending start address.
 */
static void attach_taken_block(rh_info_t *info, rh_block_t *blkn)
{
	struct list_head *pos;
	rh_block_t *cur;

	/* Insert just before the first block that starts above blkn. */
	list_for_each(pos, &info->taken_list) {
		cur = list_entry(pos, rh_block_t, list);
		if (cur->start > blkn->start) {
			list_add_tail(&blkn->list, &cur->list);
			return;
		}
	}

	/* Nothing starts above it: append at the end. */
	list_add_tail(&blkn->list, &info->taken_list);
}
/*
* Create a remote heap dynamically . Note that no memory for the blocks
* are allocated . It will upon the first allocation
*/
rh_info_t * rh_create ( unsigned int alignment )
{
rh_info_t * info ;
/* Alignment must be a power of two */
if ( ( alignment & ( alignment - 1 ) ) ! = 0 )
return ERR_PTR ( - EINVAL ) ;
2008-04-14 10:43:38 -05:00
info = kmalloc ( sizeof ( * info ) , GFP_ATOMIC ) ;
2005-09-26 16:04:21 +10:00
if ( info = = NULL )
return ERR_PTR ( - ENOMEM ) ;
info - > alignment = alignment ;
/* Initially everything as empty */
info - > block = NULL ;
info - > max_blocks = 0 ;
info - > empty_slots = 0 ;
info - > flags = 0 ;
INIT_LIST_HEAD ( & info - > empty_list ) ;
INIT_LIST_HEAD ( & info - > free_list ) ;
INIT_LIST_HEAD ( & info - > taken_list ) ;
return info ;
}
2007-09-16 20:53:24 +10:00
EXPORT_SYMBOL_GPL ( rh_create ) ;
2005-09-26 16:04:21 +10:00
/*
 * Destroy a dynamically created remote heap.  Deallocate only if the areas
 * are not static.
 */
void rh_destroy(rh_info_t *info)
{
	int block_is_dynamic = (info->flags & RHIF_STATIC_BLOCK) == 0;

	if (block_is_dynamic && info->block != NULL)
		kfree(info->block);

	if ((info->flags & RHIF_STATIC_INFO) == 0)
		kfree(info);
}
EXPORT_SYMBOL_GPL(rh_destroy);
2005-09-26 16:04:21 +10:00
/*
 * Initialize in place a remote heap info block.  This is needed to support
 * operation very early in the startup of the kernel, when it is not yet safe
 * to call kmalloc.  Both info and the descriptor array are caller-provided
 * and marked static so rh_destroy() will not free them.
 */
void rh_init(rh_info_t *info, unsigned int alignment, int max_blocks,
	     rh_block_t *block)
{
	int i;
	rh_block_t *blk;

	/* Alignment must be a non-zero power of two.  alignment == 0 would
	 * pass the power-of-two test below, but every later rounding mask
	 * (alignment - 1) would wrap to ~0UL and corrupt all allocations. */
	if (alignment == 0 || (alignment & (alignment - 1)) != 0)
		return;

	info->alignment = alignment;

	/* Initially everything as empty */
	info->block = block;
	info->max_blocks = max_blocks;
	info->empty_slots = max_blocks;
	info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK;

	INIT_LIST_HEAD(&info->empty_list);
	INIT_LIST_HEAD(&info->free_list);
	INIT_LIST_HEAD(&info->taken_list);

	/* Add all new blocks to the free list */
	for (i = 0, blk = block; i < max_blocks; i++, blk++)
		list_add(&blk->list, &info->empty_list);
}
EXPORT_SYMBOL_GPL(rh_init);
2005-09-26 16:04:21 +10:00
/* Attach a free memory region, coalesces regions if adjuscent */
2007-05-08 14:46:36 -05:00
int rh_attach_region ( rh_info_t * info , unsigned long start , int size )
2005-09-26 16:04:21 +10:00
{
rh_block_t * blk ;
unsigned long s , e , m ;
int r ;
/* The region must be aligned */
2007-05-08 14:46:36 -05:00
s = start ;
2005-09-26 16:04:21 +10:00
e = s + size ;
m = info - > alignment - 1 ;
/* Round start up */
s = ( s + m ) & ~ m ;
/* Round end down */
e = e & ~ m ;
2007-05-08 14:46:36 -05:00
if ( IS_ERR_VALUE ( e ) | | ( e < s ) )
return - ERANGE ;
2005-09-26 16:04:21 +10:00
/* Take final values */
2007-05-08 14:46:36 -05:00
start = s ;
size = e - s ;
2005-09-26 16:04:21 +10:00
/* Grow the blocks, if needed */
r = assure_empty ( info , 1 ) ;
if ( r < 0 )
return r ;
blk = get_slot ( info ) ;
blk - > start = start ;
blk - > size = size ;
blk - > owner = NULL ;
attach_free_block ( info , blk ) ;
return 0 ;
}
2007-09-16 20:53:24 +10:00
EXPORT_SYMBOL_GPL ( rh_attach_region ) ;
2005-09-26 16:04:21 +10:00
/* Detatch given address range, splits free block if needed.
 *
 * Removes [start, start + size), rounded inward to the heap alignment,
 * from the free list entirely (the range is neither free nor taken
 * afterwards).  The range must lie within a single free block.
 * Returns the aligned start of the detached range, or a negative errno
 * cast to unsigned long on failure.
 */
unsigned long rh_detach_region(rh_info_t *info, unsigned long start, int size)
{
	struct list_head *l;
	rh_block_t *blk, *newblk;
	unsigned long s, e, m, bs, be;

	/* Validate size */
	if (size <= 0)
		return (unsigned long) -EINVAL;

	/* The region must be aligned */
	s = start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	/* Splitting may need one extra descriptor. */
	if (assure_empty(info, 1) < 0)
		return (unsigned long) -ENOMEM;

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = blk->start;
		be = blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		blk = NULL;
	}

	if (blk == NULL)
		return (unsigned long) -ENOMEM;

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Delete from free list, release slot */
		list_del(&blk->list);
		release_slot(info, blk);
		return s;
	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		/* NOTE(review): trims by the caller-supplied size, not the
		 * aligned span e - s; callers appear expected to pass
		 * already-aligned sizes — confirm. */
		if (bs == s)
			blk->start += size;
		blk->size -= size;

	} else {
		/* The front free fragment */
		blk->size = s - bs;

		/* the back free fragment */
		newblk = get_slot(info);
		newblk->start = e;
		newblk->size = be - e;

		list_add(&newblk->list, &blk->list);
	}

	return s;
}
EXPORT_SYMBOL_GPL(rh_detach_region);
2005-09-26 16:04:21 +10:00
2007-05-08 14:46:36 -05:00
/* Allocate a block of memory at the specified alignment.  The value returned
 * is an offset into the buffer initialized by rh_init(), or a negative number
 * if there is an error.
 *
 * First-fit search: takes the first free block that can hold size bytes
 * starting at an address aligned to the requested alignment.  Splitting may
 * create a leading free fragment (before the aligned start) and/or leave a
 * trailing free fragment, hence assure_empty(info, 2).
 */
unsigned long rh_alloc_align(rh_info_t *info, int size, int alignment, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk;
	rh_block_t *newblk;
	unsigned long start, sp_size;

	/* Validate size, and alignment must be power of two */
	if (size <= 0 || (alignment & (alignment - 1)) != 0)
		return (unsigned long) -EINVAL;

	/* Align to configured alignment */
	size = (size + (info->alignment - 1)) & ~(info->alignment - 1);

	if (assure_empty(info, 2) < 0)
		return (unsigned long) -ENOMEM;

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		if (size <= blk->size) {
			/* Round this block's start up to the requested
			 * alignment; it fits only if the aligned range still
			 * ends inside the block. */
			start = (blk->start + alignment - 1) & ~(alignment - 1);
			if (start + size <= blk->start + blk->size)
				break;
		}
		blk = NULL;
	}

	if (blk == NULL)
		return (unsigned long) -ENOMEM;

	/* Just fits */
	if (blk->size == size) {
		/* Move from free list to taken list */
		list_del(&blk->list);
		newblk = blk;
	} else {
		/* Fragment caused, split if needed */
		/* Create block for fragment in the beginning */
		sp_size = start - blk->start;
		if (sp_size) {
			rh_block_t *spblk;

			spblk = get_slot(info);
			spblk->start = blk->start;
			spblk->size = sp_size;
			/* add before the blk */
			list_add(&spblk->list, blk->list.prev);
		}
		newblk = get_slot(info);
		newblk->start = start;
		newblk->size = size;

		/* blk still in free list, with updated start and size
		 * for fragment in the end */
		blk->start = start + size;
		blk->size -= sp_size + size;
		/* No fragment in the end, remove blk */
		if (blk->size == 0) {
			list_del(&blk->list);
			release_slot(info, blk);
		}
	}

	newblk->owner = owner;
	attach_taken_block(info, newblk);

	return start;
}
EXPORT_SYMBOL_GPL(rh_alloc_align);
2005-09-26 16:04:21 +10:00
2007-05-08 14:46:36 -05:00
/* Allocate a block of memory at the default alignment.  The value returned is
 * an offset into the buffer initialized by rh_init(), or a negative number if
 * there is an error.  Thin convenience wrapper around rh_alloc_align() using
 * the heap's configured alignment.
 */
unsigned long rh_alloc(rh_info_t *info, int size, const char *owner)
{
	return rh_alloc_align(info, size, info->alignment, owner);
}
EXPORT_SYMBOL_GPL(rh_alloc);
2006-09-29 18:15:52 +08:00
2007-05-08 14:46:36 -05:00
/* Allocate a block of memory at the given offset, rounded up to the default
 * alignment.  The value returned is an offset into the buffer initialized by
 * rh_init(), or a negative number if there is an error.
 *
 * The (aligned) range must lie entirely inside a single free block; that
 * block is split as needed, with the requested range moved to the taken
 * list and any remaining front/back fragments left on the free list.
 */
unsigned long rh_alloc_fixed(rh_info_t *info, unsigned long start, int size, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk, *newblk1, *newblk2;
	unsigned long s, e, m, bs = 0, be = 0;

	/* Validate size */
	if (size <= 0)
		return (unsigned long) -EINVAL;

	/* The region must be aligned */
	s = start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	/* Splitting may need up to two extra descriptors. */
	if (assure_empty(info, 2) < 0)
		return (unsigned long) -ENOMEM;

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = blk->start;
		be = blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		blk = NULL;
	}

	if (blk == NULL)
		return (unsigned long) -ENOMEM;

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Move from free list to taken list */
		list_del(&blk->list);
		blk->owner = owner;

		start = blk->start;
		attach_taken_block(info, blk);

		return start;
	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		/* NOTE(review): trims by the caller-supplied size, not the
		 * aligned span e - s — confirm callers pass aligned sizes. */
		if (bs == s)
			blk->start += size;
		blk->size -= size;

	} else {
		/* The front free fragment */
		blk->size = s - bs;

		/* The back free fragment */
		newblk2 = get_slot(info);
		newblk2->start = e;
		newblk2->size = be - e;

		list_add(&newblk2->list, &blk->list);
	}

	/* Descriptor for the taken range itself. */
	newblk1 = get_slot(info);
	newblk1->start = s;
	newblk1->size = e - s;
	newblk1->owner = owner;

	start = newblk1->start;
	attach_taken_block(info, newblk1);

	return start;
}
EXPORT_SYMBOL_GPL(rh_alloc_fixed);
2005-09-26 16:04:21 +10:00
2007-05-08 14:46:36 -05:00
/* Deallocate the memory previously allocated by one of the rh_alloc functions.
 * The return value is the size of the deallocated block, or a negative number
 * if there is an error.
 */
int rh_free(rh_info_t *info, unsigned long start)
{
	rh_block_t *found = NULL;
	rh_block_t *cur;
	struct list_head *pos;
	int size;

	/* Walk the taken list (sorted by start address) and remember the
	 * last block whose start does not exceed the requested address. */
	list_for_each(pos, &info->taken_list) {
		cur = list_entry(pos, rh_block_t, list);
		if (start < cur->start)
			break;
		found = cur;
	}

	/* Reject addresses that fall beyond the candidate block. */
	if (found == NULL || start > (found->start + found->size))
		return -EINVAL;

	/* Remove from taken list */
	list_del(&found->list);

	size = found->size;
	attach_free_block(info, found);

	return size;
}
EXPORT_SYMBOL_GPL(rh_free);
2005-09-26 16:04:21 +10:00
/*
 * Snapshot the free or taken list into the caller's stats array.
 * Copies at most max_stats entries but always returns the total number
 * of blocks on the selected list (which may exceed max_stats).
 * Returns -EINVAL for an unknown 'what' selector.
 */
int rh_get_stats(rh_info_t *info, int what, int max_stats, rh_stats_t *stats)
{
	struct list_head *head;
	struct list_head *pos;
	rh_block_t *blk;
	int count = 0;

	if (what == RHGS_FREE)
		head = &info->free_list;
	else if (what == RHGS_TAKEN)
		head = &info->taken_list;
	else
		return -EINVAL;

	list_for_each(pos, head) {
		blk = list_entry(pos, rh_block_t, list);

		/* Copy out while there is room in the caller's array. */
		if (stats != NULL && count < max_stats) {
			stats->start = blk->start;
			stats->size = blk->size;
			stats->owner = blk->owner;
			stats++;
		}
		count++;
	}

	return count;
}
EXPORT_SYMBOL_GPL(rh_get_stats);
2005-09-26 16:04:21 +10:00
2007-05-08 14:46:36 -05:00
/*
 * Re-tag the taken block containing 'start' with a new owner string.
 * Returns the block's size, or -EINVAL if no taken block matches.
 */
int rh_set_owner(rh_info_t *info, unsigned long start, const char *owner)
{
	rh_block_t *found = NULL;
	rh_block_t *cur;
	struct list_head *pos;

	/* Walk the taken list (sorted by start address) and remember the
	 * last block whose start does not exceed the requested address. */
	list_for_each(pos, &info->taken_list) {
		cur = list_entry(pos, rh_block_t, list);
		if (start < cur->start)
			break;
		found = cur;
	}

	if (found == NULL || start > (found->start + found->size))
		return -EINVAL;

	found->owner = owner;
	return found->size;
}
EXPORT_SYMBOL_GPL(rh_set_owner);
2005-09-26 16:04:21 +10:00
/*
 * Dump the whole heap state (free list, then taken list) to the kernel
 * log.  Snapshots each list through rh_get_stats() into a static
 * 32-entry buffer, so only the first 32 blocks of each list are shown.
 * NOTE(review): the static buffer is shared — concurrent callers would
 * race; presumably this is a debug-only helper, confirm.
 */
void rh_dump(rh_info_t *info)
{
	static rh_stats_t st[32];	/* XXX maximum 32 blocks */
	int maxnr;
	int i, nr;

	maxnr = ARRAY_SIZE(st);

	printk(KERN_INFO
	       "info @0x%p (%d slots empty / %d max)\n",
	       info, info->empty_slots, info->max_blocks);

	printk(KERN_INFO "  Free:\n");
	nr = rh_get_stats(info, RHGS_FREE, maxnr, st);
	/* rh_get_stats returns the total count; clamp to what was copied. */
	if (nr > maxnr)
		nr = maxnr;
	for (i = 0; i < nr; i++)
		printk(KERN_INFO
		       "    0x%lx-0x%lx (%u)\n",
		       st[i].start, st[i].start + st[i].size,
		       st[i].size);
	printk(KERN_INFO "\n");

	printk(KERN_INFO "  Taken:\n");
	nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st);
	if (nr > maxnr)
		nr = maxnr;
	for (i = 0; i < nr; i++)
		printk(KERN_INFO
		       "    0x%lx-0x%lx (%u) %s\n",
		       st[i].start, st[i].start + st[i].size,
		       st[i].size, st[i].owner != NULL ? st[i].owner : "");
	printk(KERN_INFO "\n");
}
EXPORT_SYMBOL_GPL(rh_dump);
2005-09-26 16:04:21 +10:00
/* Dump a single block descriptor (its address and address range) to the
 * kernel log.  The info argument is unused here but kept for interface
 * symmetry with rh_dump(). */
void rh_dump_blk(rh_info_t *info, rh_block_t *blk)
{
	printk(KERN_INFO
	       "blk @0x%p: 0x%lx-0x%lx (%u)\n",
	       blk, blk->start, blk->start + blk->size, blk->size);
}
EXPORT_SYMBOL_GPL(rh_dump_blk);