/*
 * dm-table.c
 *
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

/*
 * Changelog
 *
 * 16/08/2001 - First version [Joe Thornber]
 */

#include "dm.h"

/* ceiling(n / size) * size */
static inline ulong round_up(ulong n, ulong size)
{
	ulong r = n % size;
	return n + (r ? (size - r) : 0);
}

/* ceiling(n / size) */
static inline ulong div_up(ulong n, ulong size)
{
	return round_up(n, size) / size;
}

/* similar to ceiling(log_size(n)) */
static uint int_log(ulong n, ulong base)
{
	int result = 0;

	while (n > 1) {
		n = div_up(n, base);
		result++;
	}

	return result;
}
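
/*
 * Illustrative values (easily checked against the loop above):
 * int_log(1, 64) == 0, int_log(64, 64) == 1, int_log(65, 64) == 2.
 * dm_table_complete() uses this to work out how many levels the
 * btree index needs.
 */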

/*
 * return the highest key that you could lookup
 * from the n'th node on level l of the btree.
 */
static offset_t high(struct dm_table *t, int l, int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (offset_t) -1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}
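
/*
 * Note: get_node()/get_child() and the KEYS_PER_NODE /
 * CHILDREN_PER_NODE constants are expected to come from dm.h.  high()
 * descends through the rightmost child at every level, so it returns
 * the largest key reachable through node n; (offset_t) -1 (the largest
 * possible offset) is returned for nodes that fall past the end of the
 * populated tree.
 */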

/*
 * fills in a level of the btree based on the
 * highs of the level below it.
 */
static int setup_btree_index(int l, struct dm_table *t)
{
	int n, k;
	offset_t *node;

	for (n = 0; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}
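
/*
 * After this runs, key k of an internal node holds the highest sector
 * covered by its k'th child's subtree, so a lookup can walk down the
 * tree by picking the first key that is >= the sector it wants.
 */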

/*
 * highs and targets are managed as dynamic
 * arrays during a table load.
 */
static int alloc_targets(struct dm_table *t, int num)
{
	offset_t *n_highs;
	struct target *n_targets;
	int n = t->num_targets;
	int size = (sizeof(struct target) + sizeof(offset_t)) * num;

	n_highs = vmalloc(size);
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct target *) (n_highs + num);

	if (n) {
		memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
		memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
	}

	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}
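
/*
 * Layout note: the highs array and the targets array live in a single
 * vmalloc'd block (num offset_t entries followed by num struct target
 * entries), which is why only t->highs is ever vfree'd and why both
 * arrays are reallocated and copied together.
 */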

struct dm_table *dm_table_create(void)
{
	struct dm_table *t = kmalloc(sizeof(struct dm_table), GFP_NOIO);

	if (!t)
		return 0;

	memset(t, 0, sizeof(*t));
	INIT_LIST_HEAD(&t->devices);

	/* allocate a single node's worth of targets to begin with */
	if (alloc_targets(t, KEYS_PER_NODE)) {
		kfree(t);
		t = 0;
	}

	return t;
}

static void free_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	for (tmp = devices->next; tmp != devices; tmp = next) {
		struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
		next = tmp->next;
		kfree(dd);
	}
}

void dm_table_destroy(struct dm_table *t)
{
	int i;

	/* free the indexes (see dm_table_complete) */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct target *tgt = &t->targets[i];

		if (tgt->type->dtr)
			tgt->type->dtr(t, tgt->private);
	}

	vfree(t->highs);

	/* free the device list */
	if (t->devices.next != &t->devices) {
		WARN("there are still devices present, someone isn't "
		     "calling dm_table_put_device");
		free_devices(&t->devices);
	}

	kfree(t);
}

/*
 * Checks to see if we need to extend
 * highs or targets.
 */
static inline int check_space(struct dm_table *t)
{
	if (t->num_targets >= t->num_allocated)
		return alloc_targets(t, t->num_allocated * 2);

	return 0;
}

/*
 * convert a device path to a kdev_t.
 */
int lookup_device(const char *path, kdev_t *dev)
{
	int r;
	struct nameidata nd;
	struct inode *inode;

	if (!path_init(path, LOOKUP_FOLLOW, &nd))
		return 0;

	if ((r = path_walk(path, &nd)))
		goto bad;

	inode = nd.dentry->d_inode;
	if (!inode) {
		r = -ENOENT;
		goto bad;
	}

	if (!S_ISBLK(inode->i_mode)) {
		r = -EINVAL;
		goto bad;
	}

	*dev = inode->i_rdev;

 bad:
	path_release(&nd);
	return r;
}

/*
 * see if we've already got a device in the list.
 */
static struct dm_dev *find_device(struct list_head *l, kdev_t dev)
{
	struct list_head *tmp;

	for (tmp = l->next; tmp != l; tmp = tmp->next) {
		struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
		if (dd->dev == dev)
			return dd;
	}

	return 0;
}

/*
 * add a device to the list, or just increment the
 * usage count if it's already present.
 */
int dm_table_get_device(struct dm_table *t, const char *path,
			struct dm_dev **result)
{
	int r;
	kdev_t dev;
	struct dm_dev *dd;

	/* convert the path to a device */
	if ((r = lookup_device(path, &dev)))
		return r;

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		dd->dev = dev;
		dd->bd = 0;
		atomic_set(&dd->count, 0);
		list_add(&dd->list, &t->devices);
	}
	atomic_inc(&dd->count);

	*result = dd;
	return 0;
}
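
/*
 * Typical pairing, sketched for illustration only: a target
 * constructor grabs its underlying device with
 *
 *	r = dm_table_get_device(t, path, &dd);
 *
 * and the matching destructor later drops it with
 *
 *	dm_table_put_device(t, dd);
 *
 * so the reference count in dd->count tracks how many targets share
 * the same underlying device.
 */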

/*
 * decrement a device's use count and remove it if
 * necessary.
 */
void dm_table_put_device(struct dm_table *t, struct dm_dev *dd)
{
	if (atomic_dec_and_test(&dd->count)) {
		list_del(&dd->list);
		kfree(dd);
	}
}

/*
 * adds a target to the map
 */
int dm_table_add_target(struct dm_table *t, offset_t high,
			struct target_type *type, void *private)
{
	int r, n;

	if ((r = check_space(t)))
		return r;

	n = t->num_targets++;
	t->highs[n] = high;
	t->targets[n].type = type;
	t->targets[n].private = private;

	return 0;
}
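
/*
 * A table load is expected to look roughly like this (illustrative
 * sketch only; the target type and private data are placeholders):
 *
 *	t = dm_table_create();
 *	dm_table_add_target(t, high0, &some_type, private0);
 *	dm_table_add_target(t, high1, &some_type, private1);
 *	...
 *	dm_table_complete(t);
 *
 * The btree built by dm_table_complete() assumes the targets were
 * appended in ascending order of their 'high' boundary.
 */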

static int setup_indexes(struct dm_table *t)
{
	int i, total = 0;
	offset_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	if (!(indexes = vmalloc(NODE_SIZE * total)))
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2, total = 0; i >= 0; i--) {
		t->index[i] = indexes + (KEYS_PER_NODE * total);
		total += t->counts[i];
		setup_btree_index(i, t);
	}

	return 0;
}

/*
 * builds the btree to index the map
 */
int dm_table_complete(struct dm_table *t)
{
	int leaf_nodes, r = 0;

	/* how many indexes will the btree have? */
	leaf_nodes = div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}
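
/*
 * Worked example (the numbers are illustrative only; the real
 * KEYS_PER_NODE / CHILDREN_PER_NODE come from dm.h): with
 * KEYS_PER_NODE == 8 and CHILDREN_PER_NODE == 9, a table of 100
 * targets gives
 *
 *	leaf_nodes = div_up(100, 8)     = 13
 *	depth      = 1 + int_log(13, 9) = 3
 *
 * so counts[2] = 13 (the leaves, i.e. t->highs itself),
 * counts[1] = div_up(13, 9) = 2, counts[0] = div_up(2, 9) = 1,
 * and setup_indexes() vmallocs three nodes' worth of internal index
 * space.
 */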

EXPORT_SYMBOL(dm_table_get_device);
EXPORT_SYMBOL(dm_table_put_device);