2001-08-20 12:05:51 +04:00
/*
 * device-mapper.c
 *
 * Copyright (C) 2001 Sistina Software
 *
 * This software is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

/*
 * Changelog
 *
 *   14/08/2001 - First version [Joe Thornber]
 */
2001-08-21 18:47:42 +04:00
# include "dm.h"
2001-08-20 12:05:51 +04:00
/* defines for blk.h */
# define MAJOR_NR DM_BLK_MAJOR
2001-09-14 17:27:58 +04:00
# define DEVICE_NR(device) MINOR(device) /* has no partition bits */
# define DEVICE_NAME "device-mapper" /* name for messaging */
# define DEVICE_NO_RANDOM /* no entropy to contribute */
# define DEVICE_OFF(d) /* do-nothing */
2001-08-20 12:05:51 +04:00
# include <linux/blk.h>
2001-09-13 18:01:13 +04:00
# include <linux/blkpg.h>
# include <linux/hdreg.h>
2001-09-14 17:45:40 +04:00
# include <linux/lvm.h>
2001-09-14 19:35:06 +04:00
# include <linux/kmod.h>
2001-08-20 12:05:51 +04:00
# define MAX_DEVICES 64
# define DEFAULT_READ_AHEAD 64
const char * _name = " device-mapper " ;
2001-09-14 17:27:58 +04:00
int _version [ 3 ] = { 0 , 1 , 0 } ;
2001-08-20 12:05:51 +04:00
2001-08-28 17:04:44 +04:00
struct io_hook {
struct mapped_device * md ;
2001-09-14 17:27:58 +04:00
void ( * end_io ) ( struct buffer_head * bh , int uptodate ) ;
2001-08-28 17:04:44 +04:00
void * context ;
} ;
2001-08-31 14:25:32 +04:00
kmem_cache_t * _io_hook_cache ;
2001-08-20 17:45:43 +04:00
struct rw_semaphore _dev_lock ;
2001-08-20 12:05:51 +04:00
static struct mapped_device * _devs [ MAX_DEVICES ] ;
/* block device arrays */
static int _block_size [ MAX_DEVICES ] ;
static int _blksize_size [ MAX_DEVICES ] ;
static int _hardsect_size [ MAX_DEVICES ] ;
2001-09-07 15:34:46 +04:00
const char * _fs_dir = " device-mapper " ;
static devfs_handle_t _dev_dir ;
2001-09-14 17:45:40 +04:00
static int request ( request_queue_t * q , int rw , struct buffer_head * bh ) ;
static int dm_user_bmap ( struct inode * inode , struct lv_bmap * lvb ) ;
2001-08-20 12:05:51 +04:00
/*
* setup and teardown the driver
*/
2001-08-31 19:13:33 +04:00
static int dm_init ( void )
2001-08-20 12:05:51 +04:00
{
2001-08-22 17:41:00 +04:00
int ret ;
2001-08-20 17:45:43 +04:00
init_rwsem ( & _dev_lock ) ;
2001-08-20 12:05:51 +04:00
2001-09-13 15:07:08 +04:00
if ( ! ( _io_hook_cache =
2001-09-14 17:27:58 +04:00
kmem_cache_create ( " dm io hooks " , sizeof ( struct io_hook ) ,
2001-08-31 14:25:32 +04:00
0 , 0 , NULL , NULL ) ) )
return - ENOMEM ;
2001-09-14 17:27:58 +04:00
if ( ( ret = dmfs_init ( ) ) | | ( ret = dm_target_init ( ) )
| | ( ret = dm_init_blkdev ( ) ) )
2001-08-22 17:41:00 +04:00
return ret ;
2001-08-20 12:05:51 +04:00
/* set up the arrays */
read_ahead [ MAJOR_NR ] = DEFAULT_READ_AHEAD ;
blk_size [ MAJOR_NR ] = _block_size ;
blksize_size [ MAJOR_NR ] = _blksize_size ;
hardsect_size [ MAJOR_NR ] = _hardsect_size ;
2001-08-22 19:33:08 +04:00
if ( devfs_register_blkdev ( MAJOR_NR , _name , & dm_blk_dops ) < 0 ) {
2001-08-20 12:05:51 +04:00
printk ( KERN_ERR " %s -- register_blkdev failed \n " , _name ) ;
return - EIO ;
}
2001-08-23 16:35:02 +04:00
blk_queue_make_request ( BLK_DEFAULT_QUEUE ( MAJOR_NR ) , request ) ;
2001-08-20 12:05:51 +04:00
2001-09-07 15:34:46 +04:00
_dev_dir = devfs_mk_dir ( 0 , _fs_dir , NULL ) ;
2001-08-23 16:35:02 +04:00
printk ( KERN_INFO " %s %d.%d.%d initialised \n " , _name ,
2001-08-20 12:05:51 +04:00
_version [ 0 ] , _version [ 1 ] , _version [ 2 ] ) ;
return 0 ;
}
2001-08-31 19:13:33 +04:00
static void dm_exit ( void )
2001-08-20 12:05:51 +04:00
{
2001-09-14 17:27:58 +04:00
if ( kmem_cache_destroy ( _io_hook_cache ) )
2001-08-31 14:25:32 +04:00
WARN ( " it looks like there are still some io_hooks allocated " ) ;
2001-09-14 17:27:58 +04:00
dmfs_exit ( ) ;
2001-09-14 14:40:20 +04:00
dm_cleanup_blkdev ( ) ;
2001-08-22 17:41:00 +04:00
if ( devfs_unregister_blkdev ( MAJOR_NR , _name ) < 0 )
2001-08-20 12:05:51 +04:00
printk ( KERN_ERR " %s -- unregister_blkdev failed \n " , _name ) ;
read_ahead [ MAJOR_NR ] = 0 ;
blk_size [ MAJOR_NR ] = 0 ;
blksize_size [ MAJOR_NR ] = 0 ;
hardsect_size [ MAJOR_NR ] = 0 ;
2001-08-29 17:58:48 +04:00
printk ( KERN_INFO " %s %d.%d.%d cleaned up \n " , _name ,
2001-08-20 12:05:51 +04:00
_version [ 0 ] , _version [ 1 ] , _version [ 2 ] ) ;
}
/*
* block device functions
*/
2001-09-13 18:01:13 +04:00
static int dm_blk_open ( struct inode * inode , struct file * file )
2001-08-20 12:05:51 +04:00
{
int minor = MINOR ( inode - > i_rdev ) ;
2001-08-21 18:28:00 +04:00
struct mapped_device * md ;
2001-08-20 12:05:51 +04:00
if ( minor > = MAX_DEVICES )
return - ENXIO ;
2001-09-13 15:29:38 +04:00
down_write ( & _dev_lock ) ;
2001-08-21 18:28:00 +04:00
md = _devs [ minor ] ;
2001-08-20 12:05:51 +04:00
2001-08-21 18:28:00 +04:00
if ( ! md | | ! is_active ( md ) ) {
2001-09-13 15:29:38 +04:00
up_write ( & _dev_lock ) ;
2001-08-20 12:05:51 +04:00
return - ENXIO ;
}
2001-08-21 18:28:00 +04:00
md - > use_count + + ;
2001-09-13 15:29:38 +04:00
up_write ( & _dev_lock ) ;
2001-08-20 12:05:51 +04:00
2001-08-21 18:28:00 +04:00
MOD_INC_USE_COUNT ;
2001-08-20 12:05:51 +04:00
return 0 ;
}
2001-09-13 18:01:13 +04:00
static int dm_blk_close ( struct inode * inode , struct file * file )
2001-08-20 12:05:51 +04:00
{
int minor = MINOR ( inode - > i_rdev ) ;
2001-08-21 18:28:00 +04:00
struct mapped_device * md ;
2001-08-20 12:05:51 +04:00
if ( minor > = MAX_DEVICES )
return - ENXIO ;
2001-09-13 15:29:38 +04:00
down_write ( & _dev_lock ) ;
2001-08-21 18:28:00 +04:00
md = _devs [ minor ] ;
2001-08-23 20:45:43 +04:00
if ( ! md | | md - > use_count < 1 ) {
2001-08-20 12:05:51 +04:00
WARN ( " reference count in mapped_device incorrect " ) ;
2001-09-13 15:29:38 +04:00
up_write ( & _dev_lock ) ;
2001-08-20 12:05:51 +04:00
return - ENXIO ;
}
2001-08-21 18:28:00 +04:00
md - > use_count - - ;
2001-09-13 15:29:38 +04:00
up_write ( & _dev_lock ) ;
2001-08-20 12:05:51 +04:00
2001-08-20 19:59:22 +04:00
MOD_DEC_USE_COUNT ;
2001-08-20 12:05:51 +04:00
return 0 ;
}
2001-09-13 20:52:50 +04:00
# define VOLUME_SIZE(minor) ((_block_size[(minor)] << 10) / \
2001-09-13 18:01:13 +04:00
_hardsect_size [ ( minor ) ] )
static int dm_blk_ioctl ( struct inode * inode , struct file * file ,
2001-09-14 17:27:58 +04:00
uint command , ulong a )
2001-08-20 12:05:51 +04:00
{
int minor = MINOR ( inode - > i_rdev ) ;
long size ;
2001-09-13 18:01:13 +04:00
if ( minor > = MAX_DEVICES )
return - ENXIO ;
2001-08-20 12:05:51 +04:00
switch ( command ) {
2001-09-13 18:01:13 +04:00
case BLKSSZGET :
case BLKROGET :
case BLKROSET :
#if 0
case BLKELVSET :
case BLKELVGET :
# endif
return blk_ioctl ( inode - > i_dev , command , a ) ;
break ;
case HDIO_GETGEO :
{
2001-09-14 17:27:58 +04:00
struct hd_geometry tmp = { heads : 64 , sectors : 32 } ;
2001-09-13 18:01:13 +04:00
2001-09-14 17:27:58 +04:00
tmp . cylinders = VOLUME_SIZE ( minor ) / tmp . heads /
tmp . sectors ;
2001-09-13 18:01:13 +04:00
2001-09-14 17:27:58 +04:00
if ( copy_to_user ( ( char * ) a , & tmp , sizeof ( tmp ) ) )
2001-09-13 18:01:13 +04:00
return - EFAULT ;
break ;
}
case HDIO_GETGEO_BIG :
{
2001-09-14 17:27:58 +04:00
struct hd_big_geometry tmp = { heads : 64 , sectors : 32 } ;
2001-09-13 18:01:13 +04:00
tmp . cylinders = VOLUME_SIZE ( minor ) / tmp . heads /
2001-09-14 17:27:58 +04:00
tmp . sectors ;
2001-09-13 18:01:13 +04:00
2001-09-14 17:27:58 +04:00
if ( copy_to_user ( ( char * ) a , & tmp , sizeof ( tmp ) ) )
2001-09-13 18:01:13 +04:00
return - EFAULT ;
break ;
}
2001-08-20 12:05:51 +04:00
case BLKGETSIZE :
2001-09-13 18:01:13 +04:00
size = VOLUME_SIZE ( minor ) ;
2001-09-14 17:27:58 +04:00
if ( copy_to_user ( ( void * ) a , & size , sizeof ( long ) ) )
2001-08-20 12:05:51 +04:00
return - EFAULT ;
break ;
case BLKFLSBUF :
if ( ! capable ( CAP_SYS_ADMIN ) )
return - EACCES ;
fsync_dev ( inode - > i_rdev ) ;
invalidate_buffers ( inode - > i_rdev ) ;
return 0 ;
case BLKRAGET :
2001-09-14 17:27:58 +04:00
if ( copy_to_user
( ( void * ) a , & read_ahead [ MAJOR ( inode - > i_rdev ) ] ,
sizeof ( long ) ) )
2001-08-20 12:05:51 +04:00
return - EFAULT ;
return 0 ;
case BLKRASET :
if ( ! capable ( CAP_SYS_ADMIN ) )
return - EACCES ;
read_ahead [ MAJOR ( inode - > i_rdev ) ] = a ;
return 0 ;
case BLKRRPART :
return - EINVAL ;
2001-09-14 17:45:40 +04:00
case LV_BMAP :
return dm_user_bmap ( inode , ( struct lv_bmap * ) a ) ;
2001-08-20 12:05:51 +04:00
default :
printk ( KERN_WARNING " %s - unknown block ioctl %d " ,
_name , command ) ;
return - EINVAL ;
}
return 0 ;
}
2001-08-31 20:36:56 +04:00
static inline struct io_hook * alloc_io_hook ( void )
2001-08-28 17:04:44 +04:00
{
2001-08-31 14:25:32 +04:00
return kmem_cache_alloc ( _io_hook_cache , GFP_NOIO ) ;
2001-08-28 17:04:44 +04:00
}
2001-08-31 20:36:56 +04:00
static inline void free_io_hook ( struct io_hook * ih )
2001-08-28 17:04:44 +04:00
{
2001-08-31 14:25:32 +04:00
kmem_cache_free ( _io_hook_cache , ih ) ;
2001-08-28 17:04:44 +04:00
}
2001-08-31 16:49:31 +04:00
/*
* FIXME : need to decide if deferred_io ' s need
* their own slab , I say no for now since they are
* only used when the device is suspended .
*/
2001-08-31 20:36:56 +04:00
static inline struct deferred_io * alloc_deferred ( void )
2001-08-28 18:05:22 +04:00
{
2001-09-14 17:27:58 +04:00
return kmalloc ( sizeof ( struct deferred_io ) , GFP_NOIO ) ;
2001-08-28 18:05:22 +04:00
}
2001-08-31 20:36:56 +04:00
/* free a deferred_io record */
static inline void free_deferred(struct deferred_io *di)
{
	kfree(di);
}
2001-08-31 19:13:33 +04:00
/*
* bh - > b_end_io routine that decrements the
* pending count and then calls the original
* bh - > b_end_io fn .
*/
2001-08-28 17:04:44 +04:00
static void dec_pending ( struct buffer_head * bh , int uptodate )
{
struct io_hook * ih = bh - > b_private ;
if ( atomic_dec_and_test ( & ih - > md - > pending ) )
/* nudge anyone waiting on suspend queue */
2001-09-14 18:03:02 +04:00
wake_up ( & ih - > md - > wait ) ;
2001-08-28 17:04:44 +04:00
bh - > b_end_io = ih - > end_io ;
bh - > b_private = ih - > context ;
free_io_hook ( ih ) ;
bh - > b_end_io ( bh , uptodate ) ;
}
2001-08-31 19:13:33 +04:00
/*
* add the bh to the list of deferred io .
*/
2001-08-28 18:05:22 +04:00
static int queue_io ( struct mapped_device * md , struct buffer_head * bh , int rw )
2001-08-20 12:05:51 +04:00
{
2001-08-28 18:05:22 +04:00
struct deferred_io * di = alloc_deferred ( ) ;
2001-08-20 12:05:51 +04:00
2001-08-28 18:05:22 +04:00
if ( ! di )
return - ENOMEM ;
2001-08-20 12:05:51 +04:00
2001-09-13 15:29:38 +04:00
down_write ( & _dev_lock ) ;
2001-08-28 18:05:22 +04:00
if ( test_bit ( DM_ACTIVE , & md - > state ) ) {
2001-09-13 15:29:38 +04:00
up_write ( & _dev_lock ) ;
2001-08-28 18:05:22 +04:00
return 0 ;
}
2001-08-20 17:45:43 +04:00
2001-08-28 18:05:22 +04:00
di - > bh = bh ;
di - > rw = rw ;
di - > next = md - > deferred ;
md - > deferred = di ;
2001-09-13 15:29:38 +04:00
up_write ( & _dev_lock ) ;
2001-08-31 13:14:55 +04:00
2001-08-28 18:05:22 +04:00
return 1 ;
}
2001-08-20 12:05:51 +04:00
2001-08-31 19:13:33 +04:00
/*
* do the bh mapping for a given leaf
*/
2001-08-31 20:36:56 +04:00
static inline int __map_buffer ( struct mapped_device * md ,
2001-08-31 19:13:33 +04:00
struct buffer_head * bh , int leaf )
2001-08-28 18:05:22 +04:00
{
dm_map_fn fn ;
void * context ;
struct io_hook * ih = 0 ;
int r ;
2001-08-31 19:13:33 +04:00
struct target * ti = md - > map - > targets + leaf ;
2001-08-28 18:05:22 +04:00
2001-09-02 14:49:20 +04:00
fn = ti - > type - > map ;
2001-08-29 17:58:48 +04:00
context = ti - > private ;
2001-08-20 12:05:51 +04:00
2001-08-28 17:04:44 +04:00
if ( ! fn )
2001-08-28 18:05:22 +04:00
return 0 ;
2001-08-28 17:04:44 +04:00
ih = alloc_io_hook ( ) ;
if ( ! ih )
2001-08-28 18:05:22 +04:00
return 0 ;
2001-08-28 17:04:44 +04:00
ih - > md = md ;
ih - > end_io = bh - > b_end_io ;
ih - > context = bh - > b_private ;
r = fn ( bh , context ) ;
if ( r > 0 ) {
/* hook the end io request fn */
atomic_inc ( & md - > pending ) ;
bh - > b_end_io = dec_pending ;
bh - > b_private = ih ;
} else if ( r = = 0 )
/* we don't need to hook */
free_io_hook ( ih ) ;
else if ( r < 0 ) {
free_io_hook ( ih ) ;
2001-08-28 18:05:22 +04:00
return 0 ;
}
return 1 ;
}
2001-08-31 19:13:33 +04:00
/*
* search the btree for the correct target .
*/
2001-08-31 20:36:56 +04:00
static inline int __find_node ( struct dm_table * t , struct buffer_head * bh )
2001-08-29 17:58:48 +04:00
{
2001-09-04 14:17:28 +04:00
int l , n = 0 , k = 0 ;
2001-08-29 17:58:48 +04:00
offset_t * node ;
2001-08-31 19:13:33 +04:00
for ( l = 0 ; l < t - > depth ; l + + ) {
2001-09-04 14:17:28 +04:00
n = get_child ( n , k ) ;
node = get_node ( t , l , n ) ;
2001-08-29 17:58:48 +04:00
2001-09-04 14:17:28 +04:00
for ( k = 0 ; k < KEYS_PER_NODE ; k + + )
if ( node [ k ] > = bh - > b_rsector )
2001-08-29 17:58:48 +04:00
break ;
}
2001-09-04 14:17:28 +04:00
return ( KEYS_PER_NODE * n ) + k ;
2001-08-29 17:58:48 +04:00
}
2001-09-14 17:45:40 +04:00
static int dm_user_bmap ( struct inode * inode , struct lv_bmap * lvb )
{
struct buffer_head bh ;
struct mapped_device * md ;
unsigned long block ;
int minor = MINOR ( inode - > i_rdev ) ;
int err ;
if ( minor > = MAX_DEVICES )
return - ENXIO ;
md = _devs [ minor ] ;
if ( md = = NULL )
return - ENXIO ;
if ( get_user ( block , & lvb - > lv_block ) )
return - EFAULT ;
memset ( & bh , 0 , sizeof ( bh ) ) ;
bh . b_blocknr = block ;
bh . b_dev = bh . b_rdev = inode - > i_rdev ;
bh . b_size = _blksize_size [ minor ] ;
bh . b_rsector = block * ( bh . b_size > > 9 ) ;
err = - EINVAL ;
down_read ( & _dev_lock ) ;
if ( test_bit ( DM_ACTIVE , & md - > state ) & & md - > map ) {
struct target * t = md - > map - > targets + __find_node ( md - > map , & bh ) ;
struct target_type * target = t - > type ;
if ( target - > flags & TF_BMAP ) {
err = target - > map ( & bh , t - > private ) ;
}
}
up_read ( & _dev_lock ) ;
if ( err > = 0 ) {
if ( put_user ( kdev_t_to_nr ( bh . b_rdev ) , & lvb - > lv_dev ) )
return - EFAULT ;
if ( put_user ( bh . b_rsector / ( bh . b_size > > 9 ) , & lvb - > lv_dev ) )
return - EFAULT ;
}
return err ;
}
/*
 * make_request function: defers io while the device is
 * suspended, otherwise maps the buffer_head via the btree.
 * Returns 1 to let the io proceed, 0 when handled (deferred or
 * failed with buffer_IO_error).
 */
static int request(request_queue_t *q, int rw, struct buffer_head *bh)
{
	struct mapped_device *md;
	int r, minor = MINOR(bh->b_rdev);

	if (minor >= MAX_DEVICES)
		goto bad_no_lock;

	down_read(&_dev_lock);
	md = _devs[minor];

	if (!md || !md->map)
		goto bad;

	/* if we're suspended we have to queue this io for later */
	if (!test_bit(DM_ACTIVE, &md->state)) {
		up_read(&_dev_lock);
		r = queue_io(md, bh, rw);

		if (r < 0)
			goto bad_no_lock;

		else if (r > 0)
			return 0;	/* deferred successfully */

		/* r == 0: device became active again, map it now */
		down_read(&_dev_lock);	/* FIXME: there's still a race here */
	}

	if (!__map_buffer(md, bh, __find_node(md->map, bh)))
		goto bad;

	up_read(&_dev_lock);
	return 1;

      bad:
	up_read(&_dev_lock);

      bad_no_lock:
	buffer_IO_error(bh);
	return 0;
}
2001-08-31 19:13:33 +04:00
/*
* see if the device with a specific minor # is
* free .
*/
2001-08-20 12:05:51 +04:00
/*
 * See if the device with a specific minor # is free.  Returns the
 * minor on success, -1 if it is taken or out of range.
 */
static inline int __specific_dev(int minor)
{
	/* BUGFIX: was 'minor > MAX_DEVICES' (off by one) and
	 * returned 0 — a valid minor — on failure. */
	if (minor >= MAX_DEVICES) {
		WARN("request for a mapped_device > than MAX_DEVICES");
		return -1;
	}

	if (!_devs[minor])
		return minor;

	return -1;
}
2001-08-31 19:13:33 +04:00
/*
* find the first free device .
*/
2001-08-20 12:05:51 +04:00
/*
 * Find the first free device, or -1 if all minors are in use.
 */
static inline int __any_old_dev(void)
{
	int i;

	for (i = 0; i < MAX_DEVICES; i++)
		if (!_devs[i])
			return i;

	return -1;
}
2001-08-31 19:13:33 +04:00
/*
* allocate and initialise a blank device .
*/
2001-08-23 16:35:02 +04:00
static struct mapped_device * alloc_dev ( int minor )
2001-08-20 12:05:51 +04:00
{
2001-09-14 17:27:58 +04:00
struct mapped_device * md = kmalloc ( sizeof ( * md ) , GFP_KERNEL ) ;
2001-09-14 14:06:22 +04:00
if ( ! md )
return 0 ;
2001-09-14 17:27:58 +04:00
memset ( md , 0 , sizeof ( * md ) ) ;
2001-08-20 12:05:51 +04:00
2001-09-13 15:29:38 +04:00
down_write ( & _dev_lock ) ;
2001-08-20 12:05:51 +04:00
minor = ( minor < 0 ) ? __any_old_dev ( ) : __specific_dev ( minor ) ;
if ( minor < 0 ) {
WARN ( " no free devices available " ) ;
2001-09-13 15:29:38 +04:00
up_write ( & _dev_lock ) ;
2001-08-20 12:05:51 +04:00
kfree ( md ) ;
return 0 ;
}
md - > dev = MKDEV ( DM_BLK_MAJOR , minor ) ;
md - > name [ 0 ] = ' \0 ' ;
2001-08-21 18:28:00 +04:00
md - > state = 0 ;
2001-08-20 12:05:51 +04:00
2001-08-28 17:04:44 +04:00
init_waitqueue_head ( & md - > wait ) ;
2001-08-20 12:05:51 +04:00
_devs [ minor ] = md ;
2001-09-13 15:29:38 +04:00
up_write ( & _dev_lock ) ;
2001-08-20 12:05:51 +04:00
2001-08-21 18:28:00 +04:00
return md ;
2001-08-20 12:05:51 +04:00
}
2001-08-31 19:13:33 +04:00
static inline struct mapped_device * __find_by_name ( const char * name )
2001-08-23 16:35:02 +04:00
{
2001-08-31 19:13:33 +04:00
int i ;
for ( i = 0 ; i < MAX_DEVICES ; i + + )
if ( _devs [ i ] & & ! strcmp ( _devs [ i ] - > name , name ) )
return _devs [ i ] ;
2001-08-23 16:35:02 +04:00
2001-08-31 19:13:33 +04:00
return 0 ;
2001-08-23 16:35:02 +04:00
}
2001-08-29 17:58:48 +04:00
struct mapped_device * dm_find_by_name ( const char * name )
2001-08-20 17:45:43 +04:00
{
struct mapped_device * md ;
2001-09-13 15:29:38 +04:00
down_read ( & _dev_lock ) ;
2001-08-31 16:49:31 +04:00
md = __find_by_name ( name ) ;
2001-09-13 15:29:38 +04:00
up_read ( & _dev_lock ) ;
2001-08-20 18:06:25 +04:00
return md ;
2001-08-20 17:45:43 +04:00
}
2001-08-29 17:58:48 +04:00
struct mapped_device * dm_find_by_minor ( int minor )
2001-08-20 17:45:43 +04:00
{
2001-08-20 18:06:25 +04:00
struct mapped_device * md ;
2001-09-13 15:29:38 +04:00
down_read ( & _dev_lock ) ;
2001-08-20 18:06:25 +04:00
md = _devs [ minor ] ;
2001-09-13 15:29:38 +04:00
up_read ( & _dev_lock ) ;
2001-08-20 17:45:43 +04:00
2001-08-20 18:06:25 +04:00
return md ;
2001-08-20 12:05:51 +04:00
}
2001-09-07 15:34:46 +04:00
static int register_device ( struct mapped_device * md )
{
md - > devfs_entry =
2001-09-14 17:27:58 +04:00
devfs_register ( _dev_dir , md - > name , DEVFS_FL_CURRENT_OWNER ,
MAJOR ( md - > dev ) , MINOR ( md - > dev ) ,
S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP ,
& dm_blk_dops , NULL ) ;
2001-09-07 15:34:46 +04:00
if ( ! md - > devfs_entry )
return - ENOMEM ;
return 0 ;
}
static int unregister_device ( struct mapped_device * md )
{
devfs_unregister ( md - > devfs_entry ) ;
return 0 ;
}
2001-09-14 19:35:06 +04:00
#ifdef CONFIG_HOTPLUG
/*
 * Run /sbin/hotplug (or whatever hotplug_path points at) to
 * notify user space of device creation/removal.
 */
static void dm_sbin_hotplug(struct mapped_device *md, int create)
{
	int i;
	char *argv[3];
	char *envp[5];
	char name[DM_NAME_LEN + 16];

	if (!hotplug_path[0])
		return;

	sprintf(name, "DMNAME=%s\n", md->name);

	i = 0;
	argv[i++] = hotplug_path;
	argv[i++] = "devmap";
	argv[i] = 0;

	i = 0;
	envp[i++] = "HOME=/";
	envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[i++] = name;
	if (create)
		envp[i++] = "ACTION=add";
	else
		envp[i++] = "ACTION=remove";
	envp[i] = 0;

	call_usermodehelper(argv[0], argv, envp);
}
#else
#define dm_sbin_hotplug(md, create) do { } while(0)
#endif /* CONFIG_HOTPLUG */
2001-08-31 19:13:33 +04:00
/*
* constructor for a new device
*/
2001-08-21 19:24:02 +04:00
int dm_create ( const char * name , int minor )
2001-08-20 12:05:51 +04:00
{
2001-08-22 19:01:09 +04:00
int r ;
2001-08-20 18:06:25 +04:00
struct mapped_device * md ;
2001-08-20 12:05:51 +04:00
2001-08-20 18:06:25 +04:00
if ( minor > = MAX_DEVICES )
2001-08-20 12:05:51 +04:00
return - ENXIO ;
2001-08-23 16:35:02 +04:00
if ( ! ( md = alloc_dev ( minor ) ) )
2001-09-14 14:06:22 +04:00
return - ENXIO ;
2001-08-20 18:06:25 +04:00
2001-09-13 15:29:38 +04:00
down_write ( & _dev_lock ) ;
2001-08-31 16:49:31 +04:00
if ( __find_by_name ( name ) ) {
2001-08-20 12:05:51 +04:00
WARN ( " device with that name already exists " ) ;
2001-08-22 19:33:08 +04:00
kfree ( md ) ;
2001-09-13 15:29:38 +04:00
up_write ( & _dev_lock ) ;
2001-08-20 12:05:51 +04:00
return - EINVAL ;
}
strcpy ( md - > name , name ) ;
2001-08-22 19:01:09 +04:00
2001-09-07 15:34:46 +04:00
if ( ( r = register_device ( md ) ) ) {
2001-09-13 15:29:38 +04:00
up_write ( & _dev_lock ) ;
2001-08-22 19:01:09 +04:00
return r ;
}
2001-09-13 15:29:38 +04:00
up_write ( & _dev_lock ) ;
2001-08-21 18:28:00 +04:00
2001-09-14 19:35:06 +04:00
dm_sbin_hotplug ( md , 1 ) ;
2001-08-21 18:28:00 +04:00
return 0 ;
2001-08-20 12:05:51 +04:00
}
2001-08-31 19:13:33 +04:00
/*
* destructor for the device . md - > map is
* deliberately not destroyed , dm - fs should manage
* table objects .
*/
2001-08-21 19:24:02 +04:00
int dm_remove ( const char * name )
2001-08-20 12:05:51 +04:00
{
struct mapped_device * md ;
2001-08-22 19:01:09 +04:00
int minor , r ;
2001-08-20 12:05:51 +04:00
2001-09-13 15:29:38 +04:00
down_write ( & _dev_lock ) ;
2001-08-31 16:49:31 +04:00
if ( ! ( md = __find_by_name ( name ) ) ) {
2001-09-13 15:29:38 +04:00
up_write ( & _dev_lock ) ;
2001-08-20 17:45:43 +04:00
return - ENXIO ;
}
2001-08-28 18:05:22 +04:00
if ( md - > use_count ) {
2001-09-13 15:29:38 +04:00
up_write ( & _dev_lock ) ;
2001-08-28 17:04:44 +04:00
return - EPERM ;
}
2001-09-07 15:34:46 +04:00
if ( ( r = unregister_device ( md ) ) ) {
2001-09-13 15:29:38 +04:00
up_write ( & _dev_lock ) ;
2001-08-22 19:01:09 +04:00
return r ;
}
2001-08-20 18:06:25 +04:00
minor = MINOR ( md - > dev ) ;
2001-08-22 19:33:08 +04:00
kfree ( md ) ;
2001-08-20 17:45:43 +04:00
_devs [ minor ] = 0 ;
2001-09-13 15:29:38 +04:00
up_write ( & _dev_lock ) ;
2001-08-21 18:28:00 +04:00
2001-09-14 19:35:06 +04:00
dm_sbin_hotplug ( md , 0 ) ;
2001-08-20 12:05:51 +04:00
return 0 ;
}
2001-08-31 19:13:33 +04:00
/*
2001-09-07 15:34:46 +04:00
* Bind a table to the device .
2001-08-31 19:13:33 +04:00
*/
2001-09-07 15:34:46 +04:00
void __bind ( struct mapped_device * md , struct dm_table * t )
2001-08-31 19:13:33 +04:00
{
int minor = MINOR ( md - > dev ) ;
md - > map = t ;
_block_size [ minor ] = ( t - > highs [ t - > num_targets - 1 ] + 1 ) > > 1 ;
/* FIXME: block size depends on the mapping table */
_blksize_size [ minor ] = BLOCK_SIZE ;
2001-09-14 14:40:20 +04:00
_hardsect_size [ minor ] = t - > hardsect_size ;
2001-08-31 19:13:33 +04:00
register_disk ( NULL , md - > dev , 1 , & dm_blk_dops , _block_size [ minor ] ) ;
2001-08-20 20:12:22 +04:00
}
2001-08-31 19:13:33 +04:00
/*
* requeue the deferred buffer_heads by calling
* generic_make_request .
*/
2001-08-28 18:05:22 +04:00
static void __flush_deferred_io ( struct mapped_device * md )
{
struct deferred_io * c , * n ;
for ( c = md - > deferred , md - > deferred = 0 ; c ; c = n ) {
n = c - > next ;
generic_make_request ( c - > rw , c - > bh ) ;
free_deferred ( c ) ;
}
}
2001-08-31 19:13:33 +04:00
/*
* make the device available for use , if was
* previously suspended rather than newly created
* then all queued io is flushed
*/
2001-09-07 15:34:46 +04:00
int dm_activate ( struct mapped_device * md , struct dm_table * table )
2001-08-20 20:12:22 +04:00
{
2001-09-07 15:34:46 +04:00
/* check that the mapping has at least been loaded. */
if ( ! table - > num_targets )
return - EINVAL ;
2001-08-20 20:12:22 +04:00
2001-09-13 15:29:38 +04:00
down_write ( & _dev_lock ) ;
2001-08-23 16:35:02 +04:00
2001-09-07 15:34:46 +04:00
/* you must be deactivated first */
2001-08-23 16:35:02 +04:00
if ( is_active ( md ) ) {
2001-09-13 15:29:38 +04:00
up_write ( & _dev_lock ) ;
2001-09-07 15:34:46 +04:00
return - EPERM ;
2001-08-23 16:35:02 +04:00
}
2001-09-07 15:34:46 +04:00
__bind ( md , table ) ;
2001-08-23 16:35:02 +04:00
set_bit ( DM_ACTIVE , & md - > state ) ;
2001-08-28 18:05:22 +04:00
__flush_deferred_io ( md ) ;
2001-09-13 15:29:38 +04:00
up_write ( & _dev_lock ) ;
2001-08-20 20:12:22 +04:00
return 0 ;
2001-09-07 15:34:46 +04:00
}
/*
* Deactivate the device , the device must not be
* opened by anyone .
*/
int dm_deactivate ( struct mapped_device * md )
{
2001-09-13 15:29:38 +04:00
down_read ( & _dev_lock ) ;
2001-09-07 15:34:46 +04:00
if ( md - > use_count ) {
2001-09-13 15:29:38 +04:00
up_read ( & _dev_lock ) ;
2001-09-07 15:34:46 +04:00
return - EPERM ;
}
fsync_dev ( md - > dev ) ;
2001-08-20 20:12:22 +04:00
2001-09-13 15:29:38 +04:00
up_read ( & _dev_lock ) ;
2001-08-20 20:12:22 +04:00
2001-09-13 15:29:38 +04:00
down_write ( & _dev_lock ) ;
2001-09-07 15:34:46 +04:00
if ( md - > use_count ) {
/* drat, somebody got in quick ... */
2001-09-13 15:29:38 +04:00
up_write ( & _dev_lock ) ;
2001-09-07 15:34:46 +04:00
return - EPERM ;
}
md - > map = 0 ;
clear_bit ( DM_ACTIVE , & md - > state ) ;
2001-09-13 15:29:38 +04:00
up_write ( & _dev_lock ) ;
2001-09-07 15:34:46 +04:00
return 0 ;
2001-08-20 20:12:22 +04:00
}
2001-08-31 19:13:33 +04:00
/*
2001-09-07 15:34:46 +04:00
* We need to be able to change a mapping table
2001-08-31 19:13:33 +04:00
* under a mounted filesystem . for example we
* might want to move some data in the background .
* Before the table can be swapped with
* dm_bind_table , dm_suspend must be called to
* flush any in flight buffer_heads and ensure
* that any further io gets deferred .
*/
2001-08-20 20:12:22 +04:00
void dm_suspend ( struct mapped_device * md )
{
2001-08-28 17:04:44 +04:00
DECLARE_WAITQUEUE ( wait , current ) ;
2001-08-31 19:13:33 +04:00
2001-09-13 15:29:38 +04:00
down_write ( & _dev_lock ) ;
2001-09-07 15:34:46 +04:00
if ( ! is_active ( md ) ) {
2001-09-13 15:29:38 +04:00
up_write ( & _dev_lock ) ;
2001-08-20 20:12:22 +04:00
return ;
2001-09-07 15:34:46 +04:00
}
clear_bit ( DM_ACTIVE , & md - > state ) ;
2001-09-13 15:29:38 +04:00
up_write ( & _dev_lock ) ;
2001-08-20 20:12:22 +04:00
2001-08-28 17:04:44 +04:00
/* wait for all the pending io to flush */
add_wait_queue ( & md - > wait , & wait ) ;
2001-09-14 18:03:02 +04:00
current - > state = TASK_UNINTERRUPTIBLE ;
2001-08-28 17:04:44 +04:00
do {
2001-09-13 15:29:38 +04:00
down_write ( & _dev_lock ) ;
2001-08-28 17:04:44 +04:00
if ( ! atomic_read ( & md - > pending ) )
break ;
2001-09-13 15:29:38 +04:00
up_write ( & _dev_lock ) ;
2001-08-28 17:04:44 +04:00
schedule ( ) ;
} while ( 1 ) ;
current - > state = TASK_RUNNING ;
remove_wait_queue ( & md - > wait , & wait ) ;
2001-09-07 15:34:46 +04:00
md - > map = 0 ;
2001-09-13 15:29:38 +04:00
up_write ( & _dev_lock ) ;
2001-08-20 20:12:22 +04:00
}
2001-08-31 19:13:33 +04:00
struct block_device_operations dm_blk_dops = {
2001-09-14 17:27:58 +04:00
open : dm_blk_open ,
2001-09-13 18:01:13 +04:00
release : dm_blk_close ,
2001-09-14 17:27:58 +04:00
ioctl : dm_blk_ioctl
2001-08-31 19:13:33 +04:00
} ;
2001-08-20 12:05:51 +04:00
/*
* module hooks
*/
2001-08-31 19:13:33 +04:00
module_init(dm_init);
module_exit(dm_exit);

MODULE_DESCRIPTION("device-mapper driver");
MODULE_AUTHOR("Joe Thornber <thornber@btconnect.com>");
2001-08-20 12:05:51 +04:00
/*
* Local variables :
* c - file - style : " linux "
* End :
*/