/*
 * device-mapper.c
 *
 * Copyright (C) 2001 Sistina Software
 *
 * This software is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

/*
 * Changelog
 *
 * 14/08/2001 - First version [Joe Thornber]
 */
#include "dm.h"

/* defines for blk.h */
#define MAJOR_NR DM_BLK_MAJOR
#define DEVICE_NR(device) MINOR(device)	/* has no partition bits */
#define DEVICE_NAME "device-mapper"	/* name for messaging */
#define DEVICE_NO_RANDOM		/* no entropy to contribute */
#define DEVICE_OFF(d)			/* do-nothing */

#include <linux/blk.h>

#define MAX_DEVICES 64
#define DEFAULT_READ_AHEAD 64

const char *_name = "device-mapper";
int _version[3] = {0, 1, 0};

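/*
 * an io_hook stashes a buffer_head's original b_end_io and
 * b_private while the bh is in flight through a mapped device;
 * dec_pending() below restores them when the io completes.
 */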
struct io_hook {
	struct mapped_device *md;
	void (*end_io)(struct buffer_head *bh, int uptodate);
	void *context;
};

kmem_cache_t *_io_hook_cache;

2001-08-21 18:28:00 +04:00
# define rl down_read(&_dev_lock)
# define ru up_read(&_dev_lock)
# define wl down_write(&_dev_lock)
2001-08-22 19:01:09 +04:00
# define wu up_write(&_dev_lock)
2001-08-21 18:28:00 +04:00
2001-08-20 17:45:43 +04:00
struct rw_semaphore _dev_lock ;
2001-08-20 12:05:51 +04:00
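/*
 * _dev_lock protects the _devs table and the per-device state
 * below; rl/ru take it for reading, wl/wu for writing.
 */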
static struct mapped_device *_devs[MAX_DEVICES];

/* block device arrays */
static int _block_size[MAX_DEVICES];
static int _blksize_size[MAX_DEVICES];
static int _hardsect_size[MAX_DEVICES];

static int request(request_queue_t *q, int rw, struct buffer_head *bh);

/*
 * setup and teardown the driver
 */
static int dm_init(void)
{
	int ret;

	init_rwsem(&_dev_lock);

	if (!_io_hook_cache &&
	    !(_io_hook_cache =
	      kmem_cache_create("dm io hooks", sizeof(struct io_hook),
				0, 0, NULL, NULL)))
		return -ENOMEM;

	if ((ret = dm_fs_init()) || (ret = dm_target_init()))
		return ret;

	/* set up the arrays */
	read_ahead[MAJOR_NR] = DEFAULT_READ_AHEAD;
	blk_size[MAJOR_NR] = _block_size;
	blksize_size[MAJOR_NR] = _blksize_size;
	hardsect_size[MAJOR_NR] = _hardsect_size;

	if (devfs_register_blkdev(MAJOR_NR, _name, &dm_blk_dops) < 0) {
		printk(KERN_ERR "%s -- register_blkdev failed\n", _name);
		return -EIO;
	}

	blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), request);

	printk(KERN_INFO "%s %d.%d.%d initialised\n", _name,
	       _version[0], _version[1], _version[2]);
	return 0;
}
static void dm_exit(void)
{
	if (kmem_cache_shrink(_io_hook_cache))
		WARN("it looks like there are still some io_hooks allocated");

	dm_fs_exit();

	if (devfs_unregister_blkdev(MAJOR_NR, _name) < 0)
		printk(KERN_ERR "%s -- unregister_blkdev failed\n", _name);

	read_ahead[MAJOR_NR] = 0;
	blk_size[MAJOR_NR] = 0;
	blksize_size[MAJOR_NR] = 0;
	hardsect_size[MAJOR_NR] = 0;

	printk(KERN_INFO "%s %d.%d.%d cleaned up\n", _name,
	       _version[0], _version[1], _version[2]);
}
/*
 * block device functions
 */
static int blk_open(struct inode *inode, struct file *file)
{
	int minor = MINOR(inode->i_rdev);
	struct mapped_device *md;

	if (minor >= MAX_DEVICES)
		return -ENXIO;

	wl;
	md = _devs[minor];

	if (!md || !is_active(md)) {
		wu;
		return -ENXIO;
	}

	md->use_count++;
	wu;

	MOD_INC_USE_COUNT;
	return 0;
}
static int blk_close(struct inode *inode, struct file *file)
{
	int minor = MINOR(inode->i_rdev);
	struct mapped_device *md;

	if (minor >= MAX_DEVICES)
		return -ENXIO;

	wl;
	md = _devs[minor];
	if (!md || md->use_count < 1) {
		WARN("reference count in mapped_device incorrect");
		wu;
		return -ENXIO;
	}

	md->use_count--;
	wu;

	MOD_DEC_USE_COUNT;
	return 0;
}
static int blk_ioctl(struct inode *inode, struct file *file,
		     uint command, ulong a)
{
	/* FIXME: check in the latest Rubini that all expected ioctl's
	   are supported */

	int minor = MINOR(inode->i_rdev);
	long size;

	switch (command) {
	case BLKGETSIZE:
		size = _block_size[minor] * 1024 / _hardsect_size[minor];
		if (copy_to_user((void *) a, &size, sizeof(long)))
			return -EFAULT;
		break;

	case BLKFLSBUF:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		fsync_dev(inode->i_rdev);
		invalidate_buffers(inode->i_rdev);
		return 0;

	case BLKRAGET:
		if (copy_to_user((void *) a, &read_ahead[MAJOR(inode->i_rdev)],
				 sizeof(long)))
			return -EFAULT;
		return 0;

	case BLKRASET:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		read_ahead[MAJOR(inode->i_rdev)] = a;
		return 0;

	case BLKRRPART:
		return -EINVAL;

	default:
		printk(KERN_WARNING "%s - unknown block ioctl %d\n",
		       _name, command);
		return -EINVAL;
	}

	return 0;
}
inline static struct io_hook *alloc_io_hook(void)
{
	return kmem_cache_alloc(_io_hook_cache, GFP_NOIO);
}

inline static void free_io_hook(struct io_hook *ih)
{
	kmem_cache_free(_io_hook_cache, ih);
}
/*
 * FIXME: need to decide if deferred_io's need
 * their own slab, I say no for now since they are
 * only used when the device is suspended.
 */
inline static struct deferred_io *alloc_deferred(void)
{
	return kmalloc(sizeof(struct deferred_io), GFP_NOIO);
}

inline static void free_deferred(struct deferred_io *di)
{
	kfree(di);
}
/*
 * bh->b_end_io routine that decrements the
 * pending count and then calls the original
 * bh->b_end_io fn.
 */
static void dec_pending(struct buffer_head *bh, int uptodate)
{
	struct io_hook *ih = bh->b_private;

	if (atomic_dec_and_test(&ih->md->pending))
		/* nudge anyone waiting on suspend queue */
		wake_up_interruptible(&ih->md->wait);

	bh->b_end_io = ih->end_io;
	bh->b_private = ih->context;
	free_io_hook(ih);

	bh->b_end_io(bh, uptodate);
}
/*
 * add the bh to the list of deferred io.  Returns -ENOMEM on
 * failure, 1 if the io was deferred, or 0 if the device became
 * active again in the meantime (in which case the caller should
 * map the bh itself).
 */
static int queue_io(struct mapped_device *md, struct buffer_head *bh, int rw)
{
	struct deferred_io *di = alloc_deferred();

	if (!di)
		return -ENOMEM;

	wl;
	if (test_bit(DM_ACTIVE, &md->state)) {
		wu;
		free_deferred(di);	/* don't leak the unused deferred_io */
		return 0;
	}

	di->bh = bh;
	di->rw = rw;
	di->next = md->deferred;
	md->deferred = di;
	wu;

	return 1;
}

/*
 * do the bh mapping for a given leaf
 */
inline static int __map_buffer(struct mapped_device *md,
			       struct buffer_head *bh, int leaf)
{
	dm_map_fn fn;
	void *context;
	struct io_hook *ih = 0;
	int r;
	struct target *ti = md->map->targets + leaf;

	fn = ti->map;
	context = ti->private;

	if (!fn)
		return 0;

	ih = alloc_io_hook();

	if (!ih)
		return 0;

	ih->md = md;
	ih->end_io = bh->b_end_io;
	ih->context = bh->b_private;

	r = fn(bh, context);

	if (r > 0) {
		/* hook the end io request fn */
		atomic_inc(&md->pending);
		bh->b_end_io = dec_pending;
		bh->b_private = ih;

	} else if (r == 0)
		/* we don't need to hook */
		free_io_hook(ih);

	else if (r < 0) {
		free_io_hook(ih);
		return 0;
	}

	return 1;
}
/*
 * search the btree for the correct target.
 */
inline static int __find_node(struct dm_table *t, struct buffer_head *bh)
{
	int i = 0, l, r = 0;
	offset_t *node;

	for (l = 0; l < t->depth; l++) {
		r = ((KEYS_PER_NODE + 1) * r) + i;
		node = t->index[l] + (r * KEYS_PER_NODE);

		for (i = 0; i < KEYS_PER_NODE; i++)
			if (node[i] >= bh->b_rsector)
				break;
	}

	return (KEYS_PER_NODE * r) + i;
}
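/*
 * worked example (hypothetical KEYS_PER_NODE of 4, depth of 2):
 * if the first key >= b_rsector in the root node is at slot i = 2,
 * the walk descends to child r = (4 + 1) * 0 + 2 = 2; a match at
 * slot i = 1 there gives leaf (4 * 2) + 1 = 9 in the target array.
 */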
/*
 * the make_request function for the device; returns 1 if the bh
 * was remapped and should be passed on, 0 if it was deferred or
 * errored.
 */
static int request(request_queue_t *q, int rw, struct buffer_head *bh)
{
	struct mapped_device *md;
	int r, minor = MINOR(bh->b_rdev);

	if (minor >= MAX_DEVICES)
		goto bad_no_lock;

	rl;
	md = _devs[minor];

	if (!md || !md->map)
		goto bad;

	/* if we're suspended we have to queue this io for later */
	if (!test_bit(DM_ACTIVE, &md->state)) {
		ru;
		r = queue_io(md, bh, rw);

		if (r < 0)
			goto bad_no_lock;

		else if (r > 0)
			return 0;	/* deferred successfully */

		rl;	/* FIXME: there's still a race here */
	}

	if (!__map_buffer(md, bh, __find_node(md->map, bh)))
		goto bad;

	ru;
	return 1;

 bad:
	ru;

 bad_no_lock:
	buffer_IO_error(bh);
	return 0;
}

/*
 * see if the device with a specific minor # is
 * free.  Returns the minor if it is, -1 if not.
 */
static inline int __specific_dev(int minor)
{
	if (minor >= MAX_DEVICES) {
		WARN("request for a mapped_device > than MAX_DEVICES");
		return -1;
	}

	if (!_devs[minor])
		return minor;

	return -1;
}
/*
 * find the first free device.
 */
static inline int __any_old_dev(void)
{
	int i;

	for (i = 0; i < MAX_DEVICES; i++)
		if (!_devs[i])
			return i;

	return -1;
}
/*
 * allocate and initialise a blank device.
 */
static struct mapped_device *alloc_dev(int minor)
{
	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);

	if (!md)
		return 0;

	memset(md, 0, sizeof(*md));

	wl;
	minor = (minor < 0) ? __any_old_dev() : __specific_dev(minor);

	if (minor < 0) {
		WARN("no free devices available");
		wu;
		kfree(md);
		return 0;
	}

	md->dev = MKDEV(DM_BLK_MAJOR, minor);
	md->name[0] = '\0';
	md->state = 0;

	init_waitqueue_head(&md->wait);

	_devs[minor] = md;
	wu;

	return md;
}
/*
 * open a device so we can use it as a map
 * destination.
 */
static int open_dev(struct dev_list *d)
{
	int err;

	if (!(d->bd = bdget(kdev_t_to_nr(d->dev))))
		return -ENOMEM;

	if ((err = blkdev_get(d->bd, FMODE_READ | FMODE_WRITE, 0, BDEV_FILE))) {
		bdput(d->bd);
		return err;
	}

	return 0;
}
/*
 * close a device that we've been using.
 */
static void close_dev(struct dev_list *d)
{
	blkdev_put(d->bd, BDEV_FILE);
	bdput(d->bd);
	d->bd = 0;
}
static inline struct mapped_device *__find_by_name(const char *name)
{
	int i;

	for (i = 0; i < MAX_DEVICES; i++)
		if (_devs[i] && !strcmp(_devs[i]->name, name))
			return _devs[i];

	return 0;
}

struct mapped_device *dm_find_by_name(const char *name)
{
	struct mapped_device *md;

	rl;
	md = __find_by_name(name);
	ru;

	return md;
}

struct mapped_device *dm_find_by_minor(int minor)
{
	struct mapped_device *md;

	if (minor < 0 || minor >= MAX_DEVICES)
		return 0;

	rl;
	md = _devs[minor];
	ru;

	return md;
}
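/*
 * note that both finders drop _dev_lock before returning, so the
 * caller must ensure the device isn't removed from under it.
 */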
/*
 * constructor for a new device
 */
int dm_create(const char *name, int minor)
{
	int r;
	struct mapped_device *md;

	if (minor >= MAX_DEVICES)
		return -ENXIO;

	if (!(md = alloc_dev(minor)))
		return -ENOMEM;

	minor = MINOR(md->dev);	/* alloc_dev may have chosen the minor */

	wl;
	if (__find_by_name(name)) {
		WARN("device with that name already exists");
		_devs[minor] = 0;
		kfree(md);
		wu;
		return -EINVAL;
	}

	strcpy(md->name, name);

	if ((r = dm_fs_add(md))) {
		_devs[minor] = 0;
		kfree(md);
		wu;
		return r;
	}
	wu;

	return 0;
}
/*
 * destructor for the device.  md->map is
 * deliberately not destroyed, dm-fs should manage
 * table objects.
 */
int dm_remove(const char *name)
{
	struct mapped_device *md;
	int minor, r;

	wl;
	if (!(md = __find_by_name(name))) {
		wu;
		return -ENXIO;
	}

	if (md->use_count) {
		wu;
		return -EPERM;
	}

	if ((r = dm_fs_remove(md))) {
		wu;
		return r;
	}

	minor = MINOR(md->dev);
	_devs[minor] = 0;
	kfree(md);
	wu;

	return 0;
}
/*
 * the hardsect size for a mapped device is the
 * smallest hard sect size from the devices it
 * maps onto.
 */
static int __find_hardsect_size(struct dev_list *dl)
{
	int result = INT_MAX, size;

	while (dl) {
		size = get_hardsect_size(dl->dev);
		if (size < result)
			result = size;
		dl = dl->next;
	}

	return result;
}
/*
 * bind a table to the device, the device must not
 * be active, though it could have another table
 * already bound.
 */
int dm_bind(struct mapped_device *md, struct dm_table *t)
{
	int minor = MINOR(md->dev);

	wl;
	if (is_active(md)) {
		wu;
		return -EPERM;
	}

	md->map = t;

	/* t->highs[] holds the last sector of each target, so the
	   device length is highs[last] + 1 sectors; shifting right
	   by one converts 512 byte sectors to kilobytes. */
	_block_size[minor] = (t->highs[t->num_targets - 1] + 1) >> 1;

	/* FIXME: block size depends on the mapping table */
	_blksize_size[minor] = BLOCK_SIZE;
	_hardsect_size[minor] = __find_hardsect_size(t->devices);
	register_disk(NULL, md->dev, 1, &dm_blk_dops, _block_size[minor]);
	wu;

	return 0;
}
/*
 * requeue the deferred buffer_heads by calling
 * generic_make_request.
 */
static void __flush_deferred_io(struct mapped_device *md)
{
	struct deferred_io *c, *n;

	for (c = md->deferred, md->deferred = 0; c; c = n) {
		n = c->next;
		generic_make_request(c->rw, c->bh);
		free_deferred(c);
	}
}
/*
 * make the device available for use; if it was
 * previously suspended rather than newly created
 * then all queued io is flushed.
 */
int dm_activate(struct mapped_device *md)
{
	int ret;
	struct dev_list *d, *od;

	wl;
	if (is_active(md)) {
		wu;
		return 0;
	}

	if (!md->map) {
		wu;
		return -ENXIO;
	}

	/* open all the devices */
	for (d = md->map->devices; d; d = d->next)
		if ((ret = open_dev(d)))
			goto bad;

	set_bit(DM_ACTIVE, &md->state);
	__flush_deferred_io(md);
	wu;

	return 0;

 bad:
	od = d;
	for (d = md->map->devices; d != od; d = d->next)
		close_dev(d);
	wu;

	return ret;
}
/*
 * we need to be able to change a mapping table
 * under a mounted filesystem.  for example we
 * might want to move some data in the background.
 * Before the table can be swapped with dm_bind,
 * dm_suspend must be called to flush any in
 * flight buffer_heads and ensure that any further
 * io gets deferred.
 */
void dm_suspend(struct mapped_device *md)
{
	DECLARE_WAITQUEUE(wait, current);
	struct dev_list *d;

	if (!is_active(md))
		return;

	/* wait for all the pending io to flush */
	add_wait_queue(&md->wait, &wait);
	current->state = TASK_INTERRUPTIBLE;
	do {
		wl;
		if (!atomic_read(&md->pending))
			break;

		wu;
		schedule();

	} while (1);

	current->state = TASK_RUNNING;
	remove_wait_queue(&md->wait, &wait);

	/* close all the devices */
	for (d = md->map->devices; d; d = d->next)
		close_dev(d);

	clear_bit(DM_ACTIVE, &md->state);
	wu;
}
struct block_device_operations dm_blk_dops = {
	open:		blk_open,
	release:	blk_close,
	ioctl:		blk_ioctl
};
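
/*
 * sketch of the intended life cycle of a mapped device, using the
 * entry points above ("vol1" is just an example name):
 *
 *	dm_create("vol1", -1);	allocate a blank device, any free minor
 *	dm_bind(md, t);		attach a mapping table
 *	dm_activate(md);	open target devices, flush deferred io
 *	dm_suspend(md);		quiesce before swapping in a new table
 *	dm_remove("vol1");	tear down once the use count drops
 */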
/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

/*
 * Local variables:
 * c-file-style: "linux"
 * End:
 */