/*
 * dm-blkdev.c
 *
 * Copyright (C) 2001 Sistina Software
 *
 * This software is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/config.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <asm/atomic.h>

#include "dm.h"
struct dm_bdev {
	struct list_head list;
	struct block_device *bdev;
	atomic_t use;
};

#define DMB_HASH_SHIFT 8
#define DMB_HASH_SIZE  (1 << DMB_HASH_SHIFT)
#define DMB_HASH_MASK  (DMB_HASH_SIZE - 1)

/*
 * Lock ordering: always get bdev_sem before bdev_lock if you need both locks.
 *
 * bdev_lock: a spinlock which protects the hash table
 * bdev_sem:  a semaphore which protects blkdev_get/blkdev_put so that we
 *            are certain to hold only a single reference at any point in time.
 */
static kmem_cache_t *bdev_cachep;

struct list_head bdev_hash[DMB_HASH_SIZE];

static rwlock_t bdev_lock = RW_LOCK_UNLOCKED;
static DECLARE_MUTEX(bdev_sem);
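
/*
 * A sketch of the nesting used below: code that both opens/closes a
 * device and edits the hash table keeps the semaphore outermost, e.g.
 *
 *	down(&bdev_sem);
 *	write_lock(&bdev_lock);
 *	...add or remove a hash entry...
 *	write_unlock(&bdev_lock);
 *	...blkdev_get()/blkdev_put() as needed...
 *	up(&bdev_sem);
 */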

/*
 * Subject to change... seems the best solution for now though...
 */
static inline unsigned dm_hash_bdev(struct block_device *bdev)
{
	unsigned hash = (unsigned) bdev->bd_dev;
	hash ^= (hash >> DMB_HASH_SHIFT);
	return hash & DMB_HASH_MASK;
}
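
/*
 * Find the dm_bdev for a block_device in its hash chain, taking a
 * reference on the entry if it is found.  Caller must hold bdev_lock.
 */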
static struct dm_bdev *__dm_get_device(struct block_device *bdev, unsigned hash)
{
	struct list_head *tmp, *head;
	struct dm_bdev *b;

	tmp = head = &bdev_hash[hash];
	for (;;) {
		tmp = tmp->next;
		if (tmp == head)
			break;
		b = list_entry(tmp, struct dm_bdev, list);
		if (b->bdev != bdev)
			continue;
		atomic_inc(&b->use);
		return b;
	}

	return NULL;
}
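
/*
 * Look up, or create and open, the cached dm_bdev for a block_device.
 * On first use the device is opened with blkdev_get() and added to the
 * hash table; later calls just take another reference.  Returns the
 * block_device, or an ERR_PTR() on failure.
 */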
static struct block_device *dm_get_device(struct block_device *bdev)
{
	struct dm_bdev *d, *n;
	int rv = 0;
	unsigned hash = dm_hash_bdev(bdev);

	read_lock(&bdev_lock);
	d = __dm_get_device(bdev, hash);
	read_unlock(&bdev_lock);

	if (d)
		return d->bdev;

	n = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
	if (!n)
		return ERR_PTR(-ENOMEM);

	n->bdev = bdev;
	atomic_set(&n->use, 1);

	down(&bdev_sem);

	read_lock(&bdev_lock);
	d = __dm_get_device(bdev, hash);
	read_unlock(&bdev_lock);

	if (!d) {
		rv = blkdev_get(bdev, FMODE_READ | FMODE_WRITE, 0, BDEV_FILE);
		if (rv == 0) {
			atomic_inc(&bdev->bd_count);
			write_lock(&bdev_lock);
			list_add(&n->list, &bdev_hash[hash]);
			d = n;
			n = NULL;
			write_unlock(&bdev_lock);
		}
	}

	if (n)
		kmem_cache_free(bdev_cachep, n);

	up(&bdev_sem);

	/* Propagate a blkdev_get() failure rather than dereferencing
	   an error pointer below. */
	if (rv)
		return ERR_PTR(rv);

	return d->bdev;
}
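
/*
 * Resolve a path name to the underlying block device and take a
 * reference on it via dm_get_device().  Returns the block_device or
 * an ERR_PTR() on failure.
 */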
struct block_device *dm_blkdev_get(const char *path)
{
	struct nameidata nd;
	struct inode *inode;
	struct block_device *bdev;
	int err = -ENOENT;

	if (path_init(path, LOOKUP_FOLLOW, &nd))
		err = path_walk(path, &nd);

	if (err) {
		bdev = ERR_PTR(err);
		goto out;
	}

	inode = nd.dentry->d_inode;

	if (!inode) {
		bdev = ERR_PTR(-ENOENT);
		goto out;
	}

	if (!S_ISBLK(inode->i_mode)) {
		bdev = ERR_PTR(-ENOTBLK);
		goto out;
	}

	/*
	 * MNT_NODEV is not available on all kernel versions; fall back
	 * to the per-inode IS_NODEV() check where it is missing.
	 */
#ifdef MNT_NODEV
	if (nd.mnt->mnt_flags & MNT_NODEV) {
#else
	if (IS_NODEV(inode)) {
#endif
		bdev = ERR_PTR(-EACCES);
		goto out;
	}

	bdev = dm_get_device(inode->i_bdev);

out:
	path_release(&nd);
	return bdev;
}
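
/*
 * Drop a dm_bdev whose use count has reached zero: unhash it, release
 * the underlying device and free the structure.
 */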
static void dm_blkdev_drop(struct dm_bdev *d)
{
	down(&bdev_sem);
	write_lock(&bdev_lock);

	if (atomic_read(&d->use) == 0) {
		list_del(&d->list);
	} else {
		d = NULL;
	}

	write_unlock(&bdev_lock);

	if (d) {
		blkdev_put(d->bdev, BDEV_FILE);
		bdput(d->bdev);
		kmem_cache_free(bdev_cachep, d);
	}

	up(&bdev_sem);
}
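
/*
 * Release one reference taken with dm_blkdev_get().  The last put
 * closes and frees the device.  Returns -ENOENT if the block device
 * is not in the hash table.
 */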
int dm_blkdev_put(struct block_device *bdev)
{
	struct dm_bdev *d;
	int do_drop = 0;
	unsigned hash = dm_hash_bdev(bdev);

	read_lock(&bdev_lock);
	d = __dm_get_device(bdev, hash);
	if (d) {
		/*
		 * One for the ref that we want to drop,
		 * one for the ref taken by __dm_get_device()
		 */
		if (atomic_sub_and_test(2, &d->use))
			do_drop = 1;
	}
	read_unlock(&bdev_lock);

	if (do_drop)
		dm_blkdev_drop(d);

	return (d != NULL) ? 0 : -ENOENT;
}
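
/*
 * Intended usage (a sketch; the caller and path are illustrative only):
 *
 *	struct block_device *bdev = dm_blkdev_get("/dev/hda1");
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...
 *	dm_blkdev_put(bdev);
 */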

EXPORT_SYMBOL(dm_blkdev_get);
EXPORT_SYMBOL(dm_blkdev_put);
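
/*
 * Set up the hash table and the dm_bdev slab cache; must run before
 * any of the functions above are used.
 */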
int dm_init_blkdev(void)
{
	int i;

	for (i = 0; i < DMB_HASH_SIZE; i++)
		INIT_LIST_HEAD(&bdev_hash[i]);

	bdev_cachep = kmem_cache_create("dm_bdev", sizeof(struct dm_bdev),
					0, 0, NULL, NULL);
	if (bdev_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void dm_cleanup_blkdev(void)
{
	if (kmem_cache_destroy(bdev_cachep))
		printk(KERN_ERR "Device Mapper: dm_bdev cache not empty\n");
}