/*
 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
 *
 * Interface to Linux 2.5 block layer for MTD 'translation layers'.
 *
 */
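
/*
 * Rough usage sketch (illustrative only; the myflash_* names below are
 * hypothetical, not part of this file): a translation layer fills in a
 * struct mtd_blktrans_ops and registers it.  This core then calls
 * ->add_mtd() for each MTD device; the driver's ->add_mtd() typically
 * allocates a struct mtd_blktrans_dev, sets .mtd, .devnum, .size (in units
 * of .blksize) and .tr, and hands it to add_mtd_blktrans_dev(), while
 * ->remove_dev() calls del_mtd_blktrans_dev().  Block I/O is then routed
 * through ->readsect()/->writesect().
 *
 *	static struct mtd_blktrans_ops myflash_tr = {
 *		.name		= "myflash",
 *		.major		= 240,	// hypothetical local/experimental major
 *		.part_bits	= 0,
 *		.blksize	= 512,
 *		.readsect	= myflash_readsect,
 *		.writesect	= myflash_writesect,
 *		.add_mtd	= myflash_add_mtd,
 *		.remove_dev	= myflash_remove_dev,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init myflash_init(void)
 *	{
 *		return register_mtd_blktrans(&myflash_tr);
 *	}
 *	module_init(myflash_init);
 */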

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);
static DEFINE_MUTEX(blktrans_ref_mutex);

void blktrans_dev_release(struct kref *kref)
{
	struct mtd_blktrans_dev *dev =
		container_of(kref, struct mtd_blktrans_dev, ref);

	dev->disk->private_data = NULL;
	put_disk(dev->disk);
	list_del(&dev->list);
	kfree(dev);
}

static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
{
	struct mtd_blktrans_dev *dev;

	mutex_lock(&blktrans_ref_mutex);
	dev = disk->private_data;

	if (!dev)
		goto unlock;
	kref_get(&dev->ref);
unlock:
	mutex_unlock(&blktrans_ref_mutex);
	return dev;
}

void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
	mutex_lock(&blktrans_ref_mutex);
	kref_put(&dev->ref, blktrans_dev_release);
	mutex_unlock(&blktrans_ref_mutex);
}
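
/*
 * Note on units in do_blktrans_request() below: the block layer reports
 * positions in 512-byte sectors, while the translation layer works in
 * blocks of tr->blksize bytes (tr->blkshift == log2(blksize)).  So
 * "blk_rq_pos(req) << 9 >> tr->blkshift" converts sectors to bytes and
 * then to blocks; e.g. with a 2048-byte blksize (blkshift 11), sector 8
 * maps to block 2.
 */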
static int do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;

	block = blk_rq_pos(req) << 9 >> tr->blkshift;
	nsect = blk_rq_cur_bytes(req) >> tr->blkshift;

	buf = req->buffer;

	if (!blk_fs_request(req))
		return -EIO;

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
	    get_capacity(req->rq_disk))
		return -EIO;

	if (blk_discard_rq(req))
		return tr->discard(dev, block, nsect);

	switch (rq_data_dir(req)) {
	case READ:
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->readsect(dev, block, buf))
				return -EIO;
		rq_flush_dcache_pages(req);
		return 0;
	case WRITE:
		if (!tr->writesect)
			return -EIO;

		rq_flush_dcache_pages(req);
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->writesect(dev, block, buf))
				return -EIO;
		return 0;
	default:
		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
		return -EIO;
	}
}

static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_dev *dev = arg;
	struct request_queue *rq = dev->rq;
	struct request *req = NULL;

	spin_lock_irq(rq->queue_lock);

	while (!kthread_should_stop()) {
		int res;

		if (!req && !(req = blk_fetch_request(rq))) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(rq->queue_lock);
			schedule();
			spin_lock_irq(rq->queue_lock);
			continue;
		}

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&dev->lock);
		res = do_blktrans_request(dev->tr, dev, req);
		mutex_unlock(&dev->lock);

		spin_lock_irq(rq->queue_lock);

		if (!__blk_end_request_cur(req, res))
			req = NULL;
	}

	if (req)
		__blk_end_request_all(req, -EIO);

	spin_unlock_irq(rq->queue_lock);

	return 0;
}

static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_dev *dev;
	struct request *req = NULL;

	dev = rq->queuedata;

	if (!dev)
		while ((req = blk_fetch_request(rq)) != NULL)
			__blk_end_request_all(req, -ENODEV);
	else
		wake_up_process(dev->thread);
}

static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret;

	if (!dev)
		return -ERESTARTSYS;

	mutex_lock(&dev->lock);

	if (!dev->mtd) {
		ret = -ENXIO;
		goto unlock;
	}

	ret = !dev->open++ && dev->tr->open ? dev->tr->open(dev) : 0;

	/* Take another reference on the device so it won't go away till
	   last release */
	if (!ret)
		kref_get(&dev->ref);
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

static int blktrans_release(struct gendisk *disk, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);
	int ret = -ENXIO;

	if (!dev)
		return ret;

	mutex_lock(&dev->lock);

	/* Release one reference; we're sure it's not the last one here */
	kref_put(&dev->ref, blktrans_dev_release);

	if (!dev->mtd)
		goto unlock;

	ret = !--dev->open && dev->tr->release ? dev->tr->release(dev) : 0;
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = -ENXIO;

	if (!dev)
		return ret;

	mutex_lock(&dev->lock);

	if (!dev->mtd)
		goto unlock;

	ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : 0;
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
			  unsigned int cmd, unsigned long arg)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = -ENXIO;

	if (!dev)
		return ret;

	mutex_lock(&dev->lock);

	if (!dev->mtd)
		goto unlock;

	switch (cmd) {
	case BLKFLSBUF:
		ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
		break;	/* don't fall through and clobber ret with -ENOTTY */
	default:
		ret = -ENOTTY;	/* unknown command */
	}
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

static const struct block_device_operations mtd_blktrans_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.locked_ioctl	= blktrans_ioctl,
	.getgeo		= blktrans_getgeo,
};

int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct mtd_blktrans_dev *d;
	int last_devnum = -1;
	struct gendisk *gd;
	int ret;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	mutex_lock(&blktrans_ref_mutex);
	list_for_each_entry(d, &tr->devs, list) {
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			mutex_unlock(&blktrans_ref_mutex);
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}

	ret = -EBUSY;
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	/* Check that the device and any partitions will get valid
	 * minor numbers and that the disk naming code below can cope
	 * with this number. */
	if (new->devnum > (MINORMASK >> tr->part_bits) ||
	    (tr->part_bits && new->devnum >= 27 * 26)) {
		mutex_unlock(&blktrans_ref_mutex);
		goto error1;
	}

	list_add_tail(&new->list, &tr->devs);
 added:
	mutex_unlock(&blktrans_ref_mutex);

	mutex_init(&new->lock);
	kref_init(&new->ref);
	if (!tr->writesect)
		new->readonly = 1;

	/* Create gendisk */
	ret = -ENOMEM;
	gd = alloc_disk(1 << tr->part_bits);

	if (!gd)
		goto error2;

	new->disk = gd;
	gd->private_data = new;
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &mtd_blktrans_ops;

	if (tr->part_bits)
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	else
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);

	set_capacity(gd, (new->size * tr->blksize) >> 9);

	/* Create the request queue */
	spin_lock_init(&new->queue_lock);
	new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);

	if (!new->rq)
		goto error3;

	new->rq->queuedata = new;
	blk_queue_logical_block_size(new->rq, tr->blksize);

	if (tr->discard)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);

	gd->queue = new->rq;

	__get_mtd_device(new->mtd);
	__module_get(tr->owner);

	/* Create processing thread */
	/* TODO: workqueue ? */
	new->thread = kthread_run(mtd_blktrans_thread, new,
			"%s%d", tr->name, new->mtd->index);
	if (IS_ERR(new->thread)) {
		ret = PTR_ERR(new->thread);
		goto error4;
	}

	gd->driverfs_dev = &new->mtd->dev;

	if (new->readonly)
		set_disk_ro(gd, 1);

	add_disk(gd);

	if (new->disk_attributes)
		sysfs_create_group(&disk_to_dev(gd)->kobj,
				   new->disk_attributes);

	return 0;
error4:
	module_put(tr->owner);
	__put_mtd_device(new->mtd);
	blk_cleanup_queue(new->rq);
error3:
	put_disk(new->disk);
error2:
	list_del(&new->list);
error1:
	kfree(new);
	return ret;
}

int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	unsigned long flags;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	/* Stop new requests from arriving */
	del_gendisk(old->disk);

	if (old->disk_attributes)
		sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
				   old->disk_attributes);

	/* Stop the thread */
	kthread_stop(old->thread);

	/* Kill current requests */
	spin_lock_irqsave(&old->queue_lock, flags);
	old->rq->queuedata = NULL;
	blk_start_queue(old->rq);
	spin_unlock_irqrestore(&old->queue_lock, flags);
	blk_cleanup_queue(old->rq);

	/* Ask the trans driver to release the mtd device */
	mutex_lock(&old->lock);
	if (old->open && old->tr->release) {
		old->tr->release(old);
		old->open = 0;
	}

	__put_mtd_device(old->mtd);
	module_put(old->tr->owner);

	/* From this point on, we don't touch the mtd anymore */
	old->mtd = NULL;

	mutex_unlock(&old->lock);
	blktrans_dev_put(old);
	return 0;
}

static void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;
	struct mtd_blktrans_dev *dev, *next;

	list_for_each_entry(tr, &blktrans_majors, list)
		list_for_each_entry_safe(dev, next, &tr->devs, list)
			if (dev->mtd == mtd)
				tr->remove_dev(dev);
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each_entry(tr, &blktrans_majors, list)
		tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};

int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_info *mtd;
	int ret;

	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from fucking
	   us over. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);

	mutex_lock(&mtd_table_mutex);

	ret = register_blkdev(tr->major, tr->name);
	if (ret) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}

	tr->blkshift = ffs(tr->blksize) - 1;

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);

	mtd_for_each_device(mtd)
		if (mtd->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd);

	mutex_unlock(&mtd_table_mutex);
	return 0;
}

int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_blktrans_dev *dev, *next;

	mutex_lock(&mtd_table_mutex);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_entry_safe(dev, next, &tr->devs, list)
		tr->remove_dev(dev);

	unregister_blkdev(tr->major, tr->name);
	mutex_unlock(&mtd_table_mutex);

	BUG_ON(!list_empty(&tr->devs));
	return 0;
}

static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");