/*
 * Copyright (C) 2015 Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 * Implementation of a generic nvm manager for Open-Channel SSDs.
 */
# include "gennvm.h"
2016-03-03 15:06:37 +01:00
static int gennvm_get_area ( struct nvm_dev * dev , sector_t * lba , sector_t len )
{
struct gen_nvm * gn = dev - > mp ;
struct gennvm_area * area , * prev , * next ;
sector_t begin = 0 ;
sector_t max_sectors = ( dev - > sec_size * dev - > total_secs ) > > 9 ;
if ( len > max_sectors )
return - EINVAL ;
area = kmalloc ( sizeof ( struct gennvm_area ) , GFP_KERNEL ) ;
if ( ! area )
return - ENOMEM ;
prev = NULL ;
spin_lock ( & dev - > lock ) ;
list_for_each_entry ( next , & gn - > area_list , list ) {
if ( begin + len > next - > begin ) {
begin = next - > end ;
prev = next ;
continue ;
}
break ;
}
if ( ( begin + len ) > max_sectors ) {
spin_unlock ( & dev - > lock ) ;
kfree ( area ) ;
return - EINVAL ;
}
area - > begin = * lba = begin ;
area - > end = begin + len ;
if ( prev ) /* insert into sorted order */
list_add ( & area - > list , & prev - > list ) ;
else
list_add ( & area - > list , & gn - > area_list ) ;
spin_unlock ( & dev - > lock ) ;
return 0 ;
}
/* Release the area previously reserved at sector @begin, if one exists. */
static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
{
	struct gen_nvm *gn = dev->mp;
	struct gennvm_area *area;

	spin_lock(&dev->lock);
	list_for_each_entry(area, &gn->area_list, list) {
		if (area->begin == begin) {
			list_del(&area->list);
			spin_unlock(&dev->lock);
			kfree(area);
			return;
		}
	}
	spin_unlock(&dev->lock);
}
2015-10-28 19:54:56 +01:00
static void gennvm_blocks_free ( struct nvm_dev * dev )
{
struct gen_nvm * gn = dev - > mp ;
struct gen_lun * lun ;
int i ;
gennvm_for_each_lun ( gn , lun , i ) {
if ( ! lun - > vlun . blocks )
break ;
vfree ( lun - > vlun . blocks ) ;
}
}
/* Free the lun array allocated by gennvm_luns_init(). */
static void gennvm_luns_free(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;

	kfree(gn->luns);
}
static int gennvm_luns_init ( struct nvm_dev * dev , struct gen_nvm * gn )
{
struct gen_lun * lun ;
int i ;
gn - > luns = kcalloc ( dev - > nr_luns , sizeof ( struct gen_lun ) , GFP_KERNEL ) ;
if ( ! gn - > luns )
return - ENOMEM ;
gennvm_for_each_lun ( gn , lun , i ) {
spin_lock_init ( & lun - > vlun . lock ) ;
INIT_LIST_HEAD ( & lun - > free_list ) ;
INIT_LIST_HEAD ( & lun - > used_list ) ;
INIT_LIST_HEAD ( & lun - > bb_list ) ;
lun - > reserved_blocks = 2 ; /* for GC only */
lun - > vlun . id = i ;
lun - > vlun . lun_id = i % dev - > luns_per_chnl ;
lun - > vlun . chnl_id = i / dev - > luns_per_chnl ;
lun - > vlun . nr_free_blocks = dev - > blks_per_lun ;
2016-01-12 07:49:33 +01:00
lun - > vlun . nr_open_blocks = 0 ;
lun - > vlun . nr_closed_blocks = 0 ;
2015-11-20 13:47:56 +01:00
lun - > vlun . nr_bad_blocks = 0 ;
2015-10-28 19:54:56 +01:00
}
return 0 ;
}
2016-05-06 20:03:05 +02:00
static int gennvm_block_bb ( struct gen_nvm * gn , struct ppa_addr ppa ,
u8 * blks , int nr_blks )
2015-10-28 19:54:56 +01:00
{
2016-05-06 20:03:05 +02:00
struct nvm_dev * dev = gn - > dev ;
2015-11-16 15:34:37 +01:00
struct gen_lun * lun ;
2015-10-28 19:54:56 +01:00
struct nvm_block * blk ;
int i ;
2016-05-06 20:02:58 +02:00
nr_blks = nvm_bb_tbl_fold ( dev , blks , nr_blks ) ;
if ( nr_blks < 0 )
return nr_blks ;
2015-12-29 14:37:56 +01:00
lun = & gn - > luns [ ( dev - > luns_per_chnl * ppa . g . ch ) + ppa . g . lun ] ;
2015-11-16 15:34:37 +01:00
2016-05-06 20:02:58 +02:00
for ( i = 0 ; i < nr_blks ; i + + ) {
2015-11-16 15:34:37 +01:00
if ( blks [ i ] = = 0 )
continue ;
2015-10-28 19:54:56 +01:00
blk = & lun - > vlun . blocks [ i ] ;
list_move_tail ( & blk - > list , & lun - > bb_list ) ;
2015-11-20 13:47:56 +01:00
lun - > vlun . nr_bad_blocks + + ;
2016-01-12 07:49:16 +01:00
lun - > vlun . nr_free_blocks - - ;
2015-10-28 19:54:56 +01:00
}
return 0 ;
}
/*
 * Callback for dev->ops->get_l2p_tbl. Walks @nlb L2P entries starting at
 * logical block @slba and marks every block that holds mapped data as open,
 * moving it off the free list.
 *
 * Returns 0 on success, -EINVAL if the device reports out-of-bounds data.
 */
static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
{
	struct nvm_dev *dev = private;
	struct gen_nvm *gn = dev->mp;
	u64 elba = slba + nlb;
	struct gen_lun *lun;
	struct nvm_block *blk;
	u64 i;
	int lun_id;

	if (unlikely(elba > dev->total_secs)) {
		pr_err("gennvm: L2P data from device is out of bounds!\n");
		return -EINVAL;
	}

	for (i = 0; i < nlb; i++) {
		u64 pba = le64_to_cpu(entries[i]);

		/* U64_MAX is tolerated here — presumably the device's
		 * "unmapped" sentinel; TODO confirm against the spec.
		 */
		if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
			pr_err("gennvm: L2P data entry is out of bounds!\n");
			return -EINVAL;
		}

		/* Address zero is a special one. The first page on a disk is
		 * protected. It often holds internal device boot
		 * information.
		 */
		if (!pba)
			continue;

		/* resolve block from physical address */
		lun_id = div_u64(pba, dev->sec_per_lun);
		lun = &gn->luns[lun_id];

		/* Calculate block offset into lun */
		pba = pba - (dev->sec_per_lun * lun_id);
		blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];

		if (!blk->state) {
			/* at this point, we don't know anything about the
			 * block. It's up to the FTL on top to re-establish the
			 * block state. The block is assumed to be open.
			 */
			list_move_tail(&blk->list, &lun->used_list);
			blk->state = NVM_BLK_ST_OPEN;
			lun->vlun.nr_free_blocks--;
			lun->vlun.nr_open_blocks++;
		}
	}

	return 0;
}
static int gennvm_blocks_init ( struct nvm_dev * dev , struct gen_nvm * gn )
{
struct gen_lun * lun ;
struct nvm_block * block ;
sector_t lun_iter , blk_iter , cur_block_id = 0 ;
2016-05-06 20:03:05 +02:00
int ret , nr_blks ;
u8 * blks ;
nr_blks = dev - > blks_per_lun * dev - > plane_mode ;
blks = kmalloc ( nr_blks , GFP_KERNEL ) ;
if ( ! blks )
return - ENOMEM ;
2015-10-28 19:54:56 +01:00
gennvm_for_each_lun ( gn , lun , lun_iter ) {
lun - > vlun . blocks = vzalloc ( sizeof ( struct nvm_block ) *
dev - > blks_per_lun ) ;
2016-05-06 20:03:05 +02:00
if ( ! lun - > vlun . blocks ) {
kfree ( blks ) ;
2015-10-28 19:54:56 +01:00
return - ENOMEM ;
2016-05-06 20:03:05 +02:00
}
2015-10-28 19:54:56 +01:00
for ( blk_iter = 0 ; blk_iter < dev - > blks_per_lun ; blk_iter + + ) {
block = & lun - > vlun . blocks [ blk_iter ] ;
INIT_LIST_HEAD ( & block - > list ) ;
block - > lun = & lun - > vlun ;
block - > id = cur_block_id + + ;
/* First block is reserved for device */
2015-11-20 13:47:56 +01:00
if ( unlikely ( lun_iter = = 0 & & blk_iter = = 0 ) ) {
lun - > vlun . nr_free_blocks - - ;
2015-10-28 19:54:56 +01:00
continue ;
2015-11-20 13:47:56 +01:00
}
2015-10-28 19:54:56 +01:00
list_add_tail ( & block - > list , & lun - > free_list ) ;
}
if ( dev - > ops - > get_bb_tbl ) {
2015-11-16 15:34:37 +01:00
struct ppa_addr ppa ;
ppa . ppa = 0 ;
ppa . g . ch = lun - > vlun . chnl_id ;
2016-05-06 20:03:10 +02:00
ppa . g . lun = lun - > vlun . lun_id ;
2015-11-16 15:34:37 +01:00
2016-05-06 20:03:05 +02:00
ret = nvm_get_bb_tbl ( dev , ppa , blks ) ;
if ( ret )
pr_err ( " gennvm: could not get BB table \n " ) ;
ret = gennvm_block_bb ( gn , ppa , blks , nr_blks ) ;
2015-10-28 19:54:56 +01:00
if ( ret )
2016-05-06 20:03:05 +02:00
pr_err ( " gennvm: BB table map failed \n " ) ;
2015-10-28 19:54:56 +01:00
}
}
2016-03-03 15:06:41 +01:00
if ( ( dev - > identity . dom & NVM_RSP_L2P ) & & dev - > ops - > get_l2p_tbl ) {
2016-02-20 08:52:41 +01:00
ret = dev - > ops - > get_l2p_tbl ( dev , 0 , dev - > total_secs ,
2015-10-28 19:54:56 +01:00
gennvm_block_map , dev ) ;
if ( ret ) {
pr_err ( " gennvm: could not read L2P table. \n " ) ;
pr_warn ( " gennvm: default block initialization " ) ;
}
}
2016-05-06 20:03:05 +02:00
kfree ( blks ) ;
2015-10-28 19:54:56 +01:00
return 0 ;
}
/* Tear down all media manager state for @dev and clear dev->mp. */
static void gennvm_free(struct nvm_dev *dev)
{
	gennvm_blocks_free(dev);
	gennvm_luns_free(dev);
	kfree(dev->mp);
	dev->mp = NULL;
}
2015-10-28 19:54:56 +01:00
static int gennvm_register ( struct nvm_dev * dev )
{
struct gen_nvm * gn ;
int ret ;
2015-12-06 11:25:50 +01:00
if ( ! try_module_get ( THIS_MODULE ) )
return - ENODEV ;
2015-10-28 19:54:56 +01:00
gn = kzalloc ( sizeof ( struct gen_nvm ) , GFP_KERNEL ) ;
if ( ! gn )
return - ENOMEM ;
2015-11-16 15:34:37 +01:00
gn - > dev = dev ;
2015-10-28 19:54:56 +01:00
gn - > nr_luns = dev - > nr_luns ;
2016-03-03 15:06:37 +01:00
INIT_LIST_HEAD ( & gn - > area_list ) ;
2015-10-28 19:54:56 +01:00
dev - > mp = gn ;
ret = gennvm_luns_init ( dev , gn ) ;
if ( ret ) {
pr_err ( " gennvm: could not initialize luns \n " ) ;
goto err ;
}
ret = gennvm_blocks_init ( dev , gn ) ;
if ( ret ) {
pr_err ( " gennvm: could not initialize blocks \n " ) ;
goto err ;
}
return 1 ;
err :
2015-11-28 16:49:23 +01:00
gennvm_free ( dev ) ;
2015-12-06 11:25:50 +01:00
module_put ( THIS_MODULE ) ;
2015-10-28 19:54:56 +01:00
return ret ;
}
/* Undo gennvm_register(): free all state and drop the module reference. */
static void gennvm_unregister(struct nvm_dev *dev)
{
	gennvm_free(dev);
	module_put(THIS_MODULE);
}
2016-01-12 07:49:33 +01:00
static struct nvm_block * gennvm_get_blk_unlocked ( struct nvm_dev * dev ,
2015-10-28 19:54:56 +01:00
struct nvm_lun * vlun , unsigned long flags )
{
struct gen_lun * lun = container_of ( vlun , struct gen_lun , vlun ) ;
struct nvm_block * blk = NULL ;
int is_gc = flags & NVM_IOTYPE_GC ;
2016-01-12 07:49:33 +01:00
assert_spin_locked ( & vlun - > lock ) ;
2015-10-28 19:54:56 +01:00
if ( list_empty ( & lun - > free_list ) ) {
pr_err_ratelimited ( " gennvm: lun %u have no free pages available " ,
lun - > vlun . id ) ;
goto out ;
}
2015-12-06 11:25:45 +01:00
if ( ! is_gc & & lun - > vlun . nr_free_blocks < lun - > reserved_blocks )
2015-10-28 19:54:56 +01:00
goto out ;
blk = list_first_entry ( & lun - > free_list , struct nvm_block , list ) ;
list_move_tail ( & blk - > list , & lun - > used_list ) ;
2016-01-12 07:49:33 +01:00
blk - > state = NVM_BLK_ST_OPEN ;
2015-10-28 19:54:56 +01:00
lun - > vlun . nr_free_blocks - - ;
2016-01-12 07:49:33 +01:00
lun - > vlun . nr_open_blocks + + ;
2015-10-28 19:54:56 +01:00
out :
2016-01-12 07:49:33 +01:00
return blk ;
}
static struct nvm_block * gennvm_get_blk ( struct nvm_dev * dev ,
struct nvm_lun * vlun , unsigned long flags )
{
struct nvm_block * blk ;
spin_lock ( & vlun - > lock ) ;
blk = gennvm_get_blk_unlocked ( dev , vlun , flags ) ;
2015-12-06 11:25:45 +01:00
spin_unlock ( & vlun - > lock ) ;
2015-10-28 19:54:56 +01:00
return blk ;
}
/*
 * Return @blk to its lun. The caller must hold the lun lock. Open and
 * closed blocks go back on the free list; bad blocks — and blocks in an
 * unrecognized state, which are treated as bad — go on the bad block list.
 */
static void gennvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
{
	struct nvm_lun *vlun = blk->lun;
	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);

	assert_spin_locked(&vlun->lock);

	if (blk->state & NVM_BLK_ST_OPEN) {
		list_move_tail(&blk->list, &lun->free_list);
		lun->vlun.nr_open_blocks--;
		lun->vlun.nr_free_blocks++;
		blk->state = NVM_BLK_ST_FREE;
	} else if (blk->state & NVM_BLK_ST_CLOSED) {
		list_move_tail(&blk->list, &lun->free_list);
		lun->vlun.nr_closed_blocks--;
		lun->vlun.nr_free_blocks++;
		blk->state = NVM_BLK_ST_FREE;
	} else if (blk->state & NVM_BLK_ST_BAD) {
		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		blk->state = NVM_BLK_ST_BAD;
	} else {
		/* unknown state: warn loudly and quarantine the block */
		WARN_ON_ONCE(1);
		pr_err("gennvm: erroneous block type (%lu -> %u)\n",
							blk->id, blk->state);
		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		blk->state = NVM_BLK_ST_BAD;
	}
}
2015-10-28 19:54:56 +01:00
2016-01-12 07:49:33 +01:00
static void gennvm_put_blk ( struct nvm_dev * dev , struct nvm_block * blk )
{
struct nvm_lun * vlun = blk - > lun ;
spin_lock ( & vlun - > lock ) ;
gennvm_put_blk_unlocked ( dev , blk ) ;
2015-10-28 19:54:56 +01:00
spin_unlock ( & vlun - > lock ) ;
}
2016-05-06 20:03:18 +02:00
static void gennvm_mark_blk ( struct nvm_dev * dev , struct ppa_addr ppa , int type )
2015-10-28 19:54:56 +01:00
{
struct gen_nvm * gn = dev - > mp ;
struct gen_lun * lun ;
struct nvm_block * blk ;
2016-05-06 20:03:08 +02:00
pr_debug ( " gennvm: ppa (ch: %u lun: %u blk: %u pg: %u) -> %u \n " ,
2016-05-06 20:03:18 +02:00
ppa . g . ch , ppa . g . lun , ppa . g . blk , ppa . g . pg , type ) ;
2016-05-06 20:03:08 +02:00
2016-05-06 20:03:18 +02:00
if ( unlikely ( ppa . g . ch > dev - > nr_chnls | |
ppa . g . lun > dev - > luns_per_chnl | |
ppa . g . blk > dev - > blks_per_lun ) ) {
2015-10-28 19:54:56 +01:00
WARN_ON_ONCE ( 1 ) ;
pr_err ( " gennvm: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u " ,
2016-05-06 20:03:18 +02:00
ppa . g . ch , dev - > nr_chnls ,
ppa . g . lun , dev - > luns_per_chnl ,
ppa . g . blk , dev - > blks_per_lun ) ;
2015-10-28 19:54:56 +01:00
return ;
}
2016-05-06 20:03:18 +02:00
lun = & gn - > luns [ ppa . g . lun * ppa . g . ch ] ;
blk = & lun - > vlun . blocks [ ppa . g . blk ] ;
2015-10-28 19:54:56 +01:00
/* will be moved to bb list on put_blk from target */
2016-01-12 07:49:33 +01:00
blk - > state = type ;
2015-10-28 19:54:56 +01:00
}
2016-05-06 20:03:08 +02:00
/*
* mark block bad in gennvm . It is expected that the target recovers separately
*/
2015-10-28 19:54:56 +01:00
static void gennvm_mark_blk_bad ( struct nvm_dev * dev , struct nvm_rq * rqd )
{
2016-05-06 20:03:08 +02:00
int bit = - 1 ;
int max_secs = dev - > ops - > max_phys_sect ;
void * comp_bits = & rqd - > ppa_status ;
2015-10-28 19:54:56 +01:00
2016-01-12 07:49:19 +01:00
nvm_addr_to_generic_mode ( dev , rqd ) ;
2015-10-28 19:54:56 +01:00
/* look up blocks and mark them as bad */
2016-05-06 20:03:20 +02:00
if ( rqd - > nr_ppas = = 1 ) {
2016-05-06 20:03:18 +02:00
gennvm_mark_blk ( dev , rqd - > ppa_addr , NVM_BLK_ST_BAD ) ;
2016-05-06 20:03:08 +02:00
return ;
}
while ( ( bit = find_next_bit ( comp_bits , max_secs , bit + 1 ) ) < max_secs )
2016-05-06 20:03:18 +02:00
gennvm_mark_blk ( dev , rqd - > ppa_list [ bit ] , NVM_BLK_ST_BAD ) ;
2015-10-28 19:54:56 +01:00
}
2016-01-12 07:49:29 +01:00
static void gennvm_end_io ( struct nvm_rq * rqd )
2015-10-28 19:54:56 +01:00
{
struct nvm_tgt_instance * ins = rqd - > ins ;
2016-05-06 20:03:08 +02:00
if ( rqd - > error = = NVM_RSP_ERR_FAILWRITE )
2015-10-28 19:54:56 +01:00
gennvm_mark_blk_bad ( rqd - > dev , rqd ) ;
2016-01-12 07:49:29 +01:00
ins - > tt - > end_io ( rqd ) ;
2016-01-12 07:49:21 +01:00
}
/*
 * Submit @rqd to the device after translating its addresses to device
 * format and installing the media manager completion hook.
 * Returns -ENODEV when the device cannot submit I/O.
 */
static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->submit_io)
		return -ENODEV;

	/* Convert address space */
	nvm_generic_to_addr_mode(dev, rqd);

	rqd->dev = dev;
	rqd->end_io = gennvm_end_io;
	return dev->ops->submit_io(dev, rqd);
}
static int gennvm_erase_blk ( struct nvm_dev * dev , struct nvm_block * blk ,
unsigned long flags )
{
2016-01-12 07:49:19 +01:00
struct ppa_addr addr = block_to_ppa ( dev , blk ) ;
2015-10-28 19:54:56 +01:00
2016-01-12 07:49:28 +01:00
return nvm_erase_ppa ( dev , & addr , 1 ) ;
2015-10-28 19:54:56 +01:00
}
/* Atomically claim @lunid; returns non-zero if it was already reserved. */
static int gennvm_reserve_lun(struct nvm_dev *dev, int lunid)
{
	return test_and_set_bit(lunid, dev->lun_map);
}
/* Release @lunid; warns if the lun was not actually reserved. */
static void gennvm_release_lun(struct nvm_dev *dev, int lunid)
{
	WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
}
/* Return the generic lun for @lunid, or NULL if the id is out of range. */
static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
{
	struct gen_nvm *gn = dev->mp;

	if (unlikely(lunid >= dev->nr_luns))
		return NULL;

	return &gn->luns[lunid].vlun;
}
2015-11-20 13:47:57 +01:00
static void gennvm_lun_info_print ( struct nvm_dev * dev )
2015-10-28 19:54:56 +01:00
{
struct gen_nvm * gn = dev - > mp ;
struct gen_lun * lun ;
unsigned int i ;
2015-11-20 13:47:57 +01:00
gennvm_for_each_lun ( gn , lun , i ) {
spin_lock ( & lun - > vlun . lock ) ;
2016-01-12 07:49:33 +01:00
pr_info ( " %s: lun%8u \t %u \t %u \t %u \t %u \n " ,
2015-11-20 13:47:57 +01:00
dev - > name , i ,
lun - > vlun . nr_free_blocks ,
2016-01-12 07:49:33 +01:00
lun - > vlun . nr_open_blocks ,
lun - > vlun . nr_closed_blocks ,
2015-11-20 13:47:57 +01:00
lun - > vlun . nr_bad_blocks ) ;
spin_unlock ( & lun - > vlun . lock ) ;
}
2015-10-28 19:54:56 +01:00
}
/* Media manager operations table registered with the lightnvm core. */
static struct nvmm_type gennvm = {
	.name			= "gennvm",
	.version		= {0, 1, 0},

	.register_mgr		= gennvm_register,
	.unregister_mgr		= gennvm_unregister,

	.get_blk_unlocked	= gennvm_get_blk_unlocked,
	.put_blk_unlocked	= gennvm_put_blk_unlocked,

	.get_blk		= gennvm_get_blk,
	.put_blk		= gennvm_put_blk,

	.submit_io		= gennvm_submit_io,
	.erase_blk		= gennvm_erase_blk,

	.mark_blk		= gennvm_mark_blk,

	.get_lun		= gennvm_get_lun,
	.reserve_lun		= gennvm_reserve_lun,
	.release_lun		= gennvm_release_lun,
	.lun_info_print		= gennvm_lun_info_print,

	.get_area		= gennvm_get_area,
	.put_area		= gennvm_put_area,
};
/* Register this media manager with the lightnvm core on module load. */
static int __init gennvm_module_init(void)
{
	return nvm_register_mgr(&gennvm);
}

/* Unregister from the lightnvm core on module unload. */
static void gennvm_module_exit(void)
{
	nvm_unregister_mgr(&gennvm);
}

module_init(gennvm_module_init);
module_exit(gennvm_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic media manager for Open-Channel SSDs");