2015-10-29 17:57:29 +09:00
/*
* nvme - lightnvm . c - LightNVM NVMe device
*
* Copyright ( C ) 2014 - 2015 IT University of Copenhagen
* Initial release : Matias Bjorling < mb @ lightnvm . io >
*
* This program is free software ; you can redistribute it and / or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation .
*
* This program is distributed in the hope that it will be useful , but
* WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the GNU
* General Public License for more details .
*
* You should have received a copy of the GNU General Public License
* along with this program ; see the file COPYING . If not , write to
* the Free Software Foundation , 675 Mass Ave , Cambridge , MA 0213 9 ,
* USA .
*
*/
# include "nvme.h"
# include <linux/nvme.h>
# include <linux/bitops.h>
# include <linux/lightnvm.h>
# include <linux/vmalloc.h>
enum nvme_nvm_admin_opcode {
nvme_nvm_admin_identity = 0xe2 ,
nvme_nvm_admin_get_l2p_tbl = 0xea ,
nvme_nvm_admin_get_bb_tbl = 0xf2 ,
nvme_nvm_admin_set_bb_tbl = 0xf1 ,
} ;
/*
 * Hybrid read/write command: carries the physical address (spba) plus the
 * logical start block (slba) taken from the bio (see nvme_nvm_rqtocmd()).
 */
struct nvme_nvm_hb_rw {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2;
	__le64			metadata;	/* DMA address of OOB metadata (rqd->dma_meta_list) */
	__le64			prp1;
	__le64			prp2;
	__le64			spba;		/* start physical page address */
	__le16			length;		/* zero-based sector count */
	__le16			control;
	__le32			dsmgmt;
	__le64			slba;		/* start logical block address */
};
/* Pure physical read/write command: addressed by PPA only, no LBA. */
struct nvme_nvm_ph_rw {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2;
	__le64			metadata;	/* DMA address of OOB metadata */
	__le64			prp1;
	__le64			prp2;
	__le64			spba;		/* start physical page address */
	__le16			length;		/* zero-based sector count */
	__le16			control;
	__le32			dsmgmt;
	__le64			resv;
};
/* LightNVM identify admin command (nvme_nvm_admin_identity). */
struct nvme_nvm_identity {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le32			chnl_off;	/* channel offset to start reporting from */
	__u32			rsvd11[5];
};
/* Get L2P table admin command (nvme_nvm_admin_get_l2p_tbl). */
struct nvme_nvm_l2ptbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__le32			cdw2[4];
	__le64			prp1;
	__le64			prp2;
	__le64			slba;		/* first logical block to report */
	__le32			nlb;		/* number of entries requested */
	__le16			cdw14[6];
};
/* Get bad block table admin command (nvme_nvm_admin_get_bb_tbl). */
struct nvme_nvm_getbbtbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;		/* PPA selecting the LUN to report */
	__u32			rsvd4[4];
};
/* Set bad block table admin command (nvme_nvm_admin_set_bb_tbl). */
struct nvme_nvm_setbbtbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__le64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;		/* first PPA to mark */
	__le16			nlb;		/* zero-based number of PPAs */
	__u8			value;		/* new block state */
	__u8			rsvd3;
	__u32			rsvd4[3];
};
/* Erase block I/O command (NVM_OP_ERASE). */
struct nvme_nvm_erase_blk {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;		/* physical address of block(s) to erase */
	__le16			length;		/* zero-based PPA count */
	__le16			control;
	__le32			dsmgmt;
	__le64			resv;
};
/*
 * Union of all LightNVM command layouts; cast to struct nvme_command when
 * submitted. Each variant must remain 64 bytes (see _nvme_nvm_check_size()).
 */
struct nvme_nvm_command {
	union {
		struct nvme_common_command common;
		struct nvme_nvm_identity identity;
		struct nvme_nvm_hb_rw hb_rw;
		struct nvme_nvm_ph_rw ph_rw;
		struct nvme_nvm_l2ptbl l2p;
		struct nvme_nvm_getbbtbl get_bb;
		struct nvme_nvm_setbbtbl set_bb;
		struct nvme_nvm_erase_blk erase;
	};
};
/* Maximum number of MLC page-pair entries the identify layout can hold. */
#define NVME_NVM_LP_MLC_PAIRS 886

/* MLC page-pairing table as reported by identify (see init_grps()). */
struct nvme_nvm_lp_mlc {
	__le16			num_pairs;
	__u8			pairs[NVME_NVM_LP_MLC_PAIRS];
};
/* Per-group lower-page table: an 8-byte identifier plus the MLC pairing. */
struct nvme_nvm_lp_tbl {
	__u8			id[8];
	struct nvme_nvm_lp_mlc	mlc;
};
/*
 * Per-group geometry descriptor inside the identify page; 960 bytes,
 * translated into struct nvm_id_group by init_grps().
 */
struct nvme_nvm_id_group {
	__u8			mtype;		/* media type */
	__u8			fmtype;		/* flash media type (SLC/MLC, see NVM_ID_FMTYPE_*) */
	__le16			res16;
	__u8			num_ch;		/* channels */
	__u8			num_lun;	/* LUNs per channel */
	__u8			num_pln;	/* planes per LUN */
	__u8			rsvd1;
	__le16			num_blk;	/* blocks per plane */
	__le16			num_pg;		/* pages per block */
	__le16			fpg_sz;		/* flash page size in bytes */
	__le16			csecs;		/* sector size */
	__le16			sos;		/* out-of-band area size per sector */
	__le16			rsvd2;
	/* trdt/trdm .. tbet/tbem: typical/max read, program, erase timings
	 * (units per the LightNVM spec — not interpreted here). */
	__le32			trdt;
	__le32			trdm;
	__le32			tprt;
	__le32			tprm;
	__le32			tbet;
	__le32			tbem;
	__le32			mpos;		/* multi-plane operation modes bitmap */
	__le32			mccap;		/* media and controller capabilities */
	__le16			cpar;
	__u8			reserved[10];
	struct nvme_nvm_lp_tbl	lptbl;		/* lower-page table (MLC only) */
} __packed;
/*
 * Bit offset/length pairs describing how a physical page address (PPA)
 * is packed into a 64-bit value for this device. 16 bytes, __packed.
 */
struct nvme_nvm_addr_format {
	__u8			ch_offset;
	__u8			ch_len;
	__u8			lun_offset;
	__u8			lun_len;
	__u8			pln_offset;
	__u8			pln_len;
	__u8			blk_offset;
	__u8			blk_len;
	__u8			pg_offset;
	__u8			pg_len;
	__u8			sect_offset;
	__u8			sect_len;
	__u8			res[4];
} __packed;
/*
 * LightNVM identify payload: 4096 bytes — header, PPA address format,
 * and up to four geometry groups.
 */
struct nvme_nvm_id {
	__u8			ver_id;		/* spec version */
	__u8			vmnt;		/* vendor opcode support */
	__u8			cgrps;		/* number of valid groups */
	__u8			res;
	__le32			cap;		/* capabilities */
	__le32			dom;		/* device operating mode */
	struct nvme_nvm_addr_format ppaf;
	__u8			resv[228];
	struct nvme_nvm_id_group groups[4];
} __packed;
2015-11-16 15:34:37 +01:00
struct nvme_nvm_bb_tbl {
__u8 tblid [ 4 ] ;
__le16 verid ;
__le16 revid ;
__le32 rvsd1 ;
__le32 tblks ;
__le32 tfact ;
__le32 tgrown ;
__le32 tdresv ;
__le32 thresv ;
__le32 rsvd2 [ 8 ] ;
__u8 blk [ 0 ] ;
} ;
2015-10-29 17:57:29 +09:00
/*
* Check we didn ' t inadvertently grow the command struct
*/
static inline void _nvme_nvm_check_size ( void )
{
BUILD_BUG_ON ( sizeof ( struct nvme_nvm_identity ) ! = 64 ) ;
BUILD_BUG_ON ( sizeof ( struct nvme_nvm_hb_rw ) ! = 64 ) ;
BUILD_BUG_ON ( sizeof ( struct nvme_nvm_ph_rw ) ! = 64 ) ;
2015-11-16 15:34:37 +01:00
BUILD_BUG_ON ( sizeof ( struct nvme_nvm_getbbtbl ) ! = 64 ) ;
BUILD_BUG_ON ( sizeof ( struct nvme_nvm_setbbtbl ) ! = 64 ) ;
2015-10-29 17:57:29 +09:00
BUILD_BUG_ON ( sizeof ( struct nvme_nvm_l2ptbl ) ! = 64 ) ;
BUILD_BUG_ON ( sizeof ( struct nvme_nvm_erase_blk ) ! = 64 ) ;
BUILD_BUG_ON ( sizeof ( struct nvme_nvm_id_group ) ! = 960 ) ;
BUILD_BUG_ON ( sizeof ( struct nvme_nvm_addr_format ) ! = 128 ) ;
BUILD_BUG_ON ( sizeof ( struct nvme_nvm_id ) ! = 4096 ) ;
2015-11-16 15:34:37 +01:00
BUILD_BUG_ON ( sizeof ( struct nvme_nvm_bb_tbl ) ! = 512 ) ;
2015-10-29 17:57:29 +09:00
}
static int init_grps ( struct nvm_id * nvm_id , struct nvme_nvm_id * nvme_nvm_id )
{
struct nvme_nvm_id_group * src ;
struct nvm_id_group * dst ;
int i , end ;
end = min_t ( u32 , 4 , nvm_id - > cgrps ) ;
for ( i = 0 ; i < end ; i + + ) {
src = & nvme_nvm_id - > groups [ i ] ;
dst = & nvm_id - > groups [ i ] ;
dst - > mtype = src - > mtype ;
dst - > fmtype = src - > fmtype ;
dst - > num_ch = src - > num_ch ;
dst - > num_lun = src - > num_lun ;
dst - > num_pln = src - > num_pln ;
dst - > num_pg = le16_to_cpu ( src - > num_pg ) ;
dst - > num_blk = le16_to_cpu ( src - > num_blk ) ;
dst - > fpg_sz = le16_to_cpu ( src - > fpg_sz ) ;
dst - > csecs = le16_to_cpu ( src - > csecs ) ;
dst - > sos = le16_to_cpu ( src - > sos ) ;
dst - > trdt = le32_to_cpu ( src - > trdt ) ;
dst - > trdm = le32_to_cpu ( src - > trdm ) ;
dst - > tprt = le32_to_cpu ( src - > tprt ) ;
dst - > tprm = le32_to_cpu ( src - > tprm ) ;
dst - > tbet = le32_to_cpu ( src - > tbet ) ;
dst - > tbem = le32_to_cpu ( src - > tbem ) ;
dst - > mpos = le32_to_cpu ( src - > mpos ) ;
2015-11-16 15:34:39 +01:00
dst - > mccap = le32_to_cpu ( src - > mccap ) ;
2015-10-29 17:57:29 +09:00
dst - > cpar = le16_to_cpu ( src - > cpar ) ;
2016-01-12 07:49:35 +01:00
if ( dst - > fmtype = = NVM_ID_FMTYPE_MLC ) {
memcpy ( dst - > lptbl . id , src - > lptbl . id , 8 ) ;
dst - > lptbl . mlc . num_pairs =
le16_to_cpu ( src - > lptbl . mlc . num_pairs ) ;
2016-02-04 15:13:26 +01:00
if ( dst - > lptbl . mlc . num_pairs > NVME_NVM_LP_MLC_PAIRS ) {
pr_err ( " nvm: number of MLC pairs not supported \n " ) ;
return - EINVAL ;
}
2016-01-12 07:49:35 +01:00
memcpy ( dst - > lptbl . mlc . pairs , src - > lptbl . mlc . pairs ,
2016-02-04 15:13:26 +01:00
dst - > lptbl . mlc . num_pairs ) ;
2016-01-12 07:49:35 +01:00
}
2015-10-29 17:57:29 +09:00
}
return 0 ;
}
2015-12-06 11:25:48 +01:00
static int nvme_nvm_identity ( struct nvm_dev * nvmdev , struct nvm_id * nvm_id )
2015-10-29 17:57:29 +09:00
{
2015-12-06 11:25:48 +01:00
struct nvme_ns * ns = nvmdev - > q - > queuedata ;
2015-10-29 17:57:29 +09:00
struct nvme_nvm_id * nvme_nvm_id ;
struct nvme_nvm_command c = { } ;
int ret ;
c . identity . opcode = nvme_nvm_admin_identity ;
c . identity . nsid = cpu_to_le32 ( ns - > ns_id ) ;
c . identity . chnl_off = 0 ;
nvme_nvm_id = kmalloc ( sizeof ( struct nvme_nvm_id ) , GFP_KERNEL ) ;
if ( ! nvme_nvm_id )
return - ENOMEM ;
2015-12-03 09:52:05 -07:00
ret = nvme_submit_sync_cmd ( ns - > ctrl - > admin_q , ( struct nvme_command * ) & c ,
2015-11-20 13:47:55 +01:00
nvme_nvm_id , sizeof ( struct nvme_nvm_id ) ) ;
2015-10-29 17:57:29 +09:00
if ( ret ) {
ret = - EIO ;
goto out ;
}
nvm_id - > ver_id = nvme_nvm_id - > ver_id ;
nvm_id - > vmnt = nvme_nvm_id - > vmnt ;
nvm_id - > cgrps = nvme_nvm_id - > cgrps ;
nvm_id - > cap = le32_to_cpu ( nvme_nvm_id - > cap ) ;
nvm_id - > dom = le32_to_cpu ( nvme_nvm_id - > dom ) ;
2015-11-16 15:34:45 +01:00
memcpy ( & nvm_id - > ppaf , & nvme_nvm_id - > ppaf ,
sizeof ( struct nvme_nvm_addr_format ) ) ;
2015-10-29 17:57:29 +09:00
ret = init_grps ( nvm_id , nvme_nvm_id ) ;
out :
kfree ( nvme_nvm_id ) ;
return ret ;
}
/*
 * Read the device-maintained logical-to-physical table for the range
 * [slba, slba + nlb), in chunks bounded by the admin queue's maximum
 * transfer size, invoking @update_l2p on each chunk.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EIO on command
 * failure, or -EINTR if the callback asks to stop.
 */
static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
				nvm_l2p_update_fn *update_l2p, void *priv)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_command c = { };
	/* Largest admin transfer in bytes (max hw sectors << 9). */
	u32 len = queue_max_hw_sectors(ns->ctrl->admin_q) << 9;
	u32 nlb_pr_rq = len / sizeof(u64);	/* 8-byte entries per request */
	u64 cmd_slba = slba;
	void *entries;
	int ret = 0;

	c.l2p.opcode = nvme_nvm_admin_get_l2p_tbl;
	c.l2p.nsid = cpu_to_le32(ns->ns_id);
	entries = kmalloc(len, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	while (nlb) {
		u32 cmd_nlb = min(nlb_pr_rq, nlb);

		c.l2p.slba = cpu_to_le64(cmd_slba);
		c.l2p.nlb = cpu_to_le32(cmd_nlb);

		ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
				(struct nvme_command *)&c, entries, len);
		if (ret) {
			dev_err(ns->ctrl->device,
				"L2P table transfer failed (%d)\n", ret);
			ret = -EIO;
			goto out;
		}

		/* A non-zero return from the callback aborts the walk. */
		if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
			ret = -EINTR;
			goto out;
		}

		cmd_slba += cmd_nlb;
		nlb -= cmd_nlb;
	}

out:
	kfree(entries);
	return ret;
}
2015-11-28 16:49:27 +01:00
static int nvme_nvm_get_bb_tbl ( struct nvm_dev * nvmdev , struct ppa_addr ppa ,
2016-05-06 20:03:05 +02:00
u8 * blks )
2015-10-29 17:57:29 +09:00
{
2015-11-28 16:49:27 +01:00
struct request_queue * q = nvmdev - > q ;
2015-10-29 17:57:29 +09:00
struct nvme_ns * ns = q - > queuedata ;
2015-12-03 09:52:05 -07:00
struct nvme_ctrl * ctrl = ns - > ctrl ;
2015-10-29 17:57:29 +09:00
struct nvme_nvm_command c = { } ;
2015-11-16 15:34:37 +01:00
struct nvme_nvm_bb_tbl * bb_tbl ;
2016-05-06 20:02:58 +02:00
int nr_blks = nvmdev - > blks_per_lun * nvmdev - > plane_mode ;
int tblsz = sizeof ( struct nvme_nvm_bb_tbl ) + nr_blks ;
2015-10-29 17:57:29 +09:00
int ret = 0 ;
c . get_bb . opcode = nvme_nvm_admin_get_bb_tbl ;
c . get_bb . nsid = cpu_to_le32 ( ns - > ns_id ) ;
2015-11-16 15:34:37 +01:00
c . get_bb . spba = cpu_to_le64 ( ppa . ppa ) ;
2015-10-29 17:57:29 +09:00
2015-11-16 15:34:37 +01:00
bb_tbl = kzalloc ( tblsz , GFP_KERNEL ) ;
if ( ! bb_tbl )
return - ENOMEM ;
2015-10-29 17:57:29 +09:00
2015-12-03 09:52:05 -07:00
ret = nvme_submit_sync_cmd ( ctrl - > admin_q , ( struct nvme_command * ) & c ,
2015-11-20 13:47:55 +01:00
bb_tbl , tblsz ) ;
2015-10-29 17:57:29 +09:00
if ( ret ) {
2016-05-06 20:03:14 +02:00
dev_err ( ctrl - > device , " get bad block table failed (%d) \n " , ret ) ;
2015-10-29 17:57:29 +09:00
ret = - EIO ;
goto out ;
}
2015-11-16 15:34:37 +01:00
if ( bb_tbl - > tblid [ 0 ] ! = ' B ' | | bb_tbl - > tblid [ 1 ] ! = ' B ' | |
bb_tbl - > tblid [ 2 ] ! = ' L ' | | bb_tbl - > tblid [ 3 ] ! = ' T ' ) {
2016-05-06 20:03:14 +02:00
dev_err ( ctrl - > device , " bbt format mismatch \n " ) ;
2015-11-16 15:34:37 +01:00
ret = - EINVAL ;
goto out ;
}
if ( le16_to_cpu ( bb_tbl - > verid ) ! = 1 ) {
ret = - EINVAL ;
2016-05-06 20:03:14 +02:00
dev_err ( ctrl - > device , " bbt version not supported \n " ) ;
2015-11-16 15:34:37 +01:00
goto out ;
}
2016-05-06 20:02:58 +02:00
if ( le32_to_cpu ( bb_tbl - > tblks ) ! = nr_blks ) {
2015-11-16 15:34:37 +01:00
ret = - EINVAL ;
2016-05-06 20:03:14 +02:00
dev_err ( ctrl - > device ,
" bbt unsuspected blocks returned (%u!=%u) " ,
2016-05-06 20:02:58 +02:00
le32_to_cpu ( bb_tbl - > tblks ) , nr_blks ) ;
2015-11-16 15:34:37 +01:00
goto out ;
}
2016-05-06 20:03:05 +02:00
memcpy ( blks , bb_tbl - > blk , nvmdev - > blks_per_lun * nvmdev - > plane_mode ) ;
2015-10-29 17:57:29 +09:00
out :
2015-11-16 15:34:37 +01:00
kfree ( bb_tbl ) ;
return ret ;
}
2016-05-06 20:03:09 +02:00
static int nvme_nvm_set_bb_tbl ( struct nvm_dev * nvmdev , struct ppa_addr * ppas ,
int nr_ppas , int type )
2015-11-16 15:34:37 +01:00
{
2015-12-06 11:25:48 +01:00
struct nvme_ns * ns = nvmdev - > q - > queuedata ;
2015-11-16 15:34:37 +01:00
struct nvme_nvm_command c = { } ;
int ret = 0 ;
c . set_bb . opcode = nvme_nvm_admin_set_bb_tbl ;
c . set_bb . nsid = cpu_to_le32 ( ns - > ns_id ) ;
2016-05-06 20:03:09 +02:00
c . set_bb . spba = cpu_to_le64 ( ppas - > ppa ) ;
c . set_bb . nlb = cpu_to_le16 ( nr_ppas - 1 ) ;
2015-11-16 15:34:37 +01:00
c . set_bb . value = type ;
2015-12-03 09:52:05 -07:00
ret = nvme_submit_sync_cmd ( ns - > ctrl - > admin_q , ( struct nvme_command * ) & c ,
2015-11-20 13:47:55 +01:00
NULL , 0 ) ;
2015-11-16 15:34:37 +01:00
if ( ret )
2016-05-06 20:03:14 +02:00
dev_err ( ns - > ctrl - > device , " set bad block table failed (%d) \n " ,
ret ) ;
2015-10-29 17:57:29 +09:00
return ret ;
}
/*
 * Translate a core-layer nvm_rq into a LightNVM NVMe I/O command.
 * For hybrid commands the logical start block from the bio is supplied
 * as well so the device can maintain its L2P mapping.
 */
static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
				struct nvme_ns *ns, struct nvme_nvm_command *c)
{
	c->ph_rw.opcode = rqd->opcode;
	c->ph_rw.nsid = cpu_to_le32(ns->ns_id);
	c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
	c->ph_rw.control = cpu_to_le16(rqd->flags);
	c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);	/* zero-based */

	if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
		c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
					rqd->bio->bi_iter.bi_sector));
}
/*
 * Completion handler for LightNVM I/O: forward the per-PPA status word
 * and the error to the core layer, then release the command buffer
 * (allocated in nvme_nvm_submit_io()) and the request.
 */
static void nvme_nvm_end_io(struct request *rq, int error)
{
	struct nvm_rq *rqd = rq->end_io_data;

	rqd->ppa_status = nvme_req(rq)->result.u64;
	nvm_end_io(rqd, error);

	kfree(nvme_req(rq)->cmd);
	blk_mq_free_request(rq);
}
2015-12-06 11:25:48 +01:00
static int nvme_nvm_submit_io ( struct nvm_dev * dev , struct nvm_rq * rqd )
2015-10-29 17:57:29 +09:00
{
2015-12-06 11:25:48 +01:00
struct request_queue * q = dev - > q ;
2015-10-29 17:57:29 +09:00
struct nvme_ns * ns = q - > queuedata ;
struct request * rq ;
struct bio * bio = rqd - > bio ;
struct nvme_nvm_command * cmd ;
2016-11-10 07:32:33 -08:00
cmd = kzalloc ( sizeof ( struct nvme_nvm_command ) , GFP_KERNEL ) ;
if ( ! cmd )
2015-10-29 17:57:29 +09:00
return - ENOMEM ;
2016-11-10 07:32:33 -08:00
rq = nvme_alloc_request ( q , ( struct nvme_command * ) cmd , 0 , NVME_QID_ANY ) ;
if ( IS_ERR ( rq ) ) {
kfree ( cmd ) ;
2015-10-29 17:57:29 +09:00
return - ENOMEM ;
}
2016-11-10 07:32:33 -08:00
rq - > cmd_flags & = ~ REQ_FAILFAST_DRIVER ;
2015-10-29 17:57:29 +09:00
rq - > ioprio = bio_prio ( bio ) ;
if ( bio_has_data ( bio ) )
rq - > nr_phys_segments = bio_phys_segments ( q , bio ) ;
rq - > __data_len = bio - > bi_iter . bi_size ;
rq - > bio = rq - > biotail = bio ;
nvme_nvm_rqtocmd ( rq , rqd , ns , cmd ) ;
rq - > end_io_data = rqd ;
blk_execute_rq_nowait ( q , NULL , rq , 0 , nvme_nvm_end_io ) ;
return 0 ;
}
2015-12-06 11:25:48 +01:00
static int nvme_nvm_erase_block ( struct nvm_dev * dev , struct nvm_rq * rqd )
2015-10-29 17:57:29 +09:00
{
2015-12-06 11:25:48 +01:00
struct request_queue * q = dev - > q ;
2015-10-29 17:57:29 +09:00
struct nvme_ns * ns = q - > queuedata ;
struct nvme_nvm_command c = { } ;
c . erase . opcode = NVM_OP_ERASE ;
c . erase . nsid = cpu_to_le32 ( ns - > ns_id ) ;
c . erase . spba = cpu_to_le64 ( rqd - > ppa_addr . ppa ) ;
2016-05-06 20:03:20 +02:00
c . erase . length = cpu_to_le16 ( rqd - > nr_ppas - 1 ) ;
2015-10-29 17:57:29 +09:00
return nvme_submit_sync_cmd ( q , ( struct nvme_command * ) & c , NULL , 0 ) ;
}
/*
 * Create a page-sized, page-aligned DMA pool for the core layer
 * (used for e.g. PPA lists and metadata buffers).
 * Returns the pool or NULL on failure.
 */
static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;

	return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
}
/* Tear down a pool created by nvme_nvm_create_dma_pool(). */
static void nvme_nvm_destroy_dma_pool(void *pool)
{
	dma_pool_destroy(pool);
}
/* Allocate one entry from the device's DMA pool (thin wrapper). */
static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return dma_pool_alloc(pool, mem_flags, dma_handler);
}
/* Return an entry obtained via nvme_nvm_dev_dma_alloc() to its pool. */
static void nvme_nvm_dev_dma_free(void *pool, void *addr,
				dma_addr_t dma_handler)
{
	dma_pool_free(pool, addr, dma_handler);
}
/* LightNVM core callbacks for NVMe-attached open-channel devices. */
static struct nvm_dev_ops nvme_nvm_dev_ops = {
	.identity		= nvme_nvm_identity,

	.get_l2p_tbl		= nvme_nvm_get_l2p_tbl,

	.get_bb_tbl		= nvme_nvm_get_bb_tbl,
	.set_bb_tbl		= nvme_nvm_set_bb_tbl,

	.submit_io		= nvme_nvm_submit_io,
	.erase_block		= nvme_nvm_erase_block,

	.create_dma_pool	= nvme_nvm_create_dma_pool,
	.destroy_dma_pool	= nvme_nvm_destroy_dma_pool,
	.dev_dma_alloc		= nvme_nvm_dev_dma_alloc,
	.dev_dma_free		= nvme_nvm_dev_dma_free,

	/* Maximum sectors per physical I/O advertised to the core. */
	.max_phys_sect		= 64,
};
/*
 * Allocate an nvm_dev on top of this namespace and register it with the
 * LightNVM core. Returns 0 on success or a negative errno.
 */
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
{
	struct request_queue *q = ns->queue;
	struct nvm_dev *dev;

	dev = nvm_alloc_dev(node);
	if (!dev)
		return -ENOMEM;

	dev->q = q;
	memcpy(dev->name, disk_name, DISK_NAME_LEN);
	dev->ops = &nvme_nvm_dev_ops;
	dev->private_data = ns;
	ns->ndev = dev;

	/* NOTE(review): dev is not freed here when nvm_register() fails —
	 * presumably the core or the caller owns cleanup; confirm. */
	return nvm_register(dev);
}
/* Unregister the namespace's nvm_dev from the LightNVM core. */
void nvme_nvm_unregister(struct nvme_ns *ns)
{
	nvm_unregister(ns->ndev);
}
/*
 * Single show() routine shared by every "lightnvm" sysfs attribute:
 * dispatch on the attribute name and print the matching identity field.
 */
static ssize_t nvm_dev_attr_show(struct device *dev,
		struct device_attribute *dattr, char *page)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	struct nvm_dev *ndev = ns->ndev;
	struct nvm_id *id;
	struct nvm_id_group *grp;
	struct attribute *attr;

	if (!ndev)
		return 0;

	id = &ndev->identity;
	grp = &id->groups[0];	/* attributes report the first group only */
	attr = &dattr->attr;

	if (strcmp(attr->name, "version") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
	} else if (strcmp(attr->name, "vendor_opcode") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
	} else if (strcmp(attr->name, "capabilities") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
	} else if (strcmp(attr->name, "device_mode") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
	} else if (strcmp(attr->name, "media_manager") == 0) {
		if (!ndev->mt)
			return scnprintf(page, PAGE_SIZE, "%s\n", "none");
		return scnprintf(page, PAGE_SIZE, "%s\n", ndev->mt->name);
	} else if (strcmp(attr->name, "ppa_format") == 0) {
		/* Offset/length pair for each PPA address component. */
		return scnprintf(page, PAGE_SIZE,
			"0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			id->ppaf.ch_offset, id->ppaf.ch_len,
			id->ppaf.lun_offset, id->ppaf.lun_len,
			id->ppaf.pln_offset, id->ppaf.pln_len,
			id->ppaf.blk_offset, id->ppaf.blk_len,
			id->ppaf.pg_offset, id->ppaf.pg_len,
			id->ppaf.sect_offset, id->ppaf.sect_len);
	} else if (strcmp(attr->name, "media_type") == 0) {	/* u8 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype);
	} else if (strcmp(attr->name, "flash_media_type") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype);
	} else if (strcmp(attr->name, "num_channels") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch);
	} else if (strcmp(attr->name, "num_luns") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun);
	} else if (strcmp(attr->name, "num_planes") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
	} else if (strcmp(attr->name, "num_blocks") == 0) {	/* u16 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_blk);
	} else if (strcmp(attr->name, "num_pages") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
	} else if (strcmp(attr->name, "page_size") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz);
	} else if (strcmp(attr->name, "hw_sector_size") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs);
	} else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos);
	} else if (strcmp(attr->name, "read_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt);
	} else if (strcmp(attr->name, "read_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm);
	} else if (strcmp(attr->name, "prog_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt);
	} else if (strcmp(attr->name, "prog_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm);
	} else if (strcmp(attr->name, "erase_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet);
	} else if (strcmp(attr->name, "erase_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem);
	} else if (strcmp(attr->name, "multiplane_modes") == 0) {
		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos);
	} else if (strcmp(attr->name, "media_capabilities") == 0) {
		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap);
	} else if (strcmp(attr->name, "max_phys_secs") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n",
				ndev->ops->max_phys_sect);
	} else {
		/* Should be unreachable: every attribute in the group is handled. */
		return scnprintf(page,
			PAGE_SIZE,
			"Unhandled attr(%s) in `nvm_dev_attr_show`\n",
			attr->name);
	}
}
/* Generate a read-only device attribute backed by nvm_dev_attr_show(). */
#define NVM_DEV_ATTR_RO(_name)					\
	DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)

static NVM_DEV_ATTR_RO(version);
static NVM_DEV_ATTR_RO(vendor_opcode);
static NVM_DEV_ATTR_RO(capabilities);
static NVM_DEV_ATTR_RO(device_mode);
static NVM_DEV_ATTR_RO(ppa_format);
static NVM_DEV_ATTR_RO(media_manager);

static NVM_DEV_ATTR_RO(media_type);
static NVM_DEV_ATTR_RO(flash_media_type);
static NVM_DEV_ATTR_RO(num_channels);
static NVM_DEV_ATTR_RO(num_luns);
static NVM_DEV_ATTR_RO(num_planes);
static NVM_DEV_ATTR_RO(num_blocks);
static NVM_DEV_ATTR_RO(num_pages);
static NVM_DEV_ATTR_RO(page_size);
static NVM_DEV_ATTR_RO(hw_sector_size);
static NVM_DEV_ATTR_RO(oob_sector_size);
static NVM_DEV_ATTR_RO(read_typ);
static NVM_DEV_ATTR_RO(read_max);
static NVM_DEV_ATTR_RO(prog_typ);
static NVM_DEV_ATTR_RO(prog_max);
static NVM_DEV_ATTR_RO(erase_typ);
static NVM_DEV_ATTR_RO(erase_max);
static NVM_DEV_ATTR_RO(multiplane_modes);
static NVM_DEV_ATTR_RO(media_capabilities);
static NVM_DEV_ATTR_RO(max_phys_secs);

/* All attributes live under the "lightnvm" sysfs group on the disk device. */
static struct attribute *nvm_dev_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_vendor_opcode.attr,
	&dev_attr_capabilities.attr,
	&dev_attr_device_mode.attr,
	&dev_attr_media_manager.attr,

	&dev_attr_ppa_format.attr,
	&dev_attr_media_type.attr,
	&dev_attr_flash_media_type.attr,
	&dev_attr_num_channels.attr,
	&dev_attr_num_luns.attr,
	&dev_attr_num_planes.attr,
	&dev_attr_num_blocks.attr,
	&dev_attr_num_pages.attr,
	&dev_attr_page_size.attr,
	&dev_attr_hw_sector_size.attr,
	&dev_attr_oob_sector_size.attr,
	&dev_attr_read_typ.attr,
	&dev_attr_read_max.attr,
	&dev_attr_prog_typ.attr,
	&dev_attr_prog_max.attr,
	&dev_attr_erase_typ.attr,
	&dev_attr_erase_max.attr,
	&dev_attr_multiplane_modes.attr,
	&dev_attr_media_capabilities.attr,
	&dev_attr_max_phys_secs.attr,
	NULL,
};

static const struct attribute_group nvm_dev_attr_group = {
	.name		= "lightnvm",
	.attrs		= nvm_dev_attrs,
};
/* Expose the "lightnvm" attribute group on the namespace's disk device. */
int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
	return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
					&nvm_dev_attr_group);
}
/* Remove the "lightnvm" attribute group added by nvme_nvm_register_sysfs(). */
void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
{
	sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
					&nvm_dev_attr_group);
}
/* move to shared place when used in multiple places. */
#define PCI_VENDOR_ID_CNEX 0x1d1d
#define PCI_DEVICE_ID_CNEX_WL 0x2807
#define PCI_DEVICE_ID_CNEX_QEMU 0x1f1f
2015-10-29 17:57:29 +09:00
int nvme_nvm_ns_supported ( struct nvme_ns * ns , struct nvme_id_ns * id )
{
2015-12-03 09:52:05 -07:00
struct nvme_ctrl * ctrl = ns - > ctrl ;
/* XXX: this is poking into PCI structures from generic code! */
struct pci_dev * pdev = to_pci_dev ( ctrl - > dev ) ;
2015-10-29 17:57:29 +09:00
/* QEMU NVMe simulator - PCI ID + Vendor specific bit */
2015-11-28 16:49:26 +01:00
if ( pdev - > vendor = = PCI_VENDOR_ID_CNEX & &
pdev - > device = = PCI_DEVICE_ID_CNEX_QEMU & &
2015-10-29 17:57:29 +09:00
id - > vs [ 0 ] = = 0x1 )
return 1 ;
/* CNEX Labs - PCI ID + Vendor specific bit */
2015-11-28 16:49:26 +01:00
if ( pdev - > vendor = = PCI_VENDOR_ID_CNEX & &
pdev - > device = = PCI_DEVICE_ID_CNEX_WL & &
2015-10-29 17:57:29 +09:00
id - > vs [ 0 ] = = 0x1 )
return 1 ;
return 0 ;
}