/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */
#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>
enum nvme_nvm_admin_opcode {
	nvme_nvm_admin_identity		= 0xe2,
	nvme_nvm_admin_get_bb_tbl	= 0xf2,
	nvme_nvm_admin_set_bb_tbl	= 0xf1,
};
struct nvme_nvm_ph_rw {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2;
	__le64			metadata;
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le64			resv;
};
struct nvme_nvm_erase_blk {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le64			resv;
};
struct nvme_nvm_identity {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__u32			rsvd11[6];
};
struct nvme_nvm_getbbtbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__u32			rsvd4[4];
};
struct nvme_nvm_setbbtbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__le64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			nlb;
	__u8			value;
	__u8			rsvd3;
	__u32			rsvd4[3];
};
struct nvme_nvm_command {
	union {
		struct nvme_common_command common;
		struct nvme_nvm_ph_rw ph_rw;
		struct nvme_nvm_erase_blk erase;
		struct nvme_nvm_identity identity;
		struct nvme_nvm_getbbtbl get_bb;
		struct nvme_nvm_setbbtbl set_bb;
	};
};
struct nvme_nvm_id12_grp {
	__u8			mtype;
	__u8			fmtype;
	__le16			res16;
	__u8			num_ch;
	__u8			num_lun;
	__u8			num_pln;
	__u8			rsvd1;
	__le16			num_chk;
	__le16			num_pg;
	__le16			fpg_sz;
	__le16			csecs;
	__le16			sos;
	__le16			rsvd2;
	__le32			trdt;
	__le32			trdm;
	__le32			tprt;
	__le32			tprm;
	__le32			tbet;
	__le32			tbem;
	__le32			mpos;
	__le32			mccap;
	__le16			cpar;
	__u8			reserved[906];
} __packed;
struct nvme_nvm_id12_addrf {
	__u8			ch_offset;
	__u8			ch_len;
	__u8			lun_offset;
	__u8			lun_len;
	__u8			pln_offset;
	__u8			pln_len;
	__u8			blk_offset;
	__u8			blk_len;
	__u8			pg_offset;
	__u8			pg_len;
	__u8			sect_offset;
	__u8			sect_len;
	__u8			res[4];
} __packed;
struct nvme_nvm_id12 {
	__u8			ver_id;
	__u8			vmnt;
	__u8			cgrps;
	__u8			res;
	__le32			cap;
	__le32			dom;
	struct nvme_nvm_id12_addrf ppaf;
	__u8			resv[228];
	struct nvme_nvm_id12_grp grp;
	__u8			resv2[2880];
} __packed;
struct nvme_nvm_bb_tbl {
	__u8	tblid[4];
	__le16	verid;
	__le16	revid;
	__le32	rvsd1;
	__le32	tblks;
	__le32	tfact;
	__le32	tgrown;
	__le32	tdresv;
	__le32	thresv;
	__le32	rsvd2[8];
	__u8	blk[0];
};
/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_grp) != 960);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_addrf) != 16);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id12) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
}
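
/*
 * Unpack the single configuration group of a 1.2 identify page into the
 * generic nvm_id, deriving sectors per chunk (clba) and the write-unit
 * hints (ws_min/ws_opt/ws_seq) from the reported multi-plane modes.
 */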
static int init_grp(struct nvm_id *nvm_id, struct nvme_nvm_id12 *id12)
{
	struct nvme_nvm_id12_grp *src;
	int sec_per_pg, sec_per_pl, pg_per_blk;

	if (id12->cgrps != 1)
		return -EINVAL;

	src = &id12->grp;

	nvm_id->mtype = src->mtype;
	nvm_id->fmtype = src->fmtype;

	nvm_id->num_ch = src->num_ch;
	nvm_id->num_lun = src->num_lun;

	nvm_id->num_chk = le16_to_cpu(src->num_chk);
	nvm_id->csecs = le16_to_cpu(src->csecs);
	nvm_id->sos = le16_to_cpu(src->sos);

	pg_per_blk = le16_to_cpu(src->num_pg);
	sec_per_pg = le16_to_cpu(src->fpg_sz) / nvm_id->csecs;
	sec_per_pl = sec_per_pg * src->num_pln;
	nvm_id->clba = sec_per_pl * pg_per_blk;
	nvm_id->ws_per_chk = pg_per_blk;

	nvm_id->mpos = le32_to_cpu(src->mpos);
	nvm_id->cpar = le16_to_cpu(src->cpar);
	nvm_id->mccap = le32_to_cpu(src->mccap);

	nvm_id->ws_opt = nvm_id->ws_min = sec_per_pg;
	nvm_id->ws_seq = NVM_IO_SNGL_ACCESS;

	if (nvm_id->mpos & 0x020202) {
		nvm_id->ws_seq = NVM_IO_DUAL_ACCESS;
		nvm_id->ws_opt <<= 1;
	} else if (nvm_id->mpos & 0x040404) {
		nvm_id->ws_seq = NVM_IO_QUAD_ACCESS;
		nvm_id->ws_opt <<= 2;
	}

	nvm_id->trdt = le32_to_cpu(src->trdt);
	nvm_id->trdm = le32_to_cpu(src->trdm);
	nvm_id->tprt = le32_to_cpu(src->tprt);
	nvm_id->tprm = le32_to_cpu(src->tprm);
	nvm_id->tbet = le32_to_cpu(src->tbet);
	nvm_id->tbem = le32_to_cpu(src->tbem);

	/* 1.2 compatibility */
	nvm_id->num_pln = src->num_pln;
	nvm_id->num_pg = le16_to_cpu(src->num_pg);
	nvm_id->fpg_sz = le16_to_cpu(src->fpg_sz);

	return 0;
}
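
/*
 * Issue the device identity admin command and translate the returned
 * 1.2 identify structure into the generic nvm_id representation.
 */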
static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_id12 *id;
	struct nvme_nvm_command c = {};
	int ret;

	c.identity.opcode = nvme_nvm_admin_identity;
	c.identity.nsid = cpu_to_le32(ns->head->ns_id);

	id = kmalloc(sizeof(struct nvme_nvm_id12), GFP_KERNEL);
	if (!id)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
				id, sizeof(struct nvme_nvm_id12));
	if (ret) {
		ret = -EIO;
		goto out;
	}

	nvm_id->ver_id = id->ver_id;
	nvm_id->vmnt = id->vmnt;
	nvm_id->cap = le32_to_cpu(id->cap);
	nvm_id->dom = le32_to_cpu(id->dom);
	memcpy(&nvm_id->ppaf, &id->ppaf,
					sizeof(struct nvm_addr_format));

	ret = init_grp(nvm_id, id);
out:
	kfree(id);
	return ret;
}
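
/*
 * Fetch the bad block table for the LUN addressed by @ppa. The table is
 * validated (the "BBLT" identifier, version 1 and the expected block
 * count) before the per-block states are copied into @blks.
 */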
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
								u8 *blks)
{
	struct request_queue *q = nvmdev->q;
	struct nvm_geo *geo = &nvmdev->geo;
	struct nvme_ns *ns = q->queuedata;
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_nvm_command c = {};
	struct nvme_nvm_bb_tbl *bb_tbl;
	int nr_blks = geo->nr_chks * geo->plane_mode;
	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
	int ret = 0;

	c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
	c.get_bb.nsid = cpu_to_le32(ns->head->ns_id);
	c.get_bb.spba = cpu_to_le64(ppa.ppa);

	bb_tbl = kzalloc(tblsz, GFP_KERNEL);
	if (!bb_tbl)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
								bb_tbl, tblsz);
	if (ret) {
		dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
		ret = -EIO;
		goto out;
	}

	if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
		bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
		dev_err(ctrl->device, "bbt format mismatch\n");
		ret = -EINVAL;
		goto out;
	}

	if (le16_to_cpu(bb_tbl->verid) != 1) {
		ret = -EINVAL;
		dev_err(ctrl->device, "bbt version not supported\n");
		goto out;
	}

	if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
		ret = -EINVAL;
		dev_err(ctrl->device,
				"bbt unexpected blocks returned (%u!=%u)",
				le32_to_cpu(bb_tbl->tblks), nr_blks);
		goto out;
	}

	memcpy(blks, bb_tbl->blk, geo->nr_chks * geo->plane_mode);
out:
	kfree(bb_tbl);
	return ret;
}
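
/*
 * Mark @nr_ppas blocks starting at @ppas with the given bad block @type.
 * The on-wire nlb field is zero-based, hence the nr_ppas - 1.
 */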
static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
							int nr_ppas, int type)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_command c = {};
	int ret = 0;

	c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
	c.set_bb.nsid = cpu_to_le32(ns->head->ns_id);
	c.set_bb.spba = cpu_to_le64(ppas->ppa);
	c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
	c.set_bb.value = type;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
								NULL, 0);
	if (ret)
		dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
									ret);
	return ret;
}
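
/* Translate a generic lightnvm request into a physical read/write command. */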
static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
				    struct nvme_nvm_command *c)
{
	c->ph_rw.opcode = rqd->opcode;
	c->ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
	c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
	c->ph_rw.control = cpu_to_le16(rqd->flags);
	c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
}
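
/*
 * Completion path for asynchronous I/O: propagate the per-ppa completion
 * bits and the command status into the nvm_rq before freeing the command
 * buffer and the request.
 */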
static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
{
	struct nvm_rq *rqd = rq->end_io_data;

	rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
	rqd->error = nvme_req(rq)->status;
	nvm_end_io(rqd);

	kfree(nvme_req(rq)->cmd);
	blk_mq_free_request(rq);
}
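
/*
 * Build a block-layer request carrying a lightnvm command. The request is
 * either backed by the rqd's bio or submitted without a data payload.
 */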
static struct request *nvme_nvm_alloc_request(struct request_queue *q,
					      struct nvm_rq *rqd,
					      struct nvme_nvm_command *cmd)
{
	struct nvme_ns *ns = q->queuedata;
	struct request *rq;

	nvme_nvm_rqtocmd(rqd, ns, cmd);

	rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
	if (IS_ERR(rq))
		return rq;

	rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;

	if (rqd->bio) {
		blk_init_request_from_bio(rq, rqd->bio);
	} else {
		rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
		rq->__data_len = 0;
	}

	return rq;
}
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct nvme_nvm_command *cmd;
	struct request *rq;

	cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	rq = nvme_nvm_alloc_request(q, rqd, cmd);
	if (IS_ERR(rq)) {
		kfree(cmd);
		return PTR_ERR(rq);
	}

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

	return 0;
}
static int nvme_nvm_submit_io_sync(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct request *rq;
	struct nvme_nvm_command cmd;
	int ret = 0;

	memset(&cmd, 0, sizeof(struct nvme_nvm_command));

	rq = nvme_nvm_alloc_request(q, rqd, &cmd);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* I/Os can fail and the error is signaled through rqd. Callers must
	 * handle the error accordingly.
	 */
	blk_execute_rq(q, NULL, rq, 0);
	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;

	rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
	rqd->error = nvme_req(rq)->status;

	blk_mq_free_request(rq);

	return ret;
}
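
/*
 * PPA lists and out-of-band metadata are handed to the device through
 * DMA-able buffers; these helpers back them with a page-sized dma_pool.
 */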
static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;

	return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
	struct dma_pool *dma_pool = pool;

	dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				    gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *addr,
							dma_addr_t dma_handler)
{
	dma_pool_free(pool, addr, dma_handler);
}
static struct nvm_dev_ops nvme_nvm_dev_ops = {
	.identity		= nvme_nvm_identity,

	.get_bb_tbl		= nvme_nvm_get_bb_tbl,
	.set_bb_tbl		= nvme_nvm_set_bb_tbl,

	.submit_io		= nvme_nvm_submit_io,
	.submit_io_sync		= nvme_nvm_submit_io_sync,

	.create_dma_pool	= nvme_nvm_create_dma_pool,
	.destroy_dma_pool	= nvme_nvm_destroy_dma_pool,
	.dev_dma_alloc		= nvme_nvm_dev_dma_alloc,
	.dev_dma_free		= nvme_nvm_dev_dma_free,

	.max_phys_sect		= 64,
};
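
/*
 * Common backend for the vector ioctls: set up DMA buffers for the PPA
 * list and metadata, map the user data buffer, execute the command
 * synchronously and copy the results back to user space.
 */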
static int nvme_nvm_submit_user_cmd(struct request_queue *q,
				struct nvme_ns *ns,
				struct nvme_nvm_command *vcmd,
				void __user *ubuf, unsigned int bufflen,
				void __user *meta_buf, unsigned int meta_len,
				void __user *ppa_buf, unsigned int ppa_len,
				u32 *result, u64 *status, unsigned int timeout)
{
	bool write = nvme_is_write((struct nvme_command *)vcmd);
	struct nvm_dev *dev = ns->ndev;
	struct gendisk *disk = ns->disk;
	struct request *rq;
	struct bio *bio = NULL;
	__le64 *ppa_list = NULL;
	dma_addr_t ppa_dma;
	__le64 *metadata = NULL;
	dma_addr_t metadata_dma;
	DECLARE_COMPLETION_ONSTACK(wait);
	int ret = 0;

	rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
			NVME_QID_ANY);
	if (IS_ERR(rq)) {
		ret = -ENOMEM;
		goto err_cmd;
	}

	rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (ppa_buf && ppa_len) {
		ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
		if (!ppa_list) {
			ret = -ENOMEM;
			goto err_rq;
		}
		if (copy_from_user(ppa_list, (void __user *)ppa_buf,
						sizeof(u64) * (ppa_len + 1))) {
			ret = -EFAULT;
			goto err_ppa;
		}
		vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
	} else {
		vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
	}

	if (ubuf && bufflen) {
		ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
		if (ret)
			goto err_ppa;
		bio = rq->bio;

		if (meta_buf && meta_len) {
			metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
								&metadata_dma);
			if (!metadata) {
				ret = -ENOMEM;
				goto err_map;
			}

			if (write) {
				if (copy_from_user(metadata,
						(void __user *)meta_buf,
						meta_len)) {
					ret = -EFAULT;
					goto err_meta;
				}
			}
			vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
		}

		bio->bi_disk = disk;
	}

	blk_execute_rq(q, NULL, rq, 0);

	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else if (nvme_req(rq)->status & 0x7ff)
		ret = -EIO;
	if (result)
		*result = nvme_req(rq)->status & 0x7ff;
	if (status)
		*status = le64_to_cpu(nvme_req(rq)->result.u64);

	if (metadata && !ret && !write) {
		if (copy_to_user(meta_buf, (void *)metadata, meta_len))
			ret = -EFAULT;
	}
err_meta:
	if (meta_buf && meta_len)
		dma_pool_free(dev->dma_pool, metadata, metadata_dma);
err_map:
	if (bio)
		blk_rq_unmap_user(bio);
err_ppa:
	if (ppa_buf && ppa_len)
		dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
err_rq:
	blk_mq_free_request(rq);
err_cmd:
	return ret;
}
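
/* Handle NVME_NVM_IOCTL_SUBMIT_VIO: a vectored read/write from user space. */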
static int nvme_nvm_submit_vio(struct nvme_ns *ns,
					struct nvm_user_vio __user *uvio)
{
	struct nvm_user_vio vio;
	struct nvme_nvm_command c;
	unsigned int length;
	int ret;

	if (copy_from_user(&vio, uvio, sizeof(vio)))
		return -EFAULT;
	if (vio.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.ph_rw.opcode = vio.opcode;
	c.ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
	c.ph_rw.control = cpu_to_le16(vio.control);
	c.ph_rw.length = cpu_to_le16(vio.nppas);

	length = (vio.nppas + 1) << ns->lba_shift;

	ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c,
			(void __user *)(uintptr_t)vio.addr, length,
			(void __user *)(uintptr_t)vio.metadata,
							vio.metadata_len,
			(void __user *)(uintptr_t)vio.ppa_list, vio.nppas,
			&vio.result, &vio.status, 0);

	if (ret && copy_to_user(uvio, &vio, sizeof(vio)))
		return -EFAULT;

	return ret;
}
static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
					struct nvm_passthru_vio __user *uvcmd)
{
	struct nvm_passthru_vio vcmd;
	struct nvme_nvm_command c;
	struct request_queue *q;
	unsigned int timeout = 0;
	int ret;

	if (copy_from_user(&vcmd, uvcmd, sizeof(vcmd)))
		return -EFAULT;
	if ((vcmd.opcode != 0xF2) && (!capable(CAP_SYS_ADMIN)))
		return -EACCES;
	if (vcmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = vcmd.opcode;
	c.common.nsid = cpu_to_le32(ns->head->ns_id);
	c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
	/* cdw11-12 */
	c.ph_rw.length = cpu_to_le16(vcmd.nppas);
	c.ph_rw.control = cpu_to_le16(vcmd.control);
	c.common.cdw10[3] = cpu_to_le32(vcmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(vcmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(vcmd.cdw15);

	if (vcmd.timeout_ms)
		timeout = msecs_to_jiffies(vcmd.timeout_ms);

	q = admin ? ns->ctrl->admin_q : ns->queue;

	ret = nvme_nvm_submit_user_cmd(q, ns,
			(struct nvme_nvm_command *)&c,
			(void __user *)(uintptr_t)vcmd.addr, vcmd.data_len,
			(void __user *)(uintptr_t)vcmd.metadata,
							vcmd.metadata_len,
			(void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas,
			&vcmd.result, &vcmd.status, timeout);

	if (ret && copy_to_user(uvcmd, &vcmd, sizeof(vcmd)))
		return -EFAULT;

	return ret;
}
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NVME_NVM_IOCTL_ADMIN_VIO:
		return nvme_nvm_user_vcmd(ns, 1, (void __user *)arg);
	case NVME_NVM_IOCTL_IO_VIO:
		return nvme_nvm_user_vcmd(ns, 0, (void __user *)arg);
	case NVME_NVM_IOCTL_SUBMIT_VIO:
		return nvme_nvm_submit_vio(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}
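
/*
 * Allocate and register a lightnvm device on top of this namespace. The
 * device takes its name from the backing disk and dispatches through
 * nvme_nvm_dev_ops.
 */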
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
{
	struct request_queue *q = ns->queue;
	struct nvm_dev *dev;

	_nvme_nvm_check_size();

	dev = nvm_alloc_dev(node);
	if (!dev)
		return -ENOMEM;

	dev->q = q;
	memcpy(dev->name, disk_name, DISK_NAME_LEN);
	dev->ops = &nvme_nvm_dev_ops;
	dev->private_data = ns;
	ns->ndev = dev;

	return nvm_register(dev);
}
void nvme_nvm_unregister(struct nvme_ns *ns)
{
	nvm_unregister(ns->ndev);
}
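
/*
 * Sysfs attributes under <disk>/lightnvm/, exposing the identify geometry
 * to user space. One show routine serves all attributes, keyed by name.
 */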
static ssize_t nvm_dev_attr_show(struct device *dev,
				 struct device_attribute *dattr, char *page)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	struct nvm_dev *ndev = ns->ndev;
	struct nvm_id *id;
	struct attribute *attr;

	if (!ndev)
		return 0;

	id = &ndev->identity;
	attr = &dattr->attr;

	if (strcmp(attr->name, "version") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
	} else if (strcmp(attr->name, "vendor_opcode") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
	} else if (strcmp(attr->name, "capabilities") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
	} else if (strcmp(attr->name, "device_mode") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
	/* kept for compatibility */
	} else if (strcmp(attr->name, "media_manager") == 0) {
		return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm");
	} else if (strcmp(attr->name, "ppa_format") == 0) {
		return scnprintf(page, PAGE_SIZE,
			"0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			id->ppaf.ch_offset, id->ppaf.ch_len,
			id->ppaf.lun_offset, id->ppaf.lun_len,
			id->ppaf.pln_offset, id->ppaf.pln_len,
			id->ppaf.blk_offset, id->ppaf.blk_len,
			id->ppaf.pg_offset, id->ppaf.pg_len,
			id->ppaf.sect_offset, id->ppaf.sect_len);
	} else if (strcmp(attr->name, "media_type") == 0) {	/* u8 */
		return scnprintf(page, PAGE_SIZE, "%u\n", id->mtype);
	} else if (strcmp(attr->name, "flash_media_type") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->fmtype);
	} else if (strcmp(attr->name, "num_channels") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->num_ch);
	} else if (strcmp(attr->name, "num_luns") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->num_lun);
	} else if (strcmp(attr->name, "num_planes") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->num_pln);
	} else if (strcmp(attr->name, "num_blocks") == 0) {	/* u16 */
		return scnprintf(page, PAGE_SIZE, "%u\n", id->num_chk);
	} else if (strcmp(attr->name, "num_pages") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->num_pg);
	} else if (strcmp(attr->name, "page_size") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->fpg_sz);
	} else if (strcmp(attr->name, "hw_sector_size") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->csecs);
	} else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
		return scnprintf(page, PAGE_SIZE, "%u\n", id->sos);
	} else if (strcmp(attr->name, "read_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->trdt);
	} else if (strcmp(attr->name, "read_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->trdm);
	} else if (strcmp(attr->name, "prog_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->tprt);
	} else if (strcmp(attr->name, "prog_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->tprm);
	} else if (strcmp(attr->name, "erase_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->tbet);
	} else if (strcmp(attr->name, "erase_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->tbem);
	} else if (strcmp(attr->name, "multiplane_modes") == 0) {
		return scnprintf(page, PAGE_SIZE, "0x%08x\n", id->mpos);
	} else if (strcmp(attr->name, "media_capabilities") == 0) {
		return scnprintf(page, PAGE_SIZE, "0x%08x\n", id->mccap);
	} else if (strcmp(attr->name, "max_phys_secs") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n",
				ndev->ops->max_phys_sect);
	} else {
		return scnprintf(page,
				 PAGE_SIZE,
				 "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
				 attr->name);
	}
}
#define NVM_DEV_ATTR_RO(_name)						\
	DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)

static NVM_DEV_ATTR_RO(version);
static NVM_DEV_ATTR_RO(vendor_opcode);
static NVM_DEV_ATTR_RO(capabilities);
static NVM_DEV_ATTR_RO(device_mode);
static NVM_DEV_ATTR_RO(ppa_format);
static NVM_DEV_ATTR_RO(media_manager);

static NVM_DEV_ATTR_RO(media_type);
static NVM_DEV_ATTR_RO(flash_media_type);
static NVM_DEV_ATTR_RO(num_channels);
static NVM_DEV_ATTR_RO(num_luns);
static NVM_DEV_ATTR_RO(num_planes);
static NVM_DEV_ATTR_RO(num_blocks);
static NVM_DEV_ATTR_RO(num_pages);
static NVM_DEV_ATTR_RO(page_size);
static NVM_DEV_ATTR_RO(hw_sector_size);
static NVM_DEV_ATTR_RO(oob_sector_size);
static NVM_DEV_ATTR_RO(read_typ);
static NVM_DEV_ATTR_RO(read_max);
static NVM_DEV_ATTR_RO(prog_typ);
static NVM_DEV_ATTR_RO(prog_max);
static NVM_DEV_ATTR_RO(erase_typ);
static NVM_DEV_ATTR_RO(erase_max);
static NVM_DEV_ATTR_RO(multiplane_modes);
static NVM_DEV_ATTR_RO(media_capabilities);
static NVM_DEV_ATTR_RO(max_phys_secs);

static struct attribute *nvm_dev_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_vendor_opcode.attr,
	&dev_attr_capabilities.attr,
	&dev_attr_device_mode.attr,
	&dev_attr_media_manager.attr,
	&dev_attr_ppa_format.attr,
	&dev_attr_media_type.attr,
	&dev_attr_flash_media_type.attr,
	&dev_attr_num_channels.attr,
	&dev_attr_num_luns.attr,
	&dev_attr_num_planes.attr,
	&dev_attr_num_blocks.attr,
	&dev_attr_num_pages.attr,
	&dev_attr_page_size.attr,
	&dev_attr_hw_sector_size.attr,
	&dev_attr_oob_sector_size.attr,
	&dev_attr_read_typ.attr,
	&dev_attr_read_max.attr,
	&dev_attr_prog_typ.attr,
	&dev_attr_prog_max.attr,
	&dev_attr_erase_typ.attr,
	&dev_attr_erase_max.attr,
	&dev_attr_multiplane_modes.attr,
	&dev_attr_media_capabilities.attr,
	&dev_attr_max_phys_secs.attr,
	NULL,
};
static const struct attribute_group nvm_dev_attr_group = {
	.name		= "lightnvm",
	.attrs		= nvm_dev_attrs,
};

int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
	return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
					&nvm_dev_attr_group);
}

void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
{
	sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
					&nvm_dev_attr_group);
}