// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe I/O command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/module.h>
#include "nvmet.h"
int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{
	int ret;

	ns->bdev = blkdev_get_by_path(ns->device_path,
			FMODE_READ | FMODE_WRITE, NULL);
	if (IS_ERR(ns->bdev)) {
		ret = PTR_ERR(ns->bdev);
		if (ret != -ENOTBLK) {
			pr_err("failed to open block device %s: (%ld)\n",
					ns->device_path, PTR_ERR(ns->bdev));
		}
		ns->bdev = NULL;
		return ret;
	}
	ns->size = i_size_read(ns->bdev->bd_inode);
	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
	return 0;
}
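
/*
 * Release the block device reference taken in nvmet_bdev_ns_enable().
 * Safe to call even if the namespace never had a block device attached.
 */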
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
	if (ns->bdev) {
		blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
		ns->bdev = NULL;
	}
}
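
/*
 * Reverse-map a block layer status code to an NVMe status code and
 * record the error location (and, for opcodes that carry one, the
 * failing starting LBA) in the request for error-log reporting.
 */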
static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
{
	u16 status = NVME_SC_SUCCESS;

	if (likely(blk_sts == BLK_STS_OK))
		return status;
	/*
	 * Right now there exists an M:1 mapping from block layer errors
	 * to NVMe status codes (see nvme_error_status()). For consistency,
	 * when we reverse map we use the most appropriate NVMe status code
	 * from the group of NVMe status codes used in nvme_error_status().
	 */
	switch (blk_sts) {
	case BLK_STS_NOSPC:
		status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_rw_command, length);
		break;
	case BLK_STS_TARGET:
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		break;
	case BLK_STS_NOTSUPP:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		switch (req->cmd->common.opcode) {
		case nvme_cmd_dsm:
		case nvme_cmd_write_zeroes:
			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
			break;
		default:
			status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
		break;
	case BLK_STS_MEDIUM:
		status = NVME_SC_ACCESS_DENIED;
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		break;
	case BLK_STS_IOERR:
		/* fallthru */
	default:
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_common_command, opcode);
	}

	switch (req->cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->error_slba = le64_to_cpu(req->cmd->rw.slba);
		break;
	case nvme_cmd_write_zeroes:
		req->error_slba =
			le64_to_cpu(req->cmd->write_zeroes.slba);
		break;
	default:
		req->error_slba = 0;
	}
	return status;
}
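
/*
 * Completion callback shared by the bios submitted below: translate the
 * bio status, complete the request, and drop the bio reference unless
 * it is the inline bio embedded in the request itself.
 */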
static void nvmet_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));

	if (bio != &req->b.inline_bio)
		bio_put(bio);
}
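
/*
 * Build and submit a bio (or a chain of bios, when the scatterlist does
 * not fit into a single one) covering the command's data.  Small
 * transfers reuse the inline bio and bvecs embedded in the request to
 * avoid an allocation in the fast path.
 */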
static void nvmet_bdev_execute_rw(struct nvmet_req *req)
{
	int sg_cnt = req->sg_cnt;
	struct bio *bio;
	struct scatterlist *sg;
	sector_t sector;
	int op, op_flags = 0, i;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		op = REQ_OP_WRITE;
		op_flags = REQ_SYNC | REQ_IDLE;
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			op_flags |= REQ_FUA;
	} else {
		op = REQ_OP_READ;
	}

	if (is_pci_p2pdma_page(sg_page(req->sg)))
		op_flags |= REQ_NOMERGE;

	sector = le64_to_cpu(req->cmd->rw.slba);
	sector <<= (req->ns->blksize_shift - 9);

	if (req->data_len <= NVMET_MAX_INLINE_DATA_LEN) {
		bio = &req->b.inline_bio;
		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	} else {
		bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
	}
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio_set_op_attrs(bio, op, op_flags);

	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			struct bio *prev = bio;

			bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
			bio_set_dev(bio, req->ns->bdev);
			bio->bi_iter.bi_sector = sector;
			bio_set_op_attrs(bio, op, op_flags);

			bio_chain(bio, prev);
			submit_bio(prev);
		}

		sector += sg->length >> 9;
		sg_cnt--;
	}

	submit_bio(bio);
}
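
/*
 * A flush carries no data, so the inline bio is always large enough;
 * an empty REQ_PREFLUSH write asks the device to flush its write cache.
 */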
static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
	struct bio *bio = &req->b.inline_bio;

	bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	submit_bio(bio);
}
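
/*
 * Synchronous flush helper for callers outside the normal command
 * execution path; it returns an NVMe status code directly instead of
 * completing a request.
 */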
u16 nvmet_bdev_flush(struct nvmet_req *req)
{
	if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL, NULL))
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	return 0;
}
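
/*
 * Queue a discard for one DSM range, chaining the work into *bio so
 * that all ranges of a command complete through a single bio chain.
 * DSM deallocate is only a hint, so -EOPNOTSUPP from the underlying
 * device is treated as success rather than failing the command.
 */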
static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
		struct nvme_dsm_range *range, struct bio **bio)
{
	struct nvmet_ns *ns = req->ns;
	int ret;

	ret = __blkdev_issue_discard(ns->bdev,
			le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
			GFP_KERNEL, 0, bio);
	if (ret && ret != -EOPNOTSUPP) {
		req->error_slba = le64_to_cpu(range->slba);
		return blk_to_nvme_status(req, errno_to_blk_status(ret));
	}
	return NVME_SC_SUCCESS;
}
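
/*
 * Walk the DSM range list in the command payload and turn each entry
 * into a discard.  If a range fails after bios have already been
 * gathered, the partially built chain is completed with an error so
 * that nvmet_bio_done() still runs exactly once.
 */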
static void nvmet_bdev_execute_discard(struct nvmet_req *req)
{
	struct nvme_dsm_range range;
	struct bio *bio = NULL;
	int i;
	u16 status;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
				sizeof(range));
		if (status)
			break;

		status = nvmet_bdev_discard_range(req, &range, &bio);
		if (status)
			break;
	}

	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		if (status) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		} else {
			submit_bio(bio);
		}
	} else {
		nvmet_req_complete(req, status);
	}
}
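
/*
 * Only the Deallocate attribute is implemented; the Integral Dataset
 * for Read/Write attributes are advisory hints, so they complete
 * successfully as no-ops.
 */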
static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
{
	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_bdev_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}
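
/*
 * Translate Write Zeroes into a block layer zeroout.  The 0's based
 * NLB field is converted to a sector count using the namespace block
 * size; the resulting bio chain (if any) completes through
 * nvmet_bio_done().
 */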
static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
{
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	struct bio *bio = NULL;
	u16 status = NVME_SC_SUCCESS;
	sector_t sector;
	sector_t nr_sector;
	int ret;

	sector = le64_to_cpu(write_zeroes->slba) <<
		(req->ns->blksize_shift - 9);
	nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
		(req->ns->blksize_shift - 9));

	ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
			GFP_KERNEL, &bio, 0);
	status = blk_to_nvme_status(req, errno_to_blk_status(ret));
	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		submit_bio(bio);
	} else {
		nvmet_req_complete(req, status);
	}
}
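
/*
 * Set up req->execute (and the expected data length) for the I/O
 * opcodes the block device backend implements; anything else fails
 * with Invalid Opcode and DNR set.
 */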
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_bdev_execute_rw;
		req->data_len = nvmet_rw_len(req);
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_bdev_execute_flush;
		req->data_len = 0;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_bdev_execute_dsm;
		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
			sizeof(struct nvme_dsm_range);
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_bdev_execute_write_zeroes;
		return 0;
	default:
		pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
		       req->sq->qid);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}