// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target Passthrough command implementation.
 *
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 * Copyright (c) 2019-2020, Eideticom Inc.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>

#include "../host/nvme.h"
#include "nvmet.h"

MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU);
/*
 * xarray to maintain one passthru subsystem per nvme controller.
*/
static DEFINE_XARRAY(passthru_subsystems);
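
/*
 * Adjust the capabilities (CAP) register exposed for a passthru controller;
 * CAP bit 43 is the CSS bit advertising I/O command set (CSI) support.
 */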
void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
{
	/*
	 * Multiple command set support can only be declared if the underlying
	 * controller actually supports it.
	 */
	if (!nvme_multi_css(ctrl->subsys->passthru_ctrl))
		ctrl->cap &= ~(1ULL << 43);
}
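
/*
 * When the subsystem's clear_ids flag is set, rewrite the Namespace
 * Identification Descriptor list returned by the underlying controller so
 * that only the Command Set Identifier (CSI) descriptor is kept and all
 * other namespace identifiers are cleared.
 */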
static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_SUCCESS;
	int pos, len;
	bool csi_seen = false;
	void *data;
	u8 csi;

	if (!ctrl->subsys->clear_ids)
		return status;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return NVME_SC_INTERNAL;

	status = nvmet_copy_from_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
	if (status)
		goto out_free;

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;
		if (cur->nidt == NVME_NIDT_CSI) {
			memcpy(&csi, cur + 1, NVME_NIDT_CSI_LEN);
			csi_seen = true;
			break;
		}
		len = sizeof(struct nvme_ns_id_desc) + cur->nidl;
	}

	memset(data, 0, NVME_IDENTIFY_DATA_SIZE);
	if (csi_seen) {
		struct nvme_ns_id_desc *cur = data;

		cur->nidt = NVME_NIDT_CSI;
		cur->nidl = NVME_NIDT_CSI_LEN;
		memcpy(cur + 1, &csi, NVME_NIDT_CSI_LEN);
	}
	status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
out_free:
	kfree(data);
	return status;
}
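
/*
 * Override fields of the Identify Controller data returned by the passthru
 * controller so that fabrics-specific values (cntlid, version, MDTS, SGL
 * support, subsysnqn, queue entry sizes, etc.) reflect the target.
 */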
static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl;
	u16 status = NVME_SC_SUCCESS;
	struct nvme_id_ctrl *id;
	unsigned int max_hw_sectors;
	int page_shift;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NVME_SC_INTERNAL;

	status = nvmet_copy_from_sgl(req, 0, id, sizeof(*id));
	if (status)
		goto out_free;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/*
	 * The passthru NVMe driver may have a limit on the number of segments
	 * which depends on the host's memory fragmentation. To solve this,
	 * ensure mdts is limited to the pages equal to the number of segments.
	 */
	max_hw_sectors = min_not_zero(pctrl->max_segments << PAGE_SECTORS_SHIFT,
				      pctrl->max_hw_sectors);

	/*
	 * nvmet_passthru_map_sg is limited to using a single bio, so limit
	 * the mdts based on BIO_MAX_VECS as well.
	 */
	max_hw_sectors = min_not_zero(BIO_MAX_VECS << PAGE_SECTORS_SHIFT,
				      max_hw_sectors);
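
	/*
	 * MDTS is expressed as a power of two in units of the minimum memory
	 * page size, i.e. 2^(12 + MPSMIN) bytes. For example, with
	 * max_hw_sectors = 1024 (512 KiB) and MPSMIN = 0 (4 KiB pages):
	 * ilog2(1024) + 9 - 12 = 7, and 2^7 * 4 KiB = 512 KiB.
	 */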
	page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
	id->mdts = ilog2(max_hw_sectors) + 9 - page_shift;

	id->acl = 3;
	/*
	 * We export aerl limit for the fabrics controller, update this when
	 * passthru based aerl support is added.
	 */
	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* emulate kas as most of the PCIe ctrl don't have a support for kas */
	id->kas = cpu_to_le16(NVMET_KAS);

	/* don't support host memory buffer */
	id->hmpre = 0;
	id->hmmin = 0;
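
	/* Cap SQES/CQES at the fabrics sizes: 2^6 = 64 byte SQEs, 2^4 = 16 byte CQEs */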
	id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes);
	id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	/* don't support fused commands */
	id->fuses = 0;

	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	/*
	 * When a passthru controller is set up using the nvme-loop transport,
	 * it would export the passthru ctrl's subsysnqn (that of the PCIe NVMe
	 * ctrl) and would then fail in nvme_init_subsystem()->nvme_active_ctrl()
	 * in nvme/host/core.c with a duplicate ctrl subsysnqn. To prevent that,
	 * we mask the passthru ctrl's subsysnqn with the target ctrl's subsysnqn.
	 */
	memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));

	/* use fabric id-ctrl values */
	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
				req->port->inline_data_size) / 16);
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	/* Support multipath connections with fabrics */
	id->cmic |= 1 << 1;

	/* Disable reservations, see nvmet_parse_passthru_io_cmd() */
	id->oncs &= cpu_to_le16(~NVME_CTRL_ONCS_RESERVATIONS);

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(struct nvme_id_ctrl));

out_free:
	kfree(id);
	return status;
}
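
/*
 * Override the Identify Namespace data: clear out LBA formats that carry
 * metadata (which the target cannot transfer) and, when clear_ids is set,
 * zero the NGUID/EUI64 namespace identifiers.
 */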
static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_SUCCESS;
	struct nvme_id_ns *id;
	int i;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NVME_SC_INTERNAL;

	status = nvmet_copy_from_sgl(req, 0, id, sizeof(struct nvme_id_ns));
	if (status)
		goto out_free;

	for (i = 0; i < (id->nlbaf + 1); i++)
		if (id->lbaf[i].ms)
			memset(&id->lbaf[i], 0, sizeof(id->lbaf[i]));

	id->flbas = id->flbas & ~(1 << 4);

	/*
	 * Presently the NVMe-oF target code does not support sending
	 * metadata, so we must disable it here. This should be updated
	 * once the target starts supporting metadata.
	 */
	id->mc = 0;

	if (req->sq->ctrl->subsys->clear_ids) {
		memset(id->nguid, 0, NVME_NIDT_NGUID_LEN);
		memset(id->eui64, 0, NVME_NIDT_EUI64_LEN);
	}

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

out_free:
	kfree(id);
	return status;
}
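
/*
 * Workqueue path for passthru commands that need post-execution fixups or
 * have non-trivial command effects: the request is executed synchronously
 * so that the identify overrides and nvme_passthru_end() can run afterwards.
 */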
static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
	struct request *rq = req->p.rq;
	struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
	struct nvme_ns *ns = rq->q->queuedata;
	u32 effects;
	int status;

	effects = nvme_passthru_start(ctrl, ns, req->cmd->common.opcode);
	status = nvme_execute_rq(rq, false);

	if (status == NVME_SC_SUCCESS &&
	    req->cmd->common.opcode == nvme_admin_identify) {
		switch (req->cmd->identify.cns) {
		case NVME_ID_CNS_CTRL:
			nvmet_passthru_override_id_ctrl(req);
			break;
		case NVME_ID_CNS_NS:
			nvmet_passthru_override_id_ns(req);
			break;
		case NVME_ID_CNS_NS_DESC_LIST:
			nvmet_passthru_override_id_descs(req);
			break;
		}
	} else if (status < 0)
		status = NVME_SC_INTERNAL;

	req->cqe->result = nvme_req(rq)->result;
	nvmet_req_complete(req, status);
	blk_mq_free_request(rq);

	if (effects)
		nvme_passthru_end(ctrl, ns, effects, req->cmd, status);
}
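
/*
 * Completion handler for passthru requests executed asynchronously; it
 * propagates the NVMe result and status back to the fabrics request.
 */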
static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,
						  blk_status_t blk_status)
{
	struct nvmet_req *req = rq->end_io_data;

	req->cqe->result = nvme_req(rq)->result;
	nvmet_req_complete(req, nvme_req(rq)->status);
	blk_mq_free_request(rq);
	return RQ_END_IO_NONE;
}
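
/*
 * Map the fabrics request's scatterlist into a single bio (using the inline
 * bvec when it fits) and attach it to the block layer request.
 */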
static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
{
	struct scatterlist *sg;
	struct bio *bio;
	int i;

	if (req->sg_cnt > BIO_MAX_VECS)
		return -EINVAL;

	if (nvmet_use_inline_bvec(req)) {
		bio = &req->p.inline_bio;
		bio_init(bio, NULL, req->inline_bvec,
			 ARRAY_SIZE(req->inline_bvec), req_op(rq));
	} else {
		bio = bio_alloc(NULL, bio_max_segs(req->sg_cnt), req_op(rq),
				GFP_KERNEL);
		bio->bi_end_io = bio_put;
	}

	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
				    sg->offset) < sg->length) {
			nvmet_req_bio_put(req, bio);
			return -EINVAL;
		}
	}

	blk_rq_bio_prep(rq, bio, req->sg_cnt);

	return 0;
}
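
/*
 * Build and issue the passthru command: allocate a block layer request on
 * the underlying controller's admin queue (or the namespace queue for I/O
 * commands), map any data, and execute it either directly or via the
 * workqueue depending on the command's effects.
 */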
static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
{
	struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
	struct request_queue *q = ctrl->admin_q;
	struct nvme_ns *ns = NULL;
	struct request *rq = NULL;
	unsigned int timeout;
	u32 effects;
	u16 status;
	int ret;

	if (likely(req->sq->qid != 0)) {
		u32 nsid = le32_to_cpu(req->cmd->common.nsid);

		ns = nvme_find_get_ns(ctrl, nsid);
		if (unlikely(!ns)) {
			pr_err("failed to get passthru ns nsid:%u\n", nsid);
			status = NVME_SC_INVALID_NS | NVME_SC_DNR;
			goto out;
		}

		q = ns->queue;
		timeout = nvmet_req_subsys(req)->io_timeout;
	} else {
		timeout = nvmet_req_subsys(req)->admin_timeout;
	}

	rq = blk_mq_alloc_request(q, nvme_req_op(req->cmd), 0);
	if (IS_ERR(rq)) {
		status = NVME_SC_INTERNAL;
		goto out_put_ns;
	}
	nvme_init_request(rq, req->cmd);

	if (timeout)
		rq->timeout = timeout;

	if (req->sg_cnt) {
		ret = nvmet_passthru_map_sg(req, rq);
		if (unlikely(ret)) {
			status = NVME_SC_INTERNAL;
			goto out_put_req;
		}
	}

	/*
	 * If a command needs post-execution fixups, or there are any
	 * non-trivial effects, make sure to execute the command synchronously
	 * in a workqueue so that nvme_passthru_end gets called.
	 */
	effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
	if (req->p.use_workqueue ||
	    (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))) {
		INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
		req->p.rq = rq;
		queue_work(nvmet_wq, &req->p.work);
	} else {
		rq->end_io = nvmet_passthru_req_done;
		rq->end_io_data = req;
		blk_execute_rq_nowait(rq, false);
	}

	if (ns)
		nvme_put_ns(ns);

	return;

out_put_req:
	blk_mq_free_request(rq);
out_put_ns:
	if (ns)
		nvme_put_ns(ns);
out:
	nvmet_req_complete(req, status);
}

/*
 * We need to emulate set host behaviour to ensure that any requested
 * behaviour of the target's host matches the requested behaviour
 * of the device's host, and fail the command otherwise.
 */
static void nvmet_passthru_set_host_behaviour(struct nvmet_req *req)
{
	struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
	struct nvme_feat_host_behavior *host;
	u16 status = NVME_SC_INTERNAL;
	int ret;

	host = kzalloc(sizeof(*host) * 2, GFP_KERNEL);
	if (!host)
		goto out_complete_req;

	ret = nvme_get_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
				host, sizeof(*host), NULL);
	if (ret)
		goto out_free_host;

	status = nvmet_copy_from_sgl(req, 0, &host[1], sizeof(*host));
	if (status)
		goto out_free_host;

	if (memcmp(&host[0], &host[1], sizeof(host[0]))) {
		pr_warn("target host has requested different behaviour from the local host\n");
		status = NVME_SC_INTERNAL;
	}

out_free_host:
	kfree(host);
out_complete_req:
	nvmet_req_complete(req, status);
}

static u16 nvmet_setup_passthru_command(struct nvmet_req *req)
{
	req->p.use_workqueue = false;
	req->execute = nvmet_passthru_execute_cmd;
	return NVME_SC_SUCCESS;
}
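
/*
 * I/O command parser for passthru: everything is forwarded to the underlying
 * controller except reservation commands, which cannot be supported properly
 * over fabrics.
 */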
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	/* Reject any commands with non-sgl flags set (i.e. fused commands) */
	if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
		return NVME_SC_INVALID_FIELD;

	switch (req->cmd->common.opcode) {
	case nvme_cmd_resv_register:
	case nvme_cmd_resv_report:
	case nvme_cmd_resv_acquire:
	case nvme_cmd_resv_release:
		/*
		 * Reservations cannot be supported properly because the
		 * underlying device has no way of differentiating different
		 * hosts that connect via fabrics. This could potentially be
		 * emulated in the future if regular targets grow support for
		 * this feature.
		 */
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	return nvmet_setup_passthru_command(req);
}

/*
 * Only features that are emulated or specifically allowed in the list are
 * passed down to the controller. This function implements the allow list for
 * both get and set features.
 */
static u16 nvmet_passthru_get_set_features(struct nvmet_req *req)
{
	switch (le32_to_cpu(req->cmd->features.fid)) {
	case NVME_FEAT_ARBITRATION:
	case NVME_FEAT_POWER_MGMT:
	case NVME_FEAT_LBA_RANGE:
	case NVME_FEAT_TEMP_THRESH:
	case NVME_FEAT_ERR_RECOVERY:
	case NVME_FEAT_VOLATILE_WC:
	case NVME_FEAT_WRITE_ATOMIC:
	case NVME_FEAT_AUTO_PST:
	case NVME_FEAT_TIMESTAMP:
	case NVME_FEAT_HCTM:
	case NVME_FEAT_NOPSC:
	case NVME_FEAT_RRL:
	case NVME_FEAT_PLM_CONFIG:
	case NVME_FEAT_PLM_WINDOW:
	case NVME_FEAT_HOST_BEHAVIOR:
	case NVME_FEAT_SANITIZE:
	case NVME_FEAT_VENDOR_START ... NVME_FEAT_VENDOR_END:
		return nvmet_setup_passthru_command(req);

	case NVME_FEAT_ASYNC_EVENT:
		/* There is no support for forwarding ASYNC events */
	case NVME_FEAT_IRQ_COALESCE:
	case NVME_FEAT_IRQ_CONFIG:
		/* The IRQ settings will not apply to the target controller */
	case NVME_FEAT_HOST_MEM_BUF:
		/*
		 * Any HMB that's set will not be passed through and will
		 * not work as expected
		 */
	case NVME_FEAT_SW_PROGRESS:
		/*
		 * The Pre-Boot Software Load Count doesn't make much
		 * sense for a target to export
		 */
	case NVME_FEAT_RESV_MASK:
	case NVME_FEAT_RESV_PERSIST:
		/* No reservations, see nvmet_parse_passthru_io_cmd() */
	default:
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}
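
/*
 * Admin command parser for passthru: vendor specific commands and
 * allow-listed admin commands are forwarded, while async events, keep alive
 * and a few fabrics-specific features are handled by the target itself.
 */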
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	/* Reject any commands with non-sgl flags set (i.e. fused commands) */
	if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
		return NVME_SC_INVALID_FIELD;

	/*
	 * Passthru all vendor specific commands
	 */
	if (req->cmd->common.opcode >= nvme_admin_vendor_start)
		return nvmet_setup_passthru_command(req);

	switch (req->cmd->common.opcode) {
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return NVME_SC_SUCCESS;
	case nvme_admin_keep_alive:
		/*
		 * Most PCIe ctrls don't support the keep alive cmd, so we
		 * route keep alive to the non-passthru mode. In the future,
		 * change this code when PCIe ctrls with keep alive support
		 * become available.
		 */
		req->execute = nvmet_execute_keep_alive;
		return NVME_SC_SUCCESS;
	case nvme_admin_set_features:
		switch (le32_to_cpu(req->cmd->features.fid)) {
		case NVME_FEAT_ASYNC_EVENT:
		case NVME_FEAT_KATO:
		case NVME_FEAT_NUM_QUEUES:
		case NVME_FEAT_HOST_ID:
			req->execute = nvmet_execute_set_features;
			return NVME_SC_SUCCESS;
		case NVME_FEAT_HOST_BEHAVIOR:
			req->execute = nvmet_passthru_set_host_behaviour;
			return NVME_SC_SUCCESS;
		default:
			return nvmet_passthru_get_set_features(req);
		}
		break;
	case nvme_admin_get_features:
		switch (le32_to_cpu(req->cmd->features.fid)) {
		case NVME_FEAT_ASYNC_EVENT:
		case NVME_FEAT_KATO:
		case NVME_FEAT_NUM_QUEUES:
		case NVME_FEAT_HOST_ID:
			req->execute = nvmet_execute_get_features;
			return NVME_SC_SUCCESS;
		default:
			return nvmet_passthru_get_set_features(req);
		}
		break;
	case nvme_admin_identify:
		switch (req->cmd->identify.cns) {
		case NVME_ID_CNS_CTRL:
			req->execute = nvmet_passthru_execute_cmd;
			req->p.use_workqueue = true;
			return NVME_SC_SUCCESS;
		case NVME_ID_CNS_CS_CTRL:
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				req->execute = nvmet_passthru_execute_cmd;
				req->p.use_workqueue = true;
				return NVME_SC_SUCCESS;
			}
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		case NVME_ID_CNS_NS:
			req->execute = nvmet_passthru_execute_cmd;
			req->p.use_workqueue = true;
			return NVME_SC_SUCCESS;
		case NVME_ID_CNS_CS_NS:
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				req->execute = nvmet_passthru_execute_cmd;
				req->p.use_workqueue = true;
				return NVME_SC_SUCCESS;
			}
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		default:
			return nvmet_setup_passthru_command(req);
		}
	case nvme_admin_get_log_page:
		return nvmet_setup_passthru_command(req);
	default:
		/* Reject commands not in the allowlist above */
		return nvmet_report_invalid_opcode(req);
	}
}
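
/*
 * Bind a subsystem to the NVMe controller named by passthru_ctrl_path:
 * register it in the passthru_subsystems xarray (one subsystem per
 * controller) and take references on the controller and the module that
 * implements its ops for the lifetime of the binding.
 */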
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
{
	struct nvme_ctrl *ctrl;
	struct file *file;
	int ret = -EINVAL;
	void *old;

	mutex_lock(&subsys->lock);
	if (!subsys->passthru_ctrl_path)
		goto out_unlock;
	if (subsys->passthru_ctrl)
		goto out_unlock;

	if (subsys->nr_namespaces) {
		pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
		goto out_unlock;
	}

	file = filp_open(subsys->passthru_ctrl_path, O_RDWR, 0);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_unlock;
	}

	ctrl = nvme_ctrl_from_file(file);
	if (!ctrl) {
		pr_err("failed to open nvme controller %s\n",
		       subsys->passthru_ctrl_path);
		goto out_put_file;
	}

	old = xa_cmpxchg(&passthru_subsystems, ctrl->cntlid, NULL,
			 subsys, GFP_KERNEL);
	if (xa_is_err(old)) {
		ret = xa_err(old);
		goto out_put_file;
	}

	if (old)
		goto out_put_file;

	subsys->passthru_ctrl = ctrl;
	subsys->ver = ctrl->vs;

	if (subsys->ver < NVME_VS(1, 2, 1)) {
		pr_warn("nvme controller version is too old: %llu.%llu.%llu, advertising 1.2.1\n",
			NVME_MAJOR(subsys->ver), NVME_MINOR(subsys->ver),
			NVME_TERTIARY(subsys->ver));
		subsys->ver = NVME_VS(1, 2, 1);
	}
	nvme_get_ctrl(ctrl);
	__module_get(subsys->passthru_ctrl->ops->module);
	ret = 0;

out_put_file:
	filp_close(file, NULL);
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}
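
/*
 * Undo nvmet_passthru_ctrl_enable(): drop the xarray entry and the module
 * and controller references. Callers hold subsys->lock.
 */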
static void __nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
	if (subsys->passthru_ctrl) {
		xa_erase(&passthru_subsystems, subsys->passthru_ctrl->cntlid);
		module_put(subsys->passthru_ctrl->ops->module);
		nvme_put_ctrl(subsys->passthru_ctrl);
	}
	subsys->passthru_ctrl = NULL;
	subsys->ver = NVMET_DEFAULT_VS;
}

void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
	mutex_lock(&subsys->lock);
	__nvmet_passthru_ctrl_disable(subsys);
	mutex_unlock(&subsys->lock);
}

void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
	mutex_lock(&subsys->lock);
	__nvmet_passthru_ctrl_disable(subsys);
	mutex_unlock(&subsys->lock);
	kfree(subsys->passthru_ctrl_path);
}