// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Fabrics command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
# define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
# include <linux/blkdev.h>
# include "nvmet.h"
static void nvmet_execute_prop_set ( struct nvmet_req * req )
{
2018-12-12 15:11:44 -08:00
u64 val = le64_to_cpu ( req - > cmd - > prop_set . value ) ;
2016-06-21 18:04:20 +02:00
u16 status = 0 ;
2020-05-19 17:05:59 +03:00
if ( ! nvmet_check_transfer_len ( req , 0 ) )
2019-10-23 10:35:44 -06:00
return ;
2018-12-12 15:11:44 -08:00
if ( req - > cmd - > prop_set . attrib & 1 ) {
req - > error_loc =
offsetof ( struct nvmf_property_set_command , attrib ) ;
2016-06-21 18:04:20 +02:00
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR ;
2018-12-12 15:11:44 -08:00
goto out ;
2016-06-21 18:04:20 +02:00
}
2018-12-12 15:11:44 -08:00
switch ( le32_to_cpu ( req - > cmd - > prop_set . offset ) ) {
case NVME_REG_CC :
nvmet_update_cc ( req - > sq - > ctrl , val ) ;
break ;
default :
req - > error_loc =
offsetof ( struct nvmf_property_set_command , offset ) ;
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR ;
}
out :
2016-06-21 18:04:20 +02:00
nvmet_req_complete ( req , status ) ;
}
static void nvmet_execute_prop_get ( struct nvmet_req * req )
{
struct nvmet_ctrl * ctrl = req - > sq - > ctrl ;
u16 status = 0 ;
u64 val = 0 ;
2020-05-19 17:05:59 +03:00
if ( ! nvmet_check_transfer_len ( req , 0 ) )
2019-10-23 10:35:44 -06:00
return ;
2016-06-21 18:04:20 +02:00
if ( req - > cmd - > prop_get . attrib & 1 ) {
switch ( le32_to_cpu ( req - > cmd - > prop_get . offset ) ) {
case NVME_REG_CAP :
val = ctrl - > cap ;
break ;
default :
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR ;
break ;
}
} else {
switch ( le32_to_cpu ( req - > cmd - > prop_get . offset ) ) {
case NVME_REG_VS :
val = ctrl - > subsys - > ver ;
break ;
case NVME_REG_CC :
val = ctrl - > cc ;
break ;
case NVME_REG_CSTS :
val = ctrl - > csts ;
break ;
default :
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR ;
break ;
}
}
2018-12-12 15:11:44 -08:00
if ( status & & req - > cmd - > prop_get . attrib & 1 ) {
req - > error_loc =
offsetof ( struct nvmf_property_get_command , offset ) ;
} else {
req - > error_loc =
offsetof ( struct nvmf_property_get_command , attrib ) ;
}
2019-04-08 18:39:59 +03:00
req - > cqe - > result . u64 = cpu_to_le64 ( val ) ;
2016-06-21 18:04:20 +02:00
nvmet_req_complete ( req , status ) ;
}
2022-06-27 11:52:04 +02:00
u16 nvmet_parse_fabrics_admin_cmd ( struct nvmet_req * req )
2016-06-21 18:04:20 +02:00
{
struct nvme_command * cmd = req - > cmd ;
switch ( cmd - > fabrics . fctype ) {
case nvme_fabrics_type_property_set :
req - > execute = nvmet_execute_prop_set ;
break ;
case nvme_fabrics_type_property_get :
req - > execute = nvmet_execute_prop_get ;
break ;
2022-06-27 11:52:05 +02:00
# ifdef CONFIG_NVME_TARGET_AUTH
case nvme_fabrics_type_auth_send :
req - > execute = nvmet_execute_auth_send ;
break ;
case nvme_fabrics_type_auth_receive :
req - > execute = nvmet_execute_auth_receive ;
break ;
# endif
2016-06-21 18:04:20 +02:00
default :
2021-05-10 12:15:38 -07:00
pr_debug ( " received unknown capsule type 0x%x \n " ,
2016-06-21 18:04:20 +02:00
cmd - > fabrics . fctype ) ;
2018-12-12 15:11:44 -08:00
req - > error_loc = offsetof ( struct nvmf_common_command , fctype ) ;
2016-06-21 18:04:20 +02:00
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR ;
}
return 0 ;
}
2022-06-27 11:52:04 +02:00
u16 nvmet_parse_fabrics_io_cmd ( struct nvmet_req * req )
{
struct nvme_command * cmd = req - > cmd ;
switch ( cmd - > fabrics . fctype ) {
2022-06-27 11:52:05 +02:00
# ifdef CONFIG_NVME_TARGET_AUTH
case nvme_fabrics_type_auth_send :
req - > execute = nvmet_execute_auth_send ;
break ;
case nvme_fabrics_type_auth_receive :
req - > execute = nvmet_execute_auth_receive ;
break ;
# endif
2022-06-27 11:52:04 +02:00
default :
pr_debug ( " received unknown capsule type 0x%x \n " ,
cmd - > fabrics . fctype ) ;
req - > error_loc = offsetof ( struct nvmf_common_command , fctype ) ;
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR ;
}
return 0 ;
}
2016-06-21 18:04:20 +02:00
static u16 nvmet_install_queue ( struct nvmet_ctrl * ctrl , struct nvmet_req * req )
{
struct nvmf_connect_command * c = & req - > cmd - > connect ;
u16 qid = le16_to_cpu ( c - > qid ) ;
u16 sqsize = le16_to_cpu ( c - > sqsize ) ;
struct nvmet_ctrl * old ;
2021-08-05 18:02:51 +03:00
u16 mqes = NVME_CAP_MQES ( ctrl - > cap ) ;
2020-02-04 14:38:10 +02:00
u16 ret ;
2016-06-21 18:04:20 +02:00
2017-09-18 09:08:29 -07:00
if ( ! sqsize ) {
pr_warn ( " queue size zero! \n " ) ;
2018-12-12 15:11:44 -08:00
req - > error_loc = offsetof ( struct nvmf_connect_command , sqsize ) ;
2021-08-08 09:20:14 +03:00
req - > cqe - > result . u32 = IPO_IATTR_CONNECT_SQE ( sqsize ) ;
2020-02-04 14:38:10 +02:00
ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR ;
goto err ;
2017-09-18 09:08:29 -07:00
}
2016-06-21 18:04:20 +02:00
2021-08-08 18:06:15 +03:00
if ( ctrl - > sqs [ qid ] ! = NULL ) {
pr_warn ( " qid %u has already been created \n " , qid ) ;
req - > error_loc = offsetof ( struct nvmf_connect_command , qid ) ;
return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR ;
}
2021-08-05 18:02:51 +03:00
if ( sqsize > mqes ) {
pr_warn ( " sqsize %u is larger than MQES supported %u cntlid %d \n " ,
sqsize , mqes , ctrl - > cntlid ) ;
req - > error_loc = offsetof ( struct nvmf_connect_command , sqsize ) ;
req - > cqe - > result . u32 = IPO_IATTR_CONNECT_SQE ( sqsize ) ;
return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR ;
}
2021-08-08 18:06:15 +03:00
old = cmpxchg ( & req - > sq - > ctrl , NULL , ctrl ) ;
if ( old ) {
pr_warn ( " queue already connected! \n " ) ;
req - > error_loc = offsetof ( struct nvmf_connect_command , opcode ) ;
return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR ;
}
2017-09-18 09:08:29 -07:00
/* note: convert queue size from 0's-based value to 1's-based value */
nvmet_cq_setup ( ctrl , req - > cq , qid , sqsize + 1 ) ;
nvmet_sq_setup ( ctrl , req - > sq , qid , sqsize + 1 ) ;
2018-11-19 14:11:12 -08:00
if ( c - > cattr & NVME_CONNECT_DISABLE_SQFLOW ) {
req - > sq - > sqhd_disabled = true ;
2019-04-08 18:39:59 +03:00
req - > cqe - > sq_head = cpu_to_le16 ( 0xffff ) ;
2018-11-19 14:11:12 -08:00
}
2018-12-03 17:52:11 -08:00
if ( ctrl - > ops - > install_queue ) {
2020-02-04 14:38:10 +02:00
ret = ctrl - > ops - > install_queue ( req - > sq ) ;
2018-12-03 17:52:11 -08:00
if ( ret ) {
pr_err ( " failed to install queue %d cntlid %d ret %x \n " ,
2020-02-04 14:38:09 +02:00
qid , ctrl - > cntlid , ret ) ;
2021-08-08 18:06:15 +03:00
ctrl - > sqs [ qid ] = NULL ;
2020-02-04 14:38:10 +02:00
goto err ;
2018-12-03 17:52:11 -08:00
}
}
2016-06-21 18:04:20 +02:00
return 0 ;
2020-02-04 14:38:10 +02:00
err :
req - > sq - > ctrl = NULL ;
return ret ;
2016-06-21 18:04:20 +02:00
}
2022-09-20 16:09:57 +02:00
static u32 nvmet_connect_result ( struct nvmet_ctrl * ctrl )
{
return ( u32 ) ctrl - > cntlid |
( nvmet_has_auth ( ctrl ) ? NVME_CONNECT_AUTHREQ_ATR : 0 ) ;
}
2016-06-21 18:04:20 +02:00
static void nvmet_execute_admin_connect ( struct nvmet_req * req )
{
struct nvmf_connect_command * c = & req - > cmd - > connect ;
struct nvmf_connect_data * d ;
struct nvmet_ctrl * ctrl = NULL ;
u16 status = 0 ;
2022-06-27 11:52:05 +02:00
int ret ;
2016-06-21 18:04:20 +02:00
2020-05-19 17:05:59 +03:00
if ( ! nvmet_check_transfer_len ( req , sizeof ( struct nvmf_connect_data ) ) )
2019-10-23 10:35:44 -06:00
return ;
2017-04-18 17:32:15 -06:00
d = kmalloc ( sizeof ( * d ) , GFP_KERNEL ) ;
if ( ! d ) {
status = NVME_SC_INTERNAL ;
goto complete ;
}
status = nvmet_copy_from_sgl ( req , 0 , d , sizeof ( * d ) ) ;
if ( status )
goto out ;
2016-06-21 18:04:20 +02:00
/* zero out initial completion result, assign values as needed */
2019-04-08 18:39:59 +03:00
req - > cqe - > result . u32 = 0 ;
2016-06-21 18:04:20 +02:00
if ( c - > recfmt ! = 0 ) {
pr_warn ( " invalid connect version (%d). \n " ,
le16_to_cpu ( c - > recfmt ) ) ;
2018-12-12 15:11:44 -08:00
req - > error_loc = offsetof ( struct nvmf_connect_command , recfmt ) ;
2016-06-21 18:04:20 +02:00
status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR ;
goto out ;
}
if ( unlikely ( d - > cntlid ! = cpu_to_le16 ( 0xffff ) ) ) {
pr_warn ( " connect attempt for invalid controller ID %#x \n " ,
d - > cntlid ) ;
status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR ;
2019-04-08 18:39:59 +03:00
req - > cqe - > result . u32 = IPO_IATTR_CONNECT_DATA ( cntlid ) ;
2016-06-21 18:04:20 +02:00
goto out ;
}
status = nvmet_alloc_ctrl ( d - > subsysnqn , d - > hostnqn , req ,
2017-04-18 17:32:15 -06:00
le32_to_cpu ( c - > kato ) , & ctrl ) ;
2021-02-24 17:56:38 -08:00
if ( status )
2016-06-21 18:04:20 +02:00
goto out ;
2018-12-12 15:11:44 -08:00
2020-05-19 17:06:01 +03:00
ctrl - > pi_support = ctrl - > port - > pi_enable & & ctrl - > subsys - > pi_support ;
2017-08-30 15:22:59 +03:00
uuid_copy ( & ctrl - > hostid , & d - > hostid ) ;
2016-06-21 18:04:20 +02:00
2022-06-27 11:52:05 +02:00
ret = nvmet_setup_auth ( ctrl ) ;
if ( ret < 0 ) {
pr_err ( " Failed to setup authentication, error %d \n " , ret ) ;
nvmet_ctrl_put ( ctrl ) ;
if ( ret = = - EPERM )
status = ( NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR ) ;
else
status = NVME_SC_INTERNAL ;
goto out ;
}
2016-06-21 18:04:20 +02:00
status = nvmet_install_queue ( ctrl , req ) ;
if ( status ) {
nvmet_ctrl_put ( ctrl ) ;
goto out ;
}
2022-06-27 11:52:05 +02:00
pr_info ( " creating %s controller %d for subsystem %s for NQN %s%s%s. \n " ,
2021-09-22 08:35:22 +02:00
nvmet_is_disc_subsys ( ctrl - > subsys ) ? " discovery " : " nvm " ,
2020-05-19 17:06:01 +03:00
ctrl - > cntlid , ctrl - > subsys - > subsysnqn , ctrl - > hostnqn ,
2022-06-27 11:52:05 +02:00
ctrl - > pi_support ? " T10-PI is enabled " : " " ,
nvmet_has_auth ( ctrl ) ? " with DH-HMAC-CHAP " : " " ) ;
2022-09-20 16:09:57 +02:00
req - > cqe - > result . u32 = cpu_to_le32 ( nvmet_connect_result ( ctrl ) ) ;
2016-06-21 18:04:20 +02:00
out :
2017-04-18 17:32:15 -06:00
kfree ( d ) ;
complete :
2016-06-21 18:04:20 +02:00
nvmet_req_complete ( req , status ) ;
}
static void nvmet_execute_io_connect ( struct nvmet_req * req )
{
struct nvmf_connect_command * c = & req - > cmd - > connect ;
struct nvmf_connect_data * d ;
2021-03-09 17:16:32 -08:00
struct nvmet_ctrl * ctrl ;
2016-06-21 18:04:20 +02:00
u16 qid = le16_to_cpu ( c - > qid ) ;
u16 status = 0 ;
2020-05-19 17:05:59 +03:00
if ( ! nvmet_check_transfer_len ( req , sizeof ( struct nvmf_connect_data ) ) )
2019-10-23 10:35:44 -06:00
return ;
2017-04-18 17:32:15 -06:00
d = kmalloc ( sizeof ( * d ) , GFP_KERNEL ) ;
if ( ! d ) {
status = NVME_SC_INTERNAL ;
goto complete ;
}
status = nvmet_copy_from_sgl ( req , 0 , d , sizeof ( * d ) ) ;
if ( status )
goto out ;
2016-06-21 18:04:20 +02:00
/* zero out initial completion result, assign values as needed */
2019-04-08 18:39:59 +03:00
req - > cqe - > result . u32 = 0 ;
2016-06-21 18:04:20 +02:00
if ( c - > recfmt ! = 0 ) {
pr_warn ( " invalid connect version (%d). \n " ,
le16_to_cpu ( c - > recfmt ) ) ;
status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR ;
goto out ;
}
2021-03-09 17:16:32 -08:00
ctrl = nvmet_ctrl_find_get ( d - > subsysnqn , d - > hostnqn ,
le16_to_cpu ( d - > cntlid ) , req ) ;
if ( ! ctrl ) {
status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR ;
2016-06-21 18:04:20 +02:00
goto out ;
2021-03-09 17:16:32 -08:00
}
2016-06-21 18:04:20 +02:00
if ( unlikely ( qid > ctrl - > subsys - > max_qid ) ) {
pr_warn ( " invalid queue id (%d) \n " , qid ) ;
status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR ;
2019-04-08 18:39:59 +03:00
req - > cqe - > result . u32 = IPO_IATTR_CONNECT_SQE ( qid ) ;
2016-06-21 18:04:20 +02:00
goto out_ctrl_put ;
}
status = nvmet_install_queue ( ctrl , req ) ;
2021-08-08 09:20:14 +03:00
if ( status )
2016-06-21 18:04:20 +02:00
goto out_ctrl_put ;
2021-08-08 09:20:14 +03:00
2017-12-04 10:47:09 +02:00
pr_debug ( " adding queue %d to ctrl %d. \n " , qid , ctrl - > cntlid ) ;
2022-09-20 16:09:57 +02:00
req - > cqe - > result . u32 = cpu_to_le32 ( nvmet_connect_result ( ctrl ) ) ;
2016-06-21 18:04:20 +02:00
out :
2017-04-18 17:32:15 -06:00
kfree ( d ) ;
complete :
2016-06-21 18:04:20 +02:00
nvmet_req_complete ( req , status ) ;
return ;
out_ctrl_put :
nvmet_ctrl_put ( ctrl ) ;
goto out ;
}
2017-02-27 23:21:33 -06:00
u16 nvmet_parse_connect_cmd ( struct nvmet_req * req )
2016-06-21 18:04:20 +02:00
{
struct nvme_command * cmd = req - > cmd ;
2019-06-06 14:30:14 +09:00
if ( ! nvme_is_fabrics ( cmd ) ) {
2021-05-10 12:15:38 -07:00
pr_debug ( " invalid command 0x%x on unconnected queue. \n " ,
2016-06-21 18:04:20 +02:00
cmd - > fabrics . opcode ) ;
2018-12-12 15:11:44 -08:00
req - > error_loc = offsetof ( struct nvme_common_command , opcode ) ;
2016-06-21 18:04:20 +02:00
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR ;
}
if ( cmd - > fabrics . fctype ! = nvme_fabrics_type_connect ) {
2021-05-10 12:15:38 -07:00
pr_debug ( " invalid capsule type 0x%x on unconnected queue. \n " ,
2016-06-21 18:04:20 +02:00
cmd - > fabrics . fctype ) ;
2018-12-12 15:11:44 -08:00
req - > error_loc = offsetof ( struct nvmf_common_command , fctype ) ;
2016-06-21 18:04:20 +02:00
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR ;
}
if ( cmd - > connect . qid = = 0 )
req - > execute = nvmet_execute_admin_connect ;
else
req - > execute = nvmet_execute_io_connect ;
return 0 ;
}