From f6f09c15a767fad1206068bc09ec62d3cf273460 Mon Sep 17 00:00:00 2001
From: Hannes Reinecke
Date: Wed, 6 Oct 2021 11:13:13 +0200
Subject: [PATCH 01/23] nvme: generate uevent once a multipath namespace is operational again

When fast_io_fail_tmo is set, I/O will be aborted while recovery is
still ongoing. This causes MD to set the namespace to failed, and no
further I/O will be submitted to that namespace.

However, once the recovery succeeds and the namespace becomes
operational again, the NVMe subsystem doesn't send a notification, so
MD cannot automatically reinstate operation and requires manual
interaction.

This patch will send a KOBJ_CHANGE uevent per multipathed namespace
once the underlying controller transitions to LIVE, allowing an
automatic MD reassembly with these udev rules:

/etc/udev/rules.d/65-md-auto-re-add.rules:

SUBSYSTEM!="block", GOTO="md_end"
ACTION!="change", GOTO="md_end"
ENV{ID_FS_TYPE}!="linux_raid_member", GOTO="md_end"
PROGRAM="/sbin/md_raid_auto_readd.sh $devnode"
LABEL="md_end"

/sbin/md_raid_auto_readd.sh:

MDADM=/sbin/mdadm
DEVNAME=$1

export $(${MDADM} --examine --export ${DEVNAME})

if [ -z "${MD_UUID}" ]; then
    exit 1
fi

UUID_LINK=$(readlink /dev/disk/by-id/md-uuid-${MD_UUID})
MD_DEVNAME=${UUID_LINK##*/}
export $(${MDADM} --detail --export /dev/${MD_DEVNAME})
if [ -z "${MD_METADATA}" ] ; then
    exit 1
fi
if [ $(cat /sys/block/${MD_DEVNAME}/md/degraded) != 1 ]; then
    echo "${MD_DEVNAME}: array not degraded, nothing to do"
    exit 0
fi
MD_STATE=$(cat /sys/block/${MD_DEVNAME}/md/array_state)
if [ ${MD_STATE} != "clean" ] ; then
    echo "${MD_DEVNAME}: array state ${MD_STATE}, cannot re-add"
    exit 1
fi
MD_VARNAME="MD_DEVICE_dev_${DEVNAME##*/}_ROLE"
if [ ${!MD_VARNAME} = "spare" ] ; then
    ${MDADM} --manage /dev/${MD_DEVNAME} --re-add ${DEVNAME}
fi

Changes to v2:
- Add udev rules example to description
Changes to v1:
- use disk_uevent() as suggested by hch

Signed-off-by: Hannes Reinecke
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/multipath.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 11440c86881e..22c6b64f4d93 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -105,8 +105,11 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
 
 	down_read(&ctrl->namespaces_rwsem);
 	list_for_each_entry(ns, &ctrl->namespaces, list) {
-		if (ns->head->disk)
-			kblockd_schedule_work(&ns->head->requeue_work);
+		if (!ns->head->disk)
+			continue;
+		kblockd_schedule_work(&ns->head->requeue_work);
+		if (ctrl->state == NVME_CTRL_LIVE)
+			disk_uevent(ns->head->disk, KOBJ_CHANGE);
 	}
 	up_read(&ctrl->namespaces_rwsem);
 }

From 01d838164b4c305c1cafb0c3f71fb0027d99358b Mon Sep 17 00:00:00 2001
From: Saurav Kashyap
Date: Mon, 23 Aug 2021 05:56:48 -0700
Subject: [PATCH 02/23] nvme-fc: add support for ->map_queues

NVMe-FC doesn't have support for ->map_queues, unlike the PCI, RDMA
and TCP transports. Add a ->map_queues callout for the LLDDs to
provide such functionality.
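For illustration only, a minimal sketch of what an LLDD implementation
of the new callout might look like; the example_lport structure and its
pdev/irq_offset members are hypothetical, and a real driver (see the
qla2xxx patch that follows) uses its own bookkeeping:

static void example_lldd_map_queues(struct nvme_fc_local_port *localport,
				    struct blk_mq_queue_map *map)
{
	struct example_lport *lport = localport->private;

	/* spread queues across the CPUs serviced by the MSI-X vectors */
	if (blk_mq_pci_map_queues(map, lport->pdev, lport->irq_offset))
		blk_mq_map_queues(map);	/* generic fallback spread */
}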
Signed-off-by: Saurav Kashyap
Signed-off-by: Nilesh Javali
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/fc.c         | 24 ++++++++++++++++++++++++
 include/linux/nvme-fc-driver.h |  7 +++++++
 2 files changed, 31 insertions(+)

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index aa14ad963d91..34f7a7c236bf 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -16,6 +16,7 @@
 #include <linux/nvme-fc.h>
 #include "fc.h"
 #include <scsi/scsi_transport_fc.h>
+#include <linux/blk-mq-pci.h>
 
 /* *************************** Data Structures/Defines ****************** */
 
@@ -2841,6 +2842,28 @@ nvme_fc_complete_rq(struct request *rq)
 	nvme_fc_ctrl_put(ctrl);
 }
 
+static int nvme_fc_map_queues(struct blk_mq_tag_set *set)
+{
+	struct nvme_fc_ctrl *ctrl = set->driver_data;
+	int i;
+
+	for (i = 0; i < set->nr_maps; i++) {
+		struct blk_mq_queue_map *map = &set->map[i];
+
+		if (!map->nr_queues) {
+			WARN_ON(i == HCTX_TYPE_DEFAULT);
+			continue;
+		}
+
+		/* Call LLDD map queue functionality if defined */
+		if (ctrl->lport->ops->map_queues)
+			ctrl->lport->ops->map_queues(&ctrl->lport->localport,
+						     map);
+		else
+			blk_mq_map_queues(map);
+	}
+	return 0;
+}
 
 static const struct blk_mq_ops nvme_fc_mq_ops = {
 	.queue_rq	= nvme_fc_queue_rq,
@@ -2849,6 +2872,7 @@ static const struct blk_mq_ops nvme_fc_mq_ops = {
 	.exit_request	= nvme_fc_exit_request,
 	.init_hctx	= nvme_fc_init_hctx,
 	.timeout	= nvme_fc_timeout,
+	.map_queues	= nvme_fc_map_queues,
 };
 
 static int
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 2a38f2b477a5..cb909edb76c4 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -7,6 +7,7 @@
 #define _NVME_FC_DRIVER_H 1
 
 #include <uapi/scsi/fc/fc_fs.h>
+#include <linux/blk-mq.h>
 
 /*
@@ -497,6 +498,8 @@ struct nvme_fc_port_template {
 	int	(*xmt_ls_rsp)(struct nvme_fc_local_port *localport,
 				struct nvme_fc_remote_port *rport,
 				struct nvmefc_ls_rsp *ls_rsp);
+	void	(*map_queues)(struct nvme_fc_local_port *localport,
+			      struct blk_mq_queue_map *map);
 
 	u32	max_hw_queues;
 	u16	max_sgl_segments;
@@ -779,6 +782,10 @@ struct nvmet_fc_target_port {
  *       LS received.
  *       Entrypoint is Mandatory.
  *
+ * @map_queues: This function lets the driver expose the queue mapping
+ *       to the block layer.
+ *       Entrypoint is Optional.
+ *
  * @fcp_op:  Called to perform a data transfer or transmit a response.
  *       The nvmefc_tgt_fcp_req structure is the same LLDD-supplied
  *       exchange structure specified in the nvmet_fc_rcv_fcp_req() call

From 2b2af50ae8367b0655e12ccfe6252d5c1d7ae7bf Mon Sep 17 00:00:00 2001
From: Saurav Kashyap
Date: Mon, 23 Aug 2021 05:56:49 -0700
Subject: [PATCH 03/23] qla2xxx: add ->map_queues support for nvme

Implement ->map_queues and use the block layer blk_mq_pci_map_queues
helper for mapping queues to CPUs. With this mapping, a minimum 10%
increase in performance was observed.
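For reference, a simplified conceptual sketch of what the
blk_mq_pci_map_queues() helper does (the real block layer code differs
in details such as fallback handling): each hardware queue is bound to
the CPUs in the affinity mask of its PCI IRQ vector, so a command
completes on the same CPU that submitted it:

static void pci_map_queues_sketch(struct blk_mq_queue_map *qmap,
				  struct pci_dev *pdev, int offset)
{
	unsigned int queue, cpu;

	for (queue = 0; queue < qmap->nr_queues; queue++) {
		/* affinity mask of the IRQ vector backing this queue */
		const struct cpumask *mask =
			pci_irq_get_affinity(pdev, queue + offset);

		if (!mask) {
			blk_mq_map_queues(qmap);	/* generic fallback */
			return;
		}
		for_each_cpu(cpu, mask)
			qmap->mq_map[cpu] = qmap->queue_offset + queue;
	}
}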
Signed-off-by: Saurav Kashyap
Signed-off-by: Nilesh Javali
Signed-off-by: Christoph Hellwig
---
 drivers/scsi/qla2xxx/qla_nvme.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 1c5da2dbd6f9..253055cf9daf 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -8,6 +8,8 @@
 #include <linux/delay.h>
 #include <linux/nvme.h>
 #include <linux/nvme-fc.h>
+#include <linux/blk-mq-pci.h>
+#include <linux/blk-mq.h>
 
 static struct nvme_fc_port_template qla_nvme_fc_transport;
 
@@ -642,6 +644,18 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
 	return rval;
 }
 
+static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
+		struct blk_mq_queue_map *map)
+{
+	struct scsi_qla_host *vha = lport->private;
+	int rc;
+
+	rc = blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
+	if (rc)
+		ql_log(ql_log_warn, vha, 0x21de,
+		       "pci map queue failed 0x%x", rc);
+}
+
 static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
 {
 	struct scsi_qla_host *vha = lport->private;
@@ -676,6 +690,7 @@ static struct nvme_fc_port_template qla_nvme_fc_transport = {
 	.ls_abort	= qla_nvme_ls_abort,
 	.fcp_io		= qla_nvme_post_cmd,
 	.fcp_abort	= qla_nvme_fcp_abort,
+	.map_queues	= qla_nvme_map_queues,
 	.max_hw_queues	= 8,
 	.max_sgl_segments = 1024,
 	.max_dif_sgl_segments = 64,

From e3e19dcc4c416d65f99f13d55be2b787f8d0050e Mon Sep 17 00:00:00 2001
From: Israel Rukshin
Date: Wed, 6 Oct 2021 08:09:43 +0000
Subject: [PATCH 04/23] nvmet: fix use-after-free when a port is removed

When a port is removed through configfs, any connected controllers
start their teardown flow asynchronously and can still send commands.
This causes a use-after-free bug for any command that dereferences
req->port (like in nvmet_parse_io_cmd). To fix this, wait for all the
scheduled teardown works to complete (like release_work in the
rdma/tcp drivers). This ensures there are no active controllers when
the port is eventually removed.

Signed-off-by: Israel Rukshin
Reviewed-by: Max Gurtovoy
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/configfs.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index be5d82421e3a..496d775c6770 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1553,6 +1553,8 @@ static void nvmet_port_release(struct config_item *item)
 {
 	struct nvmet_port *port = to_nvmet_port(item);
 
+	/* Let inflight controllers teardown complete */
+	flush_scheduled_work();
 	list_del(&port->global_entry);
 
 	kfree(port->ana_state);

From fcf73a804c7d6bbf0ea63531c6122aa363852e04 Mon Sep 17 00:00:00 2001
From: Israel Rukshin
Date: Wed, 6 Oct 2021 08:09:44 +0000
Subject: [PATCH 05/23] nvmet-rdma: fix use-after-free when a port is removed

When removing a port, all its controllers are being removed, but there
are queues on the port that don't belong to any controller yet (they
are still at the connection stage). This causes a use-after-free bug
for any command that dereferences req->port (like in nvmet_alloc_ctrl).
Those queues should be destroyed before freeing the port via configfs.
Destroying the remaining queues after the RDMA-CM ID was destroyed
guarantees that no new queue will be created.
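The ordering that this and the neighbouring two port-removal fixes rely
on can be sketched generically as follows; every nvmet_example_* name
here is hypothetical and only illustrates the "stop listener, then
drain, then free" pattern:

static void nvmet_example_remove_port(struct nvmet_example_port *port)
{
	/* 1) tear down the listener: no new queues can appear */
	nvmet_example_stop_listener(port);

	/* 2) drain queues that never reached controller association */
	nvmet_example_destroy_port_queues(port);

	/* 3) nothing can dereference req->port anymore; free it */
	kfree(port);
}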
Signed-off-by: Israel Rukshin
Reviewed-by: Max Gurtovoy
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/rdma.c | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 38d1f292ecc2..6f75e6be2ce3 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1819,12 +1819,36 @@ restart:
 	mutex_unlock(&nvmet_rdma_queue_mutex);
 }
 
+static void nvmet_rdma_destroy_port_queues(struct nvmet_rdma_port *port)
+{
+	struct nvmet_rdma_queue *queue, *tmp;
+	struct nvmet_port *nport = port->nport;
+
+	mutex_lock(&nvmet_rdma_queue_mutex);
+	list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
+				 queue_list) {
+		if (queue->port != nport)
+			continue;
+
+		list_del_init(&queue->queue_list);
+		__nvmet_rdma_queue_disconnect(queue);
+	}
+	mutex_unlock(&nvmet_rdma_queue_mutex);
+}
+
 static void nvmet_rdma_disable_port(struct nvmet_rdma_port *port)
 {
 	struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL);
 
 	if (cm_id)
 		rdma_destroy_id(cm_id);
+
+	/*
+	 * Destroy the remaining queues, which do not belong to any
+	 * controller yet. Doing it here, after the RDMA-CM ID was
+	 * destroyed, guarantees that no new queue will be created.
+	 */
+	nvmet_rdma_destroy_port_queues(port);
 }
 
 static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)

From 2351ead99ce9164fb42555aee3f96af84c4839e9 Mon Sep 17 00:00:00 2001
From: Israel Rukshin
Date: Wed, 6 Oct 2021 08:09:45 +0000
Subject: [PATCH 06/23] nvmet-tcp: fix use-after-free when a port is removed

When removing a port, all its controllers are being removed, but there
are queues on the port that don't belong to any controller yet (they
are still at the connection stage). This causes a use-after-free bug
for any command that dereferences req->port (like in nvmet_alloc_ctrl).
Those queues should be destroyed before freeing the port via configfs.
Destroying the remaining queues after the accept_work was cancelled
guarantees that no new queue will be created.

Signed-off-by: Israel Rukshin
Reviewed-by: Max Gurtovoy
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/tcp.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 07ee347ea3f3..6eb0b3153477 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1737,6 +1737,17 @@ err_port:
 	return ret;
 }
 
+static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
+{
+	struct nvmet_tcp_queue *queue;
+
+	mutex_lock(&nvmet_tcp_queue_mutex);
+	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
+		if (queue->port == port)
+			kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+	mutex_unlock(&nvmet_tcp_queue_mutex);
+}
+
 static void nvmet_tcp_remove_port(struct nvmet_port *nport)
 {
 	struct nvmet_tcp_port *port = nport->priv;
@@ -1746,6 +1757,11 @@ static void nvmet_tcp_remove_port(struct nvmet_port *nport)
 	port->sock->sk->sk_user_data = NULL;
 	write_unlock_bh(&port->sock->sk->sk_callback_lock);
 	cancel_work_sync(&port->accept_work);
+	/*
+	 * Destroy the remaining queues, which do not belong to any
+	 * controller yet.
+	 */
+	nvmet_tcp_destroy_port_queues(port);
 
 	sock_release(port->sock);
 	kfree(port);

From 44c3c6257e99c6284f312206de73783575fc8906 Mon Sep 17 00:00:00 2001
From: Max Gurtovoy
Date: Thu, 23 Sep 2021 00:55:35 +0300
Subject: [PATCH 07/23] nvme-rdma: limit the maximal queue size for RDMA controllers

The current limit of 1024 isn't valid for some of the RDMA based
controllers. In case the target exposes a cap of a larger number of
entries (e.g.
1024), the initiator may fail to create a QP with this size. Thus,
limit the queue size to a value that works for all RDMA adapters.

A future general solution should use the RDMA/core API to calculate
this size according to the device capabilities and the number of WRs
needed per NVMe IO request.

Signed-off-by: Max Gurtovoy
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/rdma.c  | 7 +++++++
 include/linux/nvme-rdma.h | 2 ++
 2 files changed, 9 insertions(+)

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 1624da3702d4..027ee57cbdb0 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1112,6 +1112,13 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
 			ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
 	}
 
+	if (ctrl->ctrl.sqsize + 1 > NVME_RDMA_MAX_QUEUE_SIZE) {
+		dev_warn(ctrl->ctrl.device,
+			"ctrl sqsize %u > max queue size %u, clamping down\n",
+			ctrl->ctrl.sqsize + 1, NVME_RDMA_MAX_QUEUE_SIZE);
+		ctrl->ctrl.sqsize = NVME_RDMA_MAX_QUEUE_SIZE - 1;
+	}
+
 	if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
 		dev_warn(ctrl->ctrl.device,
 			"sqsize %u > ctrl maxcmd %u, clamping down\n",
diff --git a/include/linux/nvme-rdma.h b/include/linux/nvme-rdma.h
index 3ec8e50efa16..4dd7e6fe92fb 100644
--- a/include/linux/nvme-rdma.h
+++ b/include/linux/nvme-rdma.h
@@ -6,6 +6,8 @@
 #ifndef _LINUX_NVME_RDMA_H
 #define _LINUX_NVME_RDMA_H
 
+#define NVME_RDMA_MAX_QUEUE_SIZE	128
+
 enum nvme_rdma_cm_fmt {
 	NVME_RDMA_CM_FMT_1_0 = 0x0,
 };

From 6d1555cc41c088d738b4968009b32aaeda8542a3 Mon Sep 17 00:00:00 2001
From: Max Gurtovoy
Date: Thu, 23 Sep 2021 00:55:36 +0300
Subject: [PATCH 08/23] nvmet: add get_max_queue_size op for controllers

Some transports, such as RDMA, would like to set the queue size
according to device/port/ctrl characteristics. Add a new nvmet
transport op that is called during ctrl initialization. This will not
affect transports that don't implement this option.
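Note that CAP.MQES is a 0's based field, which is why the hunk below
subtracts one: a transport reporting a maximum of 128 entries yields
MQES = 127, and the host recovers the usable depth by adding one again.
A sketch of the arithmetic, using the existing NVME_CAP_MQES() macro:

	/* target side: cap |= get_max_queue_size(ctrl) - 1, 128 -> 127 */
	u16 mqes = NVME_CAP_MQES(ctrl->cap);	/* 127 */
	u32 queue_depth = mqes + 1;		/* back to 128 */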
Reviewed-by: Chaitanya Kulkarni
Signed-off-by: Max Gurtovoy
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/core.c  | 8 +++++---
 drivers/nvme/target/nvmet.h | 1 +
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b8425fa34300..93107af3310d 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1205,7 +1205,10 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
 	/* CC.EN timeout in 500msec units: */
 	ctrl->cap |= (15ULL << 24);
 	/* maximum queue entries supported: */
-	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
+	if (ctrl->ops->get_max_queue_size)
+		ctrl->cap |= ctrl->ops->get_max_queue_size(ctrl) - 1;
+	else
+		ctrl->cap |= NVMET_QUEUE_SIZE - 1;
 
 	if (nvmet_is_passthru_subsys(ctrl->subsys))
 		nvmet_passthrough_override_cap(ctrl);
@@ -1367,6 +1370,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	mutex_init(&ctrl->lock);
 
 	ctrl->port = req->port;
+	ctrl->ops = req->ops;
 
 	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
 	INIT_LIST_HEAD(&ctrl->async_events);
@@ -1405,8 +1409,6 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	}
 	ctrl->cntlid = ret;
 
-	ctrl->ops = req->ops;
-
 	/*
 	 * Discovery controllers may use some arbitrary high value
 	 * in order to cleanup stale discovery sessions
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 7143c7fa7464..f8e0ee131dc6 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -309,6 +309,7 @@ struct nvmet_fabrics_ops {
 	u16 (*install_queue)(struct nvmet_sq *nvme_sq);
 	void (*discovery_chg)(struct nvmet_port *port);
 	u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
+	u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
 };
 
 #define NVMET_MAX_INLINE_BIOVEC	8

From c7d792f9b8b0502c807ecda57aeb5eac70cc7ab9 Mon Sep 17 00:00:00 2001
From: Max Gurtovoy
Date: Thu, 23 Sep 2021 00:55:37 +0300
Subject: [PATCH 09/23] nvmet-rdma: implement get_max_queue_size controller op

Limit the maximal queue size for RDMA controllers. Today, the target
reports a limit of 1024 and this limit isn't valid for some of the
RDMA based controllers. For now, limit the RDMA transport to 128
entries (the max queue depth configured for the Linux NVMe/RDMA host).

A future general solution should use the RDMA/core API to calculate
this size according to the device capabilities and the number of WRs
needed per NVMe IO request.
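That future solution could look roughly like the sketch below;
example_wrs_per_io is a hypothetical constant for the number of work
requests (send, RDMA read/write, memory registration) a single NVMe
command may post, and a real calculation would have to be more careful:

static u16 example_rdma_max_queue_size(struct ib_device *ibdev,
				       u32 example_wrs_per_io)
{
	/* how many commands fit into the QP's work request budget */
	u32 depth = ibdev->attrs.max_qp_wr / example_wrs_per_io;

	return min_t(u32, depth, NVME_RDMA_MAX_QUEUE_SIZE);
}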
Reported-by: Mark Ruijter
Reviewed-by: Chaitanya Kulkarni
Signed-off-by: Max Gurtovoy
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/rdma.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 6f75e6be2ce3..1deb4043e242 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -2000,6 +2000,11 @@ static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
 	return NVMET_RDMA_MAX_MDTS;
 }
 
+static u16 nvmet_rdma_get_max_queue_size(const struct nvmet_ctrl *ctrl)
+{
+	return NVME_RDMA_MAX_QUEUE_SIZE;
+}
+
 static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
 	.owner			= THIS_MODULE,
 	.type			= NVMF_TRTYPE_RDMA,
@@ -2011,6 +2016,7 @@ static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
 	.delete_ctrl		= nvmet_rdma_delete_ctrl,
 	.disc_traddr		= nvmet_rdma_disc_port_addr,
 	.get_mdts		= nvmet_rdma_get_mdts,
+	.get_max_queue_size	= nvmet_rdma_get_max_queue_size,
 };
 
 static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)

From 626851e9225df93eda00f6b256cb74ad0860e418 Mon Sep 17 00:00:00 2001
From: Hannes Reinecke
Date: Wed, 22 Sep 2021 08:35:19 +0200
Subject: [PATCH 10/23] nvmet: make discovery NQN configurable

TPAR8013 allows for unique discovery NQNs, so make the discovery
controller NQN configurable by exposing a subsys attribute
'discovery_nqn'.

Signed-off-by: Hannes Reinecke
Reviewed-by: Chaitanya Kulkarni
Reviewed-by: Himanshu Madhani
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/configfs.c | 39 ++++++++++++++++++++++++++++++++++
 drivers/nvme/target/core.c     |  3 ++-
 2 files changed, 41 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 496d775c6770..091a0ca16361 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1233,6 +1233,44 @@ static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
 }
 CONFIGFS_ATTR(nvmet_subsys_, attr_model);
 
+static ssize_t nvmet_subsys_attr_discovery_nqn_show(struct config_item *item,
+		char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%s\n",
+			nvmet_disc_subsys->subsysnqn);
+}
+
+static ssize_t nvmet_subsys_attr_discovery_nqn_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_subsys *subsys = to_subsys(item);
+	char *subsysnqn;
+	int len;
+
+	len = strcspn(page, "\n");
+	if (!len)
+		return -EINVAL;
+
+	subsysnqn = kmemdup_nul(page, len, GFP_KERNEL);
+	if (!subsysnqn)
+		return -ENOMEM;
+
+	/*
+	 * The discovery NQN must be different from the subsystem NQN.
+	 */
+	if (!strcmp(subsysnqn, subsys->subsysnqn)) {
+		kfree(subsysnqn);
+		return -EBUSY;
+	}
+	down_write(&nvmet_config_sem);
+	kfree(nvmet_disc_subsys->subsysnqn);
+	nvmet_disc_subsys->subsysnqn = subsysnqn;
+	up_write(&nvmet_config_sem);
+
+	return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_discovery_nqn);
+
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
 		char *page)
@@ -1262,6 +1300,7 @@ static struct configfs_attribute *nvmet_subsys_attrs[] = {
 	&nvmet_subsys_attr_attr_cntlid_min,
 	&nvmet_subsys_attr_attr_cntlid_max,
 	&nvmet_subsys_attr_attr_model,
+	&nvmet_subsys_attr_attr_discovery_nqn,
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 	&nvmet_subsys_attr_attr_pi_enable,
 #endif
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 93107af3310d..e1f41436ea2c 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1493,7 +1493,8 @@ static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
 	if (!port)
 		return NULL;
 
-	if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
+	if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn) ||
+	    !strcmp(nvmet_disc_subsys->subsysnqn, subsysnqn)) {
 		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
 			return NULL;
 		return nvmet_disc_subsys;

From e15a8a9755659ff5972f30de4dd64867c97f242d Mon Sep 17 00:00:00 2001
From: Hannes Reinecke
Date: Wed, 22 Sep 2021 08:35:20 +0200
Subject: [PATCH 11/23] nvme: add CNTRLTYPE definitions for 'identify controller'

Update the 'identify controller' structure to define the newly added
CNTRLTYPE field.

Signed-off-by: Hannes Reinecke
Reviewed-by: Chaitanya Kulkarni
Reviewed-by: Himanshu Madhani
Signed-off-by: Christoph Hellwig
---
 include/linux/nvme.h | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index b7c4c4130b65..ed2428918bca 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -31,6 +31,12 @@ enum nvme_subsys_type {
 	NVME_NQN_NVME	= 2,		/* NVME type target subsystem */
 };
 
+enum nvme_ctrl_type {
+	NVME_CTRL_IO	= 1,		/* I/O controller */
+	NVME_CTRL_DISC	= 2,		/* Discovery controller */
+	NVME_CTRL_ADMIN	= 3,		/* Administrative controller */
+};
+
 /* Address Family codes for Discovery Log Page entry ADRFAM field */
 enum {
 	NVMF_ADDR_FAMILY_PCI	= 0,	/* PCIe */
@@ -244,7 +250,9 @@ struct nvme_id_ctrl {
 	__le32			rtd3e;
 	__le32			oaes;
 	__le32			ctratt;
-	__u8			rsvd100[28];
+	__u8			rsvd100[11];
+	__u8			cntrltype;
+	__u8			fguid[16];
 	__le16			crdt1;
 	__le16			crdt2;
 	__le16			crdt3;

From a294711ed5123f757ed8ed2f103c851b8ee416c9 Mon Sep 17 00:00:00 2001
From: Hannes Reinecke
Date: Wed, 22 Sep 2021 08:35:21 +0200
Subject: [PATCH 12/23] nvmet: add nvmet_is_disc_subsys() helper

Add a helper function to determine if a given subsystem is a discovery
subsystem.
Signed-off-by: Hannes Reinecke
Reviewed-by: Chaitanya Kulkarni
Reviewed-by: Himanshu Madhani
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/admin-cmd.c | 2 +-
 drivers/nvme/target/core.c      | 6 +++---
 drivers/nvme/target/nvmet.h     | 5 +++++
 3 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index aa6d84d8848e..b653ea4244fe 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -1008,7 +1008,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
 	if (nvme_is_fabrics(cmd))
 		return nvmet_parse_fabrics_cmd(req);
-	if (nvmet_req_subsys(req)->type == NVME_NQN_DISC)
+	if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
 		return nvmet_parse_discovery_cmd(req);
 
 	ret = nvmet_check_ctrl_status(req);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index e1f41436ea2c..d844fb9ef2eb 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1140,7 +1140,7 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
 	 * should verify iosqes,iocqes are zeroed, however that
 	 * would break backwards compatibility, so don't enforce it.
 	 */
-	if (ctrl->subsys->type != NVME_NQN_DISC &&
+	if (!nvmet_is_disc_subsys(ctrl->subsys) &&
 	    (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
 	     nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
 		ctrl->csts = NVME_CSTS_CFS;
@@ -1281,7 +1281,7 @@ bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
 	if (subsys->allow_any_host)
 		return true;
 
-	if (subsys->type == NVME_NQN_DISC) /* allow all access to disc subsys */
+	if (nvmet_is_disc_subsys(subsys)) /* allow all access to disc subsys */
 		return true;
 
 	list_for_each_entry(p, &subsys->hosts, entry) {
@@ -1413,7 +1413,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	 * Discovery controllers may use some arbitrary high value
 	 * in order to cleanup stale discovery sessions
 	 */
-	if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
+	if (nvmet_is_disc_subsys(ctrl->subsys) && !kato)
 		kato = NVMET_DISC_KATO_MS;
 
 	/* keep-alive timeout in seconds */
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index f8e0ee131dc6..f31dcc4fb1a2 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -577,6 +577,11 @@ static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
 	return req->sq->ctrl->subsys;
 }
 
+static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
+{
+	return subsys->type == NVME_NQN_DISC;
+}
+
 #ifdef CONFIG_NVME_TARGET_PASSTHRU
 void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
 int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);

From d3aef70124e7f69975eabeb340866bc91672532d Mon Sep 17 00:00:00 2001
From: Hannes Reinecke
Date: Wed, 22 Sep 2021 08:35:22 +0200
Subject: [PATCH 13/23] nvmet: set 'CNTRLTYPE' in the identify controller data

Set the correct 'CNTRLTYPE' field in the identify controller data.
Signed-off-by: Hannes Reinecke
Reviewed-by: Chaitanya Kulkarni
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/admin-cmd.c   | 5 +++++
 drivers/nvme/target/discovery.c   | 2 ++
 drivers/nvme/target/fabrics-cmd.c | 3 ++-
 3 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index b653ea4244fe..3616a06f7836 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -374,6 +374,11 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 
 	id->rab = 6;
 
+	if (nvmet_is_disc_subsys(ctrl->subsys))
+		id->cntrltype = NVME_CTRL_DISC;
+	else
+		id->cntrltype = NVME_CTRL_IO;
+
 	/*
 	 * XXX: figure out how we can assign a IEEE OUI, but until then
 	 * the safest is to leave it as zeroes.
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 7aa62bc6ae84..7b360f8d07e9 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -268,6 +268,8 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
 	memcpy_and_pad(id->fr, sizeof(id->fr),
 		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');
 
+	id->cntrltype = NVME_CTRL_DISC;
+
 	/* no limit on data transfer sizes for now */
 	id->mdts = 0;
 	id->cntlid = cpu_to_le16(ctrl->cntlid);
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 7d0454cee920..70fb587e9413 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -221,7 +221,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 		goto out;
 	}
 
-	pr_info("creating controller %d for subsystem %s for NQN %s%s.\n",
+	pr_info("creating %s controller %d for subsystem %s for NQN %s%s.\n",
+		nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
 		ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
 		ctrl->pi_support ? " T10-PI is enabled" : "");
 	req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);

From 954ae16681f6bdf684f016ca626329302a38e177 Mon Sep 17 00:00:00 2001
From: Hannes Reinecke
Date: Wed, 22 Sep 2021 08:35:23 +0200
Subject: [PATCH 14/23] nvme: expose subsystem type in sysfs attribute 'subsystype'

With unique discovery controller NQNs we can no longer identify a
discovery subsystem from the NQN alone, but have to check the
subsystem type, too. So expose the subsystem type in a new sysfs
attribute 'subsystype'.
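From userspace the new attribute reads like any other sysfs file; a
hypothetical check (the nvme-subsys0 instance name is only an example):

#include <stdio.h>

int main(void)
{
	char type[16];
	FILE *f = fopen("/sys/class/nvme-subsystem/nvme-subsys0/subsystype",
			"r");

	/* prints "discovery", "nvm" or "reserved" */
	if (f && fscanf(f, "%15s", type) == 1)
		printf("subsystem type: %s\n", type);
	if (f)
		fclose(f);
	return 0;
}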
Signed-off-by: Hannes Reinecke
Reviewed-by: Chaitanya Kulkarni
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/core.c | 27 +++++++++++++++++++++++++++
 drivers/nvme/host/nvme.h |  1 +
 2 files changed, 28 insertions(+)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c415c3faf420..0f0f64db1a91 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2620,6 +2620,24 @@ static ssize_t nvme_subsys_show_nqn(struct device *dev,
 }
 static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
 
+static ssize_t nvme_subsys_show_type(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct nvme_subsystem *subsys =
+		container_of(dev, struct nvme_subsystem, dev);
+
+	switch (subsys->subtype) {
+	case NVME_NQN_DISC:
+		return sysfs_emit(buf, "discovery\n");
+	case NVME_NQN_NVME:
+		return sysfs_emit(buf, "nvm\n");
+	default:
+		return sysfs_emit(buf, "reserved\n");
+	}
+}
+static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);
+
 #define nvme_subsys_show_str_function(field)				\
 static ssize_t subsys_##field##_show(struct device *dev,		\
 		struct device_attribute *attr, char *buf)		\
@@ -2640,6 +2658,7 @@ static struct attribute *nvme_subsys_attrs[] = {
 	&subsys_attr_serial.attr,
 	&subsys_attr_firmware_rev.attr,
 	&subsys_attr_subsysnqn.attr,
+	&subsys_attr_subsystype.attr,
 #ifdef CONFIG_NVME_MULTIPATH
 	&subsys_attr_iopolicy.attr,
 #endif
@@ -2710,6 +2729,14 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 	memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
 	subsys->vendor_id = le16_to_cpu(id->vid);
 	subsys->cmic = id->cmic;
+
+	/* Versions prior to 1.4 don't necessarily report a valid type */
+	if (id->cntrltype == NVME_CTRL_DISC ||
+	    !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME))
+		subsys->subtype = NVME_NQN_DISC;
+	else
+		subsys->subtype = NVME_NQN_NVME;
+
 	subsys->awupf = le16_to_cpu(id->awupf);
 #ifdef CONFIG_NVME_MULTIPATH
 	subsys->iopolicy = NVME_IOPOLICY_NUMA;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index ef2467b93adb..ba3edd4d02f0 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -372,6 +372,7 @@ struct nvme_subsystem {
 	char			model[40];
 	char			firmware_rev[8];
 	u8			cmic;
+	enum nvme_subsys_type	subtype;
 	u16			vendor_id;
 	u16			awupf;	/* 0's based awupf value. */
 	struct ida		ns_ida;

From 20e8b689c9088027b7495ffd6f80812c11ecc872 Mon Sep 17 00:00:00 2001
From: Hannes Reinecke
Date: Wed, 22 Sep 2021 08:35:24 +0200
Subject: [PATCH 15/23] nvme: Add connect option 'discovery'

Add a connect option 'discovery' to specify that the connection should
be made to a discovery controller, not a normal I/O controller. With
discovery controllers supporting unique subsystem NQNs we cannot
easily tell from the subsystem NQN whether this should be a discovery
connection, but we need this information to blank out options not
supported by discovery controllers.
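For illustration, the new token is passed together with the other
fabrics options when the controller is created. A hypothetical
userspace sketch writing the option string directly to the fabrics
device (the address values are examples; nvme-cli normally does this):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *opts = "transport=tcp,traddr=192.168.0.10,trsvcid=8009,"
			   "nqn=nqn.2014-08.org.nvmexpress.discovery,discovery";
	int fd = open("/dev/nvme-fabrics", O_RDWR);

	if (fd < 0)
		return 1;
	if (write(fd, opts, strlen(opts)) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}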
Signed-off-by: Hannes Reinecke
Reviewed-by: Chaitanya Kulkarni
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/core.c    | 7 +++++++
 drivers/nvme/host/fabrics.c | 6 +++++-
 drivers/nvme/host/fabrics.h | 1 +
 3 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 0f0f64db1a91..fd856eec0912 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2737,6 +2737,13 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 	else
 		subsys->subtype = NVME_NQN_NVME;
 
+	if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) {
+		dev_err(ctrl->device,
+			"Subsystem %s is not a discovery controller",
+			subsys->subnqn);
+		kfree(subsys);
+		return -EINVAL;
+	}
 	subsys->awupf = le16_to_cpu(id->awupf);
 #ifdef CONFIG_NVME_MULTIPATH
 	subsys->iopolicy = NVME_IOPOLICY_NUMA;
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 668c6bb7a567..c5a2b71c5268 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -548,6 +548,7 @@ static const match_table_t opt_tokens = {
 	{ NVMF_OPT_NR_POLL_QUEUES,	"nr_poll_queues=%d"	},
 	{ NVMF_OPT_TOS,			"tos=%d"		},
 	{ NVMF_OPT_FAIL_FAST_TMO,	"fast_io_fail_tmo=%d"	},
+	{ NVMF_OPT_DISCOVERY,		"discovery"		},
 	{ NVMF_OPT_ERR,			NULL			}
 };
 
@@ -823,6 +824,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 			}
 			opts->tos = token;
 			break;
+		case NVMF_OPT_DISCOVERY:
+			opts->discovery_nqn = true;
+			break;
 		default:
 			pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
 				p);
@@ -949,7 +953,7 @@ EXPORT_SYMBOL_GPL(nvmf_free_options);
 #define NVMF_ALLOWED_OPTS	(NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
 				 NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
 				 NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
-				 NVMF_OPT_DISABLE_SQFLOW |\
+				 NVMF_OPT_DISABLE_SQFLOW | NVMF_OPT_DISCOVERY |\
 				 NVMF_OPT_FAIL_FAST_TMO)
 
 static struct nvme_ctrl *
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index a146cb903869..b61b666e10ec 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -67,6 +67,7 @@ enum {
 	NVMF_OPT_TOS		= 1 << 19,
 	NVMF_OPT_FAIL_FAST_TMO	= 1 << 20,
 	NVMF_OPT_HOST_IFACE	= 1 << 21,
+	NVMF_OPT_DISCOVERY	= 1 << 22,
 };
 
 /**

From e5ea42faa773c6a6bb5d9e9f5c2cc808940b5a55 Mon Sep 17 00:00:00 2001
From: Hannes Reinecke
Date: Wed, 22 Sep 2021 08:35:25 +0200
Subject: [PATCH 16/23] nvme: display correct subsystem NQN

With discovery controllers supporting unique subsystem NQNs the actual
subsystem NQN might be different from the one passed in via the
connect args. So add a helper to display the resulting subsystem NQN.
Signed-off-by: Hannes Reinecke
Reviewed-by: Chaitanya Kulkarni
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/core.c    | 2 +-
 drivers/nvme/host/fabrics.h | 7 +++++++
 drivers/nvme/host/fc.c      | 2 +-
 drivers/nvme/host/rdma.c    | 2 +-
 drivers/nvme/host/tcp.c     | 2 +-
 5 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index fd856eec0912..3825b596ca16 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -222,7 +222,7 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
 static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
 {
 	dev_info(ctrl->device,
-		 "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);
+		 "Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl));
 
 	flush_work(&ctrl->reset_work);
 	nvme_stop_ctrl(ctrl);
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index b61b666e10ec..c3203ff1c654 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -179,6 +179,13 @@ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
 	return true;
 }
 
+static inline char *nvmf_ctrl_subsysnqn(struct nvme_ctrl *ctrl)
+{
+	if (!ctrl->subsys)
+		return ctrl->opts->subsysnqn;
+	return ctrl->subsys->subnqn;
+}
+
 int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
 int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
 int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 34f7a7c236bf..be9892894849 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -3596,7 +3596,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 	dev_info(ctrl->ctrl.device,
 		"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
-		ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
+		ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl));
 
 	return &ctrl->ctrl;
 
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 027ee57cbdb0..5ba386f00c3f 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -2393,7 +2393,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 		goto out_uninit_ctrl;
 
 	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
-		ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
+		nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
 
 	mutex_lock(&nvme_rdma_ctrl_mutex);
 	list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 9ce3458ee1dd..07156ea9d1a8 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2582,7 +2582,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
 		goto out_uninit_ctrl;
 
 	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
-		ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
+		nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
 
 	mutex_lock(&nvme_tcp_ctrl_mutex);
 	list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);

From 571b5444d1eee112e82eebd0cf877dc510b8b5a5 Mon Sep 17 00:00:00 2001
From: Max Gurtovoy
Date: Thu, 23 Sep 2021 13:17:43 +0300
Subject: [PATCH 17/23] nvmet: use macro definition for setting nmic value

This makes the code more readable.
Signed-off-by: Max Gurtovoy
Reviewed-by: Keith Busch
Reviewed-by: Sagi Grimberg
Reviewed-by: Chaitanya Kulkarni
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/admin-cmd.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 3616a06f7836..dce53030b375 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -541,7 +541,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
 	 * Our namespace might always be shared. Not just with other
 	 * controllers, but also with any other user of the block device.
 	 */
-	id->nmic = (1 << 0);
+	id->nmic = NVME_NS_NMIC_SHARED;
 	id->anagrpid = cpu_to_le32(req->ns->anagrpid);
 
 	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));

From d56ae18f063e38eba47550c632519c9fe3f76b19 Mon Sep 17 00:00:00 2001
From: Max Gurtovoy
Date: Thu, 23 Sep 2021 13:17:44 +0300
Subject: [PATCH 18/23] nvmet: use macro definitions for setting cmic value

This makes the code more readable.

Signed-off-by: Max Gurtovoy
Reviewed-by: Chaitanya Kulkarni
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/admin-cmd.c | 3 ++-
 include/linux/nvme.h            | 1 +
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index dce53030b375..6e75890bc8d9 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -385,7 +385,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 	 */
 
 	/* we support multiple ports, multiples hosts and ANA: */
-	id->cmic = (1 << 0) | (1 << 1) | (1 << 3);
+	id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
+		NVME_CTRL_CMIC_ANA;
 
 	/* Limit MDTS according to transport capability */
 	if (ctrl->ops->get_mdts)
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index ed2428918bca..357482dedb59 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -320,6 +320,7 @@ struct nvme_id_ctrl {
 };
 
 enum {
+	NVME_CTRL_CMIC_MULTI_PORT		= 1 << 0,
 	NVME_CTRL_CMIC_MULTI_CTRL		= 1 << 1,
 	NVME_CTRL_CMIC_ANA			= 1 << 3,
 	NVME_CTRL_ONCS_COMPARE			= 1 << 0,

From 11384580e3322b41dbaa68dbcd949af875b8aa22 Mon Sep 17 00:00:00 2001
From: Luis Chamberlain
Date: Fri, 15 Oct 2021 16:52:08 -0700
Subject: [PATCH 19/23] nvme-multipath: add error handling support for add_disk()

We never checked for errors on add_disk() as this function returned
void. Now that this is fixed, use the shiny new error handling.

Since we can now tell for sure when a disk was added, only set the
NVME_NSHEAD_DISK_LIVE bit once the disk has actually been added
successfully. No cleanup is needed here, as that is done elsewhere.

We use test_and_set_bit() because it protects against two nvme paths
simultaneously calling device_add_disk() on the same namespace head.
Signed-off-by: Luis Chamberlain
Reviewed-by: Keith Busch
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/multipath.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 22c6b64f4d93..bc1e7d8a2c0f 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -509,13 +509,23 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 static void nvme_mpath_set_live(struct nvme_ns *ns)
 {
 	struct nvme_ns_head *head = ns->head;
+	int rc;
 
 	if (!head->disk)
 		return;
 
+	/*
+	 * test_and_set_bit() is used because it is protecting against two nvme
+	 * paths simultaneously calling device_add_disk() on the same namespace
+	 * head.
+	 */
 	if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
-		device_add_disk(&head->subsys->dev, head->disk,
-				nvme_ns_id_attr_groups);
+		rc = device_add_disk(&head->subsys->dev, head->disk,
+				     nvme_ns_id_attr_groups);
+		if (rc) {
+			clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags);
+			return;
+		}
 		nvme_add_ns_head_cdev(head);
 	}

From 09748122009aed7bfaa7acc33c10c083a4758322 Mon Sep 17 00:00:00 2001
From: Max Gurtovoy
Date: Sun, 17 Oct 2021 11:58:16 +0300
Subject: [PATCH 20/23] nvme-rdma: fix error code in nvme_rdma_setup_ctrl

In case icdoff is not zero or mandatory keyed sgls are not supported
by the NVMe/RDMA target, we'll go to the error flow but return 0 to
the caller. Fix it by returning an appropriate error code.

Fixes: c66e2998c8ca ("nvme-rdma: centralize controller setup sequence")
Signed-off-by: Max Gurtovoy
Reviewed-by: Sagi Grimberg
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/rdma.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 5ba386f00c3f..9317f26e51e0 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1096,11 +1096,13 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
 		return ret;
 
 	if (ctrl->ctrl.icdoff) {
+		ret = -EOPNOTSUPP;
 		dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
 		goto destroy_admin;
 	}
 
 	if (!(ctrl->ctrl.sgls & (1 << 2))) {
+		ret = -EOPNOTSUPP;
 		dev_err(ctrl->ctrl.device,
 			"Mandatory keyed sgls are not supported!\n");
 		goto destroy_admin;

From 58847f12fe7823c56f844218abcca6920901097d Mon Sep 17 00:00:00 2001
From: Keith Busch
Date: Thu, 14 Oct 2021 09:45:42 -0700
Subject: [PATCH 21/23] nvme-pci: clear shadow doorbell memory on resets

The host memory doorbell and event buffers need to be initialized on
each reset so the driver doesn't observe stale values from the
previous instantiation.

Signed-off-by: Keith Busch
Tested-by: John Levon
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/pci.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index ed684874842f..6e05cfb4879f 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -245,8 +245,15 @@ static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
 {
 	unsigned int mem_size = nvme_dbbuf_size(dev);
 
-	if (dev->dbbuf_dbs)
+	if (dev->dbbuf_dbs) {
+		/*
+		 * Clear the dbbuf memory so the driver doesn't observe stale
+		 * values from the previous instantiation.
+		 */
+		memset(dev->dbbuf_dbs, 0, mem_size);
+		memset(dev->dbbuf_eis, 0, mem_size);
 		return 0;
+	}
 
 	dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
 					    &dev->dbbuf_dbs_dma_addr,

From 2b81a5f015199f3d585ce710190a9e87714d3c1e Mon Sep 17 00:00:00 2001
From: Hannes Reinecke
Date: Wed, 20 Oct 2021 07:59:10 +0200
Subject: [PATCH 22/23] nvme: drop scan_lock and always kick requeue list when removing namespaces

When reading the partition table on initial scan hits an I/O error
the I/O will hang with the scan_mutex held:

[<0>] do_read_cache_page+0x49b/0x790
[<0>] read_part_sector+0x39/0xe0
[<0>] read_lba+0xf9/0x1d0
[<0>] efi_partition+0xf1/0x7f0
[<0>] bdev_disk_changed+0x1ee/0x550
[<0>] blkdev_get_whole+0x81/0x90
[<0>] blkdev_get_by_dev+0x128/0x2e0
[<0>] device_add_disk+0x377/0x3c0
[<0>] nvme_mpath_set_live+0x130/0x1b0 [nvme_core]
[<0>] nvme_mpath_add_disk+0x150/0x160 [nvme_core]
[<0>] nvme_alloc_ns+0x417/0x950 [nvme_core]
[<0>] nvme_validate_or_alloc_ns+0xe9/0x1e0 [nvme_core]
[<0>] nvme_scan_work+0x168/0x310 [nvme_core]
[<0>] process_one_work+0x231/0x420

and trying to delete the controller will deadlock as it tries to grab
the scan mutex:

[<0>] nvme_mpath_clear_ctrl_paths+0x25/0x80 [nvme_core]
[<0>] nvme_remove_namespaces+0x31/0xf0 [nvme_core]
[<0>] nvme_do_delete_ctrl+0x4b/0x80 [nvme_core]

As we're now properly ordering the namespace list there is no need to
hold the scan_mutex in nvme_mpath_clear_ctrl_paths() anymore. And we
always need to kick the requeue list as the path will be marked as
unusable and I/O will be requeued _without_ a current path.

Signed-off-by: Hannes Reinecke
Reviewed-by: Keith Busch
Reviewed-by: Sagi Grimberg
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/multipath.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index bc1e7d8a2c0f..954e84df6eb7 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -146,13 +146,12 @@ void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
 {
 	struct nvme_ns *ns;
 
-	mutex_lock(&ctrl->scan_lock);
 	down_read(&ctrl->namespaces_rwsem);
-	list_for_each_entry(ns, &ctrl->namespaces, list)
-		if (nvme_mpath_clear_current_path(ns))
-			kblockd_schedule_work(&ns->head->requeue_work);
+	list_for_each_entry(ns, &ctrl->namespaces, list) {
+		nvme_mpath_clear_current_path(ns);
+		kblockd_schedule_work(&ns->head->requeue_work);
+	}
 	up_read(&ctrl->namespaces_rwsem);
-	mutex_unlock(&ctrl->scan_lock);
 }
 
 void nvme_mpath_revalidate_paths(struct nvme_ns *ns)

From 117d5b6d00ee02f73d7065fe906e2ef1af74bb68 Mon Sep 17 00:00:00 2001
From: Len Baker
Date: Sun, 17 Oct 2021 11:56:50 +0200
Subject: [PATCH 23/23] nvmet: use struct_size over open coded arithmetic

As noted in the "Deprecated Interfaces, Language Features, Attributes,
and Conventions" documentation [1], size calculations (especially
multiplication) should not be performed in memory allocator (or
similar) function arguments due to the risk of them overflowing. This
could lead to values wrapping around and a smaller allocation being
made than the caller was expecting. Using those allocations could lead
to linear overflows of heap memory and other misbehaviors.

In this case this is not actually a dynamic size: all the operands
involved in the calculation are constant values. However it is better
to refactor this anyway, just to keep the open-coded math idiom out of
the code.

So, use the struct_size() helper to do the arithmetic instead of the
argument "size + count * size" in the kmalloc() function.
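Conceptually (ignoring the overflow checking that the helper adds),
the replacement below expands to:

	struct_size(desc, nsids, NVMET_MAX_NAMESPACES)
		== sizeof(*desc) + NVMET_MAX_NAMESPACES * sizeof(desc->nsids[0])

where nsids is the flexible __le32 array at the end of struct
nvme_ana_group_desc; on overflow struct_size() saturates to SIZE_MAX,
so the allocation fails instead of wrapping around.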
This code was detected with the help of Coccinelle and audited and
fixed manually.

[1] https://www.kernel.org/doc/html/latest/process/deprecated.html#open-coded-arithmetic-in-allocator-arguments

Signed-off-by: Len Baker
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/admin-cmd.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 6e75890bc8d9..403de678fd06 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -278,8 +278,8 @@ static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
 	u16 status;
 
 	status = NVME_SC_INTERNAL;
-	desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
-			NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
+	desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES),
+		       GFP_KERNEL);
 	if (!desc)
 		goto out;