nvme updates for Linux 6.11

Merge tag 'nvme-6.11-2024-07-08' of git://git.infradead.org/nvme into for-6.11/block

Pull NVMe updates from Keith:

"nvme updates for Linux 6.11

 - Device initialization memory leak fixes (Keith)
 - More constants defined (Weiwen)
 - Target debugfs support (Hannes)
 - PCIe subsystem reset enhancements (Keith)
 - Queue-depth multipath policy (Red Hat and PureStorage)
 - Implement get_unique_id (Christoph)
 - Authentication error fixes (Gaosheng)"

* tag 'nvme-6.11-2024-07-08' of git://git.infradead.org/nvme: (21 commits)
  nvmet-auth: fix nvmet_auth hash error handling
  nvme: implement ->get_unique_id
  nvme-multipath: implement "queue-depth" iopolicy
  nvme-multipath: prepare for "queue-depth" iopolicy
  nvme-pci: do not directly handle subsys reset fallout
  lpfc_nvmet: implement 'host_traddr'
  nvme-fcloop: implement 'host_traddr'
  nvmet-fc: implement host_traddr()
  nvmet-rdma: implement host_traddr()
  nvmet-tcp: implement host_traddr()
  nvmet: add 'host_traddr' callback for debugfs
  nvmet: add debugfs support
  mailmap: add entry for Weiwen Hu
  nvme: rename CDR/MORE/DNR to NVME_STATUS_*
  nvme: fix status magic numbers
  nvme: rename nvme_sc_to_pr_err to nvme_status_to_pr_err
  nvme: split device add from initialization
  nvme: fc: split controller bringup handling
  nvme: rdma: split controller bringup handling
  nvme: tcp: split controller bringup handling
  ...
This commit is contained in:
commit 6b43537fae
diff --git a/.mailmap b/.mailmap
@@ -685,6 +685,7 @@ Vivien Didelot <vivien.didelot@gmail.com> <vivien.didelot@savoirfairelinux.com>
 Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
+Weiwen Hu <huweiwen@linux.alibaba.com> <sehuww@mail.scut.edu.cn>
 WeiXiong Liao <gmpy.liaowx@gmail.com> <liaoweixiong@allwinnertech.com>
 Wen Gong <quic_wgong@quicinc.com> <wgong@codeaurora.org>
 Wesley Cheng <quic_wcheng@quicinc.com> <wcheng@codeaurora.org>
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
@@ -1388,7 +1388,7 @@ static void devm_apple_nvme_mempool_destroy(void *data)
 	mempool_destroy(data);
 }
 
-static int apple_nvme_probe(struct platform_device *pdev)
+static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct apple_nvme *anv;
@@ -1396,7 +1396,7 @@ static int apple_nvme_probe(struct platform_device *pdev)
 
 	anv = devm_kzalloc(dev, sizeof(*anv), GFP_KERNEL);
 	if (!anv)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	anv->dev = get_device(dev);
 	anv->adminq.is_adminq = true;
@@ -1516,10 +1516,30 @@ static int apple_nvme_probe(struct platform_device *pdev)
 		goto put_dev;
 	}
 
+	return anv;
+put_dev:
+	put_device(anv->dev);
+	return ERR_PTR(ret);
+}
+
+static int apple_nvme_probe(struct platform_device *pdev)
+{
+	struct apple_nvme *anv;
+	int ret;
+
+	anv = apple_nvme_alloc(pdev);
+	if (IS_ERR(anv))
+		return PTR_ERR(anv);
+
+	ret = nvme_add_ctrl(&anv->ctrl);
+	if (ret)
+		goto out_put_ctrl;
+
 	anv->ctrl.admin_q = blk_mq_alloc_queue(&anv->admin_tagset, NULL, NULL);
 	if (IS_ERR(anv->ctrl.admin_q)) {
 		ret = -ENOMEM;
-		goto put_dev;
+		anv->ctrl.admin_q = NULL;
+		goto out_uninit_ctrl;
 	}
 
 	nvme_reset_ctrl(&anv->ctrl);
@@ -1527,8 +1547,10 @@ static int apple_nvme_probe(struct platform_device *pdev)
 
 	return 0;
 
-put_dev:
-	put_device(anv->dev);
+out_uninit_ctrl:
+	nvme_uninit_ctrl(&anv->ctrl);
+out_put_ctrl:
+	nvme_put_ctrl(&anv->ctrl);
 	return ret;
 }
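The apple.c rework above is the pattern the whole series applies: a pure allocation helper that reports failure through an error-encoding pointer, and a probe function that decodes it and only then registers the controller with nvme_add_ctrl(). A minimal userspace sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() convention (the three helpers normally come from <linux/err.h>; they are re-implemented here purely for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* Userspace stand-ins for the <linux/err.h> helpers used in the diff. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct apple_nvme_like { int dummy; };

/* Allocation phase: returns a valid object or an encoded errno. */
static struct apple_nvme_like *alloc_phase(int fail)
{
	struct apple_nvme_like *obj;

	if (fail)
		return ERR_PTR(-ENOMEM);	/* like apple_nvme_alloc() */
	obj = malloc(sizeof(*obj));
	if (!obj)
		return ERR_PTR(-ENOMEM);
	return obj;
}

/* Probe phase: decodes the pointer exactly like apple_nvme_probe(). */
static int probe_phase(int fail)
{
	struct apple_nvme_like *obj = alloc_phase(fail);

	if (IS_ERR(obj))
		return PTR_ERR(obj);
	free(obj);
	return 0;
}

int main(void)
{
	printf("ok path:   %d\n", probe_phase(0));	/* 0 */
	printf("fail path: %d\n", probe_phase(1));	/* -12 == -ENOMEM */
	return 0;
}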
diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
@@ -173,7 +173,7 @@ static const char * const nvme_statuses[] = {
 
 const char *nvme_get_error_status_str(u16 status)
 {
-	status &= 0x7ff;
+	status &= NVME_SCT_SC_MASK;
 	if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status])
 		return nvme_statuses[status];
 	return "Unknown";
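All of the new names carve up the same 16-bit completion status kept in nvme_req(req)->status: bits 7:0 are the Status Code (NVME_SC_MASK), bits 10:8 the Status Code Type (NVME_SCT_MASK, extracted by NVME_SCT()), bits 12:11 the Command Retry Delay index (NVME_STATUS_CRD, hence the ">> 11" in nvme_retry_req()), and the MORE/DNR flags sit above those. A standalone decoder built from the mask values visible in this diff; the MORE and DNR bit positions (13 and 14) follow the NVMe completion-status layout and are spelled out here as an assumption for illustration:

#include <stdio.h>
#include <stdint.h>

#define NVME_SC_MASK		0x00ff	/* Status Code */
#define NVME_SCT_MASK		0x0700	/* Status Code Type */
#define NVME_SCT_SC_MASK	0x07ff	/* SCT + SC, as masked in the diff */
#define NVME_STATUS_CRD		0x1800	/* Command Retry Delay index */
#define NVME_STATUS_MORE	(1 << 13)	/* assumed bit position */
#define NVME_STATUS_DNR		(1 << 14)	/* assumed bit position */
#define NVME_SCT(status)	(((status) >> 8) & 7)
#define NVME_SCT_PATH		0x0300	/* 'path related status' type */

int main(void)
{
	/* Example: a path-type error with the Do Not Retry bit set. */
	uint16_t status = NVME_STATUS_DNR | 0x0300 | 0x02;

	printf("sct=%u sc=0x%02x crd=%u%s%s path=%d\n",
	       (unsigned)NVME_SCT(status),
	       (unsigned)(status & NVME_SC_MASK),
	       (unsigned)((status & NVME_STATUS_CRD) >> 11),
	       status & NVME_STATUS_MORE ? " MORE" : "",
	       status & NVME_STATUS_DNR ? " DNR" : "",
	       (status & NVME_SCT_MASK) == NVME_SCT_PATH);
	return 0;
}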
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
@@ -110,7 +110,7 @@ struct workqueue_struct *nvme_delete_wq;
 EXPORT_SYMBOL_GPL(nvme_delete_wq);
 
 static LIST_HEAD(nvme_subsystems);
-static DEFINE_MUTEX(nvme_subsystems_lock);
+DEFINE_MUTEX(nvme_subsystems_lock);
 
 static DEFINE_IDA(nvme_instance_ida);
 static dev_t nvme_ctrl_base_chr_devt;
@@ -261,7 +261,7 @@ void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
 
 static blk_status_t nvme_error_status(u16 status)
 {
-	switch (status & 0x7ff) {
+	switch (status & NVME_SCT_SC_MASK) {
 	case NVME_SC_SUCCESS:
 		return BLK_STS_OK;
 	case NVME_SC_CAP_EXCEEDED:
@@ -307,7 +307,7 @@ static void nvme_retry_req(struct request *req)
 	u16 crd;
 
 	/* The mask and shift result must be <= 3 */
-	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
+	crd = (nvme_req(req)->status & NVME_STATUS_CRD) >> 11;
 	if (crd)
 		delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;
 
@@ -329,10 +329,10 @@ static void nvme_log_error(struct request *req)
 			nvme_sect_to_lba(ns->head, blk_rq_pos(req)),
 			blk_rq_bytes(req) >> ns->head->lba_shift,
 			nvme_get_error_status_str(nr->status),
-			nr->status >> 8 & 7,	/* Status Code Type */
-			nr->status & 0xff,	/* Status Code */
-			nr->status & NVME_SC_MORE ? "MORE " : "",
-			nr->status & NVME_SC_DNR ? "DNR " : "");
+			NVME_SCT(nr->status),		/* Status Code Type */
+			nr->status & NVME_SC_MASK,	/* Status Code */
+			nr->status & NVME_STATUS_MORE ? "MORE " : "",
+			nr->status & NVME_STATUS_DNR ? "DNR " : "");
 		return;
 	}
 
@@ -341,10 +341,10 @@ static void nvme_log_error(struct request *req)
 		nvme_get_admin_opcode_str(nr->cmd->common.opcode),
 		nr->cmd->common.opcode,
 		nvme_get_error_status_str(nr->status),
-		nr->status >> 8 & 7,	/* Status Code Type */
-		nr->status & 0xff,	/* Status Code */
-		nr->status & NVME_SC_MORE ? "MORE " : "",
-		nr->status & NVME_SC_DNR ? "DNR " : "");
+		NVME_SCT(nr->status),		/* Status Code Type */
+		nr->status & NVME_SC_MASK,	/* Status Code */
+		nr->status & NVME_STATUS_MORE ? "MORE " : "",
+		nr->status & NVME_STATUS_DNR ? "DNR " : "");
 }
 
 static void nvme_log_err_passthru(struct request *req)
@@ -359,10 +359,10 @@ static void nvme_log_err_passthru(struct request *req)
 		nvme_get_admin_opcode_str(nr->cmd->common.opcode),
 		nr->cmd->common.opcode,
 		nvme_get_error_status_str(nr->status),
-		nr->status >> 8 & 7,	/* Status Code Type */
-		nr->status & 0xff,	/* Status Code */
-		nr->status & NVME_SC_MORE ? "MORE " : "",
-		nr->status & NVME_SC_DNR ? "DNR " : "",
+		NVME_SCT(nr->status),		/* Status Code Type */
+		nr->status & NVME_SC_MASK,	/* Status Code */
+		nr->status & NVME_STATUS_MORE ? "MORE " : "",
+		nr->status & NVME_STATUS_DNR ? "DNR " : "",
 		nr->cmd->common.cdw10,
 		nr->cmd->common.cdw11,
 		nr->cmd->common.cdw12,
@@ -384,11 +384,11 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
 		return COMPLETE;
 
 	if (blk_noretry_request(req) ||
-	    (nvme_req(req)->status & NVME_SC_DNR) ||
+	    (nvme_req(req)->status & NVME_STATUS_DNR) ||
 	    nvme_req(req)->retries >= nvme_max_retries)
 		return COMPLETE;
 
-	if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED)
+	if ((nvme_req(req)->status & NVME_SCT_SC_MASK) == NVME_SC_AUTH_REQUIRED)
 		return AUTHENTICATE;
 
 	if (req->cmd_flags & REQ_NVME_MPATH) {
@@ -1256,7 +1256,7 @@ EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);
 
 /*
  * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
  *
  * The host should send Keep Alive commands at half of the Keep Alive Timeout
  * accounting for transport roundtrip times [..].
  */
@@ -2286,6 +2286,32 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
 	return ret;
 }
 
+int nvme_ns_get_unique_id(struct nvme_ns *ns, u8 id[16],
+		enum blk_unique_id type)
+{
+	struct nvme_ns_ids *ids = &ns->head->ids;
+
+	if (type != BLK_UID_EUI64)
+		return -EINVAL;
+
+	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) {
+		memcpy(id, &ids->nguid, sizeof(ids->nguid));
+		return sizeof(ids->nguid);
+	}
+	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) {
+		memcpy(id, &ids->eui64, sizeof(ids->eui64));
+		return sizeof(ids->eui64);
+	}
+
+	return -EINVAL;
+}
+
+static int nvme_get_unique_id(struct gendisk *disk, u8 id[16],
+		enum blk_unique_id type)
+{
+	return nvme_ns_get_unique_id(disk->private_data, id, type);
+}
+
 #ifdef CONFIG_BLK_SED_OPAL
 static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
 		bool send)
@@ -2341,6 +2367,7 @@ const struct block_device_operations nvme_bdev_ops = {
 	.open		= nvme_open,
 	.release	= nvme_release,
 	.getgeo		= nvme_getgeo,
+	.get_unique_id	= nvme_get_unique_id,
 	.report_zones	= nvme_report_zones,
 	.pr_ops		= &nvme_pr_ops,
 };
@@ -3941,7 +3968,7 @@ static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
 
 static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info)
 {
-	int ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
+	int ret = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
 
 	if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) {
 		dev_err(ns->ctrl->device,
@@ -3957,7 +3984,7 @@ out:
 	 *
 	 * TODO: we should probably schedule a delayed retry here.
	 */
-	if (ret > 0 && (ret & NVME_SC_DNR))
+	if (ret > 0 && (ret & NVME_STATUS_DNR))
 		nvme_ns_remove(ns);
 }
 
@@ -4148,7 +4175,7 @@ static void nvme_scan_work(struct work_struct *work)
 		 * they report) but don't actually support it.
 		 */
 		ret = nvme_scan_ns_list(ctrl);
-		if (ret > 0 && ret & NVME_SC_DNR)
+		if (ret > 0 && ret & NVME_STATUS_DNR)
 			nvme_scan_ns_sequential(ctrl);
 	}
 	mutex_unlock(&ctrl->scan_lock);
@@ -4668,6 +4695,9 @@ static void nvme_free_ctrl(struct device *dev)
  * Initialize a NVMe controller structures. This needs to be called during
  * earliest initialization so that we have the initialized structured around
  * during probing.
+ *
+ * On success, the caller must use the nvme_put_ctrl() to release this when
+ * needed, which also invokes the ops->free_ctrl() callback.
  */
 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 		const struct nvme_ctrl_ops *ops, unsigned long quirks)
@@ -4716,6 +4746,12 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 		goto out;
 	ctrl->instance = ret;
 
+	ret = nvme_auth_init_ctrl(ctrl);
+	if (ret)
+		goto out_release_instance;
+
+	nvme_mpath_init_ctrl(ctrl);
+
 	device_initialize(&ctrl->ctrl_device);
 	ctrl->device = &ctrl->ctrl_device;
 	ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt),
@@ -4728,16 +4764,36 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 	ctrl->device->groups = nvme_dev_attr_groups;
 	ctrl->device->release = nvme_free_ctrl;
 	dev_set_drvdata(ctrl->device, ctrl);
+
+	return ret;
+
+out_release_instance:
+	ida_free(&nvme_instance_ida, ctrl->instance);
+out:
+	if (ctrl->discard_page)
+		__free_page(ctrl->discard_page);
+	cleanup_srcu_struct(&ctrl->srcu);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_init_ctrl);
+
+/*
+ * On success, returns with an elevated controller reference and caller must
+ * use nvme_uninit_ctrl() to properly free resources associated with the ctrl.
+ */
+int nvme_add_ctrl(struct nvme_ctrl *ctrl)
+{
+	int ret;
+
 	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
 	if (ret)
-		goto out_release_instance;
+		return ret;
 
-	nvme_get_ctrl(ctrl);
 	cdev_init(&ctrl->cdev, &nvme_dev_fops);
-	ctrl->cdev.owner = ops->module;
+	ctrl->cdev.owner = ctrl->ops->module;
 	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
 	if (ret)
-		goto out_free_name;
+		return ret;
 
 	/*
	 * Initialize latency tolerance controls. The sysfs files won't
@@ -4748,28 +4804,11 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 		min(default_ps_max_latency_us, (unsigned long)S32_MAX));
 
 	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
-	nvme_mpath_init_ctrl(ctrl);
-	ret = nvme_auth_init_ctrl(ctrl);
-	if (ret)
-		goto out_free_cdev;
+	nvme_get_ctrl(ctrl);
 
 	return 0;
-out_free_cdev:
-	nvme_fault_inject_fini(&ctrl->fault_inject);
-	dev_pm_qos_hide_latency_tolerance(ctrl->device);
-	cdev_device_del(&ctrl->cdev, ctrl->device);
-out_free_name:
-	nvme_put_ctrl(ctrl);
-	kfree_const(ctrl->device->kobj.name);
-out_release_instance:
-	ida_free(&nvme_instance_ida, ctrl->instance);
-out:
-	if (ctrl->discard_page)
-		__free_page(ctrl->discard_page);
-	cleanup_srcu_struct(&ctrl->srcu);
-	return ret;
 }
-EXPORT_SYMBOL_GPL(nvme_init_ctrl);
+EXPORT_SYMBOL_GPL(nvme_add_ctrl);
 
 /* let I/O to all namespaces fail in preparation for surprise removal */
 void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl)
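nvme_ns_get_unique_id() answers a BLK_UID_EUI64 query with the namespace's 16-byte NGUID when the controller reports one, falling back to the 8-byte EUI-64, and uses memchr_inv() to treat an all-zero identifier as "not reported". A userspace sketch of that selection logic (memchr_inv() is re-implemented here; the identifier sizes follow the fields shown in the diff):

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* Stand-in for the kernel's memchr_inv(): nonzero if any byte differs from c. */
static int memchr_inv_like(const void *p, int c, size_t n)
{
	const unsigned char *s = p;
	size_t i;

	for (i = 0; i < n; i++)
		if (s[i] != (unsigned char)c)
			return 1;
	return 0;
}

struct ns_ids {
	unsigned char nguid[16];	/* preferred: globally unique */
	unsigned char eui64[8];		/* fallback */
};

/* Mirrors the preference order in nvme_ns_get_unique_id(). */
static int get_unique_id(const struct ns_ids *ids, unsigned char id[16])
{
	if (memchr_inv_like(ids->nguid, 0, sizeof(ids->nguid))) {
		memcpy(id, ids->nguid, sizeof(ids->nguid));
		return sizeof(ids->nguid);
	}
	if (memchr_inv_like(ids->eui64, 0, sizeof(ids->eui64))) {
		memcpy(id, ids->eui64, sizeof(ids->eui64));
		return sizeof(ids->eui64);
	}
	return -EINVAL;	/* neither identifier reported */
}

int main(void)
{
	struct ns_ids ids = { .eui64 = { 0x00, 0x25, 0x38, 1, 2, 3, 4, 5 } };
	unsigned char id[16];

	/* Prints 8: only the EUI-64 fallback is populated. */
	printf("got %d-byte identifier\n", get_unique_id(&ids, id));
	return 0;
}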
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
@@ -187,7 +187,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
 	if (unlikely(ret != 0))
 		dev_err(ctrl->device,
 			"Property Get error: %d, offset %#x\n",
-			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
+			ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off);
 
 	return ret;
 }
@@ -233,7 +233,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
 	if (unlikely(ret != 0))
 		dev_err(ctrl->device,
 			"Property Get error: %d, offset %#x\n",
-			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
+			ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvmf_reg_read64);
@@ -275,11 +275,26 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
 	if (unlikely(ret))
 		dev_err(ctrl->device,
 			"Property Set error: %d, offset %#x\n",
-			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
+			ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvmf_reg_write32);
 
+int nvmf_subsystem_reset(struct nvme_ctrl *ctrl)
+{
+	int ret;
+
+	if (!nvme_wait_reset(ctrl))
+		return -EBUSY;
+
+	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, NVME_SUBSYS_RESET);
+	if (ret)
+		return ret;
+
+	return nvme_try_sched_reset(ctrl);
+}
+EXPORT_SYMBOL_GPL(nvmf_subsystem_reset);
+
 /**
  * nvmf_log_connect_error() - Error-parsing-diagnostic print out function for
  *	connect() errors.
@@ -295,7 +310,7 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
 		int errval, int offset, struct nvme_command *cmd,
 		struct nvmf_connect_data *data)
 {
-	int err_sctype = errval & ~NVME_SC_DNR;
+	int err_sctype = errval & ~NVME_STATUS_DNR;
 
 	if (errval < 0) {
 		dev_err(ctrl->device,
@@ -573,7 +588,7 @@ EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
 */
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl, int status)
 {
-	if (status > 0 && (status & NVME_SC_DNR))
+	if (status > 0 && (status & NVME_STATUS_DNR))
 		return false;
 
 	if (status == -EKEYREJECTED)
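nvmf_subsystem_reset() gives the fabrics transports the same NSSR sequence the PCIe driver performs: reserve the reset with nvme_wait_reset(), write NVME_SUBSYS_RESET to the NSSR property, then schedule it with nvme_try_sched_reset(). The reset signature (0x4E564D65, still visible in the old nvme_reset_subsystem() inline further down) is just the ASCII bytes "NVMe", which a two-line check confirms:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t nssr = 0x4E564D65;	/* NVME_SUBSYS_RESET, from the diff */

	/* Prints "NVMe": the signature is the ASCII string, high byte first. */
	putchar(nssr >> 24);
	putchar((nssr >> 16) & 0xff);
	putchar((nssr >> 8) & 0xff);
	putchar(nssr & 0xff);
	putchar('\n');
	return 0;
}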
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
@@ -217,6 +217,7 @@ static inline unsigned int nvmf_nr_io_queues(struct nvmf_ctrl_options *opts)
 int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
 int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
 int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
+int nvmf_subsystem_reset(struct nvme_ctrl *ctrl);
 int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl);
 int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid);
 int nvmf_register_transport(struct nvmf_transport_ops *ops);
diff --git a/drivers/nvme/host/fault_inject.c b/drivers/nvme/host/fault_inject.c
@@ -75,7 +75,7 @@ void nvme_should_fail(struct request *req)
 		/* inject status code and DNR bit */
 		status = fault_inject->status;
 		if (fault_inject->dont_retry)
-			status |= NVME_SC_DNR;
+			status |= NVME_STATUS_DNR;
 		nvme_req(req)->status = status;
 	}
 }
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
@@ -3132,7 +3132,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	if (ctrl->ctrl.icdoff) {
 		dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
 				ctrl->ctrl.icdoff);
-		ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		ret = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		goto out_stop_keep_alive;
 	}
 
@@ -3140,7 +3140,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) {
 		dev_err(ctrl->ctrl.device,
 			"Mandatory sgls are not supported!\n");
-		ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		ret = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		goto out_stop_keep_alive;
 	}
 
@@ -3325,7 +3325,7 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
 		queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
 	} else {
 		if (portptr->port_state == FC_OBJSTATE_ONLINE) {
-			if (status > 0 && (status & NVME_SC_DNR))
+			if (status > 0 && (status & NVME_STATUS_DNR))
 				dev_warn(ctrl->ctrl.device,
 					 "NVME-FC{%d}: reconnect failure\n",
 					 ctrl->cnum);
@@ -3382,6 +3382,7 @@ static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
 	.reg_read32		= nvmf_reg_read32,
 	.reg_read64		= nvmf_reg_read64,
 	.reg_write32		= nvmf_reg_write32,
+	.subsystem_reset	= nvmf_subsystem_reset,
 	.free_ctrl		= nvme_fc_free_ctrl,
 	.submit_async_event	= nvme_fc_submit_async_event,
 	.delete_ctrl		= nvme_fc_delete_ctrl,
@@ -3444,12 +3445,11 @@ nvme_fc_existing_controller(struct nvme_fc_rport *rport,
 	return found;
 }
 
-static struct nvme_ctrl *
-nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+static struct nvme_fc_ctrl *
+nvme_fc_alloc_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
 {
 	struct nvme_fc_ctrl *ctrl;
-	unsigned long flags;
 	int ret, idx, ctrl_loss_tmo;
 
 	if (!(rport->remoteport.port_role &
@@ -3538,7 +3538,35 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	if (lport->dev)
 		ctrl->ctrl.numa_node = dev_to_node(lport->dev);
 
-	/* at this point, teardown path changes to ref counting on nvme ctrl */
+	return ctrl;
+
+out_free_queues:
+	kfree(ctrl->queues);
+out_free_ida:
+	put_device(ctrl->dev);
+	ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
+out_free_ctrl:
+	kfree(ctrl);
+out_fail:
+	/* exit via here doesn't follow ctlr ref points */
+	return ERR_PTR(ret);
+}
+
+static struct nvme_ctrl *
+nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+	struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
+{
+	struct nvme_fc_ctrl *ctrl;
+	unsigned long flags;
+	int ret;
+
+	ctrl = nvme_fc_alloc_ctrl(dev, opts, lport, rport);
+	if (IS_ERR(ctrl))
+		return ERR_CAST(ctrl);
+
+	ret = nvme_add_ctrl(&ctrl->ctrl);
+	if (ret)
+		goto out_put_ctrl;
+
 	ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
 			&nvme_fc_admin_mq_ops,
@@ -3584,6 +3612,7 @@ fail_ctrl:
 	/* initiate nvme ctrl ref counting teardown */
 	nvme_uninit_ctrl(&ctrl->ctrl);
 
+out_put_ctrl:
 	/* Remove core ctrl ref. */
 	nvme_put_ctrl(&ctrl->ctrl);
 
@@ -3597,20 +3626,8 @@ fail_ctrl:
 	nvme_fc_rport_get(rport);
 
 	return ERR_PTR(-EIO);
-
-out_free_queues:
-	kfree(ctrl->queues);
-out_free_ida:
-	put_device(ctrl->dev);
-	ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
-out_free_ctrl:
-	kfree(ctrl);
-out_fail:
-	/* exit via here doesn't follow ctlr ref points */
-	return ERR_PTR(ret);
 }
 
 
 struct nvmet_fc_traddr {
 	u64 nn;
 	u64 pn;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
@@ -17,6 +17,7 @@ MODULE_PARM_DESC(multipath,
 static const char *nvme_iopolicy_names[] = {
 	[NVME_IOPOLICY_NUMA]	= "numa",
 	[NVME_IOPOLICY_RR]	= "round-robin",
+	[NVME_IOPOLICY_QD]	= "queue-depth",
 };
 
 static int iopolicy = NVME_IOPOLICY_NUMA;
@@ -29,6 +30,8 @@ static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp)
 		iopolicy = NVME_IOPOLICY_NUMA;
 	else if (!strncmp(val, "round-robin", 11))
 		iopolicy = NVME_IOPOLICY_RR;
+	else if (!strncmp(val, "queue-depth", 11))
+		iopolicy = NVME_IOPOLICY_QD;
 	else
 		return -EINVAL;
 
@@ -43,7 +46,7 @@ static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp)
 module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy,
 	&iopolicy, 0644);
 MODULE_PARM_DESC(iopolicy,
-	"Default multipath I/O policy; 'numa' (default) or 'round-robin'");
+	"Default multipath I/O policy; 'numa' (default), 'round-robin' or 'queue-depth'");
 
 void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
 {
@@ -83,7 +86,7 @@ void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
 void nvme_failover_req(struct request *req)
 {
 	struct nvme_ns *ns = req->q->queuedata;
-	u16 status = nvme_req(req)->status & 0x7ff;
+	u16 status = nvme_req(req)->status & NVME_SCT_SC_MASK;
 	unsigned long flags;
 	struct bio *bio;
 
@@ -128,6 +131,11 @@ void nvme_mpath_start_request(struct request *rq)
 	struct nvme_ns *ns = rq->q->queuedata;
 	struct gendisk *disk = ns->head->disk;
 
+	if (READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) {
+		atomic_inc(&ns->ctrl->nr_active);
+		nvme_req(rq)->flags |= NVME_MPATH_CNT_ACTIVE;
+	}
+
 	if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq))
 		return;
 
@@ -141,6 +149,9 @@ void nvme_mpath_end_request(struct request *rq)
 {
 	struct nvme_ns *ns = rq->q->queuedata;
 
+	if (nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE)
+		atomic_dec_if_positive(&ns->ctrl->nr_active);
+
 	if (!(nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
 		return;
 	bdev_end_io_acct(ns->head->disk->part0, req_op(rq),
@@ -291,10 +302,15 @@ static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
 	return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings);
 }
 
-static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
-		int node, struct nvme_ns *old)
+static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head)
 {
 	struct nvme_ns *ns, *found = NULL;
+	int node = numa_node_id();
+	struct nvme_ns *old = srcu_dereference(head->current_path[node],
+			&head->srcu);
+
+	if (unlikely(!old))
+		return __nvme_find_path(head, node);
 
 	if (list_is_singular(&head->list)) {
 		if (nvme_path_is_disabled(old))
@@ -334,13 +350,49 @@ out:
 	return found;
 }
 
+static struct nvme_ns *nvme_queue_depth_path(struct nvme_ns_head *head)
+{
+	struct nvme_ns *best_opt = NULL, *best_nonopt = NULL, *ns;
+	unsigned int min_depth_opt = UINT_MAX, min_depth_nonopt = UINT_MAX;
+	unsigned int depth;
+
+	list_for_each_entry_rcu(ns, &head->list, siblings) {
+		if (nvme_path_is_disabled(ns))
+			continue;
+
+		depth = atomic_read(&ns->ctrl->nr_active);
+
+		switch (ns->ana_state) {
+		case NVME_ANA_OPTIMIZED:
+			if (depth < min_depth_opt) {
+				min_depth_opt = depth;
+				best_opt = ns;
+			}
+			break;
+		case NVME_ANA_NONOPTIMIZED:
+			if (depth < min_depth_nonopt) {
+				min_depth_nonopt = depth;
+				best_nonopt = ns;
+			}
+			break;
+		default:
+			break;
+		}
+
+		if (min_depth_opt == 0)
+			return best_opt;
+	}
+
+	return best_opt ? best_opt : best_nonopt;
+}
+
 static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
 {
 	return nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE &&
 		ns->ana_state == NVME_ANA_OPTIMIZED;
 }
 
-inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
+static struct nvme_ns *nvme_numa_path(struct nvme_ns_head *head)
 {
 	int node = numa_node_id();
 	struct nvme_ns *ns;
@@ -348,14 +400,23 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
 	ns = srcu_dereference(head->current_path[node], &head->srcu);
 	if (unlikely(!ns))
 		return __nvme_find_path(head, node);
-
-	if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR)
-		return nvme_round_robin_path(head, node, ns);
 	if (unlikely(!nvme_path_is_optimized(ns)))
 		return __nvme_find_path(head, node);
 	return ns;
 }
 
+inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
+{
+	switch (READ_ONCE(head->subsys->iopolicy)) {
+	case NVME_IOPOLICY_QD:
+		return nvme_queue_depth_path(head);
+	case NVME_IOPOLICY_RR:
+		return nvme_round_robin_path(head);
+	default:
+		return nvme_numa_path(head);
+	}
+}
+
 static bool nvme_available_path(struct nvme_ns_head *head)
 {
 	struct nvme_ns *ns;
@@ -427,6 +488,21 @@ static void nvme_ns_head_release(struct gendisk *disk)
 	nvme_put_ns_head(disk->private_data);
 }
 
+static int nvme_ns_head_get_unique_id(struct gendisk *disk, u8 id[16],
+		enum blk_unique_id type)
+{
+	struct nvme_ns_head *head = disk->private_data;
+	struct nvme_ns *ns;
+	int srcu_idx, ret = -EWOULDBLOCK;
+
+	srcu_idx = srcu_read_lock(&head->srcu);
+	ns = nvme_find_path(head);
+	if (ns)
+		ret = nvme_ns_get_unique_id(ns, id, type);
+	srcu_read_unlock(&head->srcu, srcu_idx);
+	return ret;
+}
+
 #ifdef CONFIG_BLK_DEV_ZONED
 static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector,
 		unsigned int nr_zones, report_zones_cb cb, void *data)
@@ -454,6 +530,7 @@ const struct block_device_operations nvme_ns_head_ops = {
 	.ioctl		= nvme_ns_head_ioctl,
 	.compat_ioctl	= blkdev_compat_ptr_ioctl,
 	.getgeo		= nvme_getgeo,
+	.get_unique_id	= nvme_ns_head_get_unique_id,
 	.report_zones	= nvme_ns_head_report_zones,
 	.pr_ops		= &nvme_pr_ops,
 };
@@ -785,6 +862,29 @@ static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
 			nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
 }
 
+static void nvme_subsys_iopolicy_update(struct nvme_subsystem *subsys,
+		int iopolicy)
+{
+	struct nvme_ctrl *ctrl;
+	int old_iopolicy = READ_ONCE(subsys->iopolicy);
+
+	if (old_iopolicy == iopolicy)
+		return;
+
+	WRITE_ONCE(subsys->iopolicy, iopolicy);
+
+	/* iopolicy changes clear the mpath by design */
+	mutex_lock(&nvme_subsystems_lock);
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+		nvme_mpath_clear_ctrl_paths(ctrl);
+	mutex_unlock(&nvme_subsystems_lock);
+
+	pr_notice("subsysnqn %s iopolicy changed from %s to %s\n",
+			subsys->subnqn,
+			nvme_iopolicy_names[old_iopolicy],
+			nvme_iopolicy_names[iopolicy]);
+}
+
 static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
@@ -794,7 +894,7 @@ static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
 
 	for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
 		if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
-			WRITE_ONCE(subsys->iopolicy, i);
+			nvme_subsys_iopolicy_update(subsys, i);
 			return count;
 		}
 	}
@@ -902,6 +1002,9 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 	    !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA))
 		return 0;
 
+	/* initialize this in the identify path to cover controller resets */
+	atomic_set(&ctrl->nr_active, 0);
+
 	if (!ctrl->max_namespaces ||
 	    ctrl->max_namespaces > le32_to_cpu(id->nn)) {
 		dev_err(ctrl->device,
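The new queue-depth policy picks, on every submission, the path whose controller currently has the fewest requests in flight, preferring ANA-optimized paths and short-circuiting as soon as it sees an idle optimized one. A self-contained model of nvme_queue_depth_path()'s selection loop (plain ints stand in for the atomic nr_active counters, and the path names are made up for the demo):

#include <stdio.h>
#include <limits.h>

enum ana_state { ANA_OPTIMIZED, ANA_NONOPTIMIZED, ANA_INACCESSIBLE };

struct path {
	const char *name;
	enum ana_state ana_state;
	unsigned int nr_active;	/* atomic_t in the kernel */
	int disabled;
};

/* Mirrors the selection loop in nvme_queue_depth_path(). */
static struct path *queue_depth_path(struct path *paths, int n)
{
	struct path *best_opt = NULL, *best_nonopt = NULL;
	unsigned int min_opt = UINT_MAX, min_nonopt = UINT_MAX;
	int i;

	for (i = 0; i < n; i++) {
		struct path *p = &paths[i];

		if (p->disabled)
			continue;
		switch (p->ana_state) {
		case ANA_OPTIMIZED:
			if (p->nr_active < min_opt) {
				min_opt = p->nr_active;
				best_opt = p;
			}
			break;
		case ANA_NONOPTIMIZED:
			if (p->nr_active < min_nonopt) {
				min_nonopt = p->nr_active;
				best_nonopt = p;
			}
			break;
		default:
			break;
		}
		if (min_opt == 0)
			return best_opt;	/* idle optimized path wins */
	}
	return best_opt ? best_opt : best_nonopt;
}

int main(void)
{
	struct path paths[] = {
		{ "nvme0c0n1", ANA_OPTIMIZED,    7, 0 },
		{ "nvme0c1n1", ANA_OPTIMIZED,    2, 0 },
		{ "nvme0c2n1", ANA_NONOPTIMIZED, 0, 0 },
	};

	/* Chooses nvme0c1n1: least-loaded among the optimized paths. */
	printf("%s\n", queue_depth_path(paths, 3)->name);
	return 0;
}

At runtime the policy is selected per subsystem by writing "queue-depth" to the subsystem's iopolicy sysfs attribute (handled by nvme_subsys_iopolicy_store() above), or as the default via the nvme_core iopolicy module parameter.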
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
@@ -49,6 +49,7 @@ extern unsigned int admin_timeout;
 extern struct workqueue_struct *nvme_wq;
 extern struct workqueue_struct *nvme_reset_wq;
 extern struct workqueue_struct *nvme_delete_wq;
+extern struct mutex nvme_subsystems_lock;
 
 /*
  * List of workarounds for devices that required behavior not specified in
@@ -195,6 +196,7 @@ enum {
 	NVME_REQ_CANCELLED		= (1 << 0),
 	NVME_REQ_USERCMD		= (1 << 1),
 	NVME_MPATH_IO_STATS		= (1 << 2),
+	NVME_MPATH_CNT_ACTIVE		= (1 << 3),
 };
 
 static inline struct nvme_request *nvme_req(struct request *req)
@@ -360,6 +362,7 @@ struct nvme_ctrl {
 	size_t ana_log_size;
 	struct timer_list anatt_timer;
 	struct work_struct ana_work;
+	atomic_t nr_active;
 #endif
 
 #ifdef CONFIG_NVME_HOST_AUTH
@@ -408,6 +411,7 @@ static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
 enum nvme_iopolicy {
 	NVME_IOPOLICY_NUMA,
 	NVME_IOPOLICY_RR,
+	NVME_IOPOLICY_QD,
 };
 
 struct nvme_subsystem {
@@ -551,6 +555,7 @@ struct nvme_ctrl_ops {
 	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
 	void (*free_ctrl)(struct nvme_ctrl *ctrl);
 	void (*submit_async_event)(struct nvme_ctrl *ctrl);
+	int (*subsystem_reset)(struct nvme_ctrl *ctrl);
 	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
 	void (*stop_ctrl)(struct nvme_ctrl *ctrl);
 	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
@@ -649,18 +654,9 @@ int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
 
 static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
 {
-	int ret;
-
-	if (!ctrl->subsystem)
+	if (!ctrl->subsystem || !ctrl->ops->subsystem_reset)
 		return -ENOTTY;
-	if (!nvme_wait_reset(ctrl))
-		return -EBUSY;
-
-	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
-	if (ret)
-		return ret;
-
-	return nvme_try_sched_reset(ctrl);
+	return ctrl->ops->subsystem_reset(ctrl);
 }
 
 /*
@@ -689,7 +685,7 @@ static inline u32 nvme_bytes_to_numd(size_t len)
 
 static inline bool nvme_is_ana_error(u16 status)
 {
-	switch (status & 0x7ff) {
+	switch (status & NVME_SCT_SC_MASK) {
 	case NVME_SC_ANA_TRANSITION:
 	case NVME_SC_ANA_INACCESSIBLE:
 	case NVME_SC_ANA_PERSISTENT_LOSS:
@@ -702,7 +698,7 @@ static inline bool nvme_is_ana_error(u16 status)
 static inline bool nvme_is_path_error(u16 status)
 {
 	/* check for a status code type of 'path related status' */
-	return (status & 0x700) == 0x300;
+	return (status & NVME_SCT_MASK) == NVME_SCT_PATH;
 }
 
 /*
@@ -792,6 +788,7 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown);
 int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 		const struct nvme_ctrl_ops *ops, unsigned long quirks);
+int nvme_add_ctrl(struct nvme_ctrl *ctrl);
 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
 void nvme_start_ctrl(struct nvme_ctrl *ctrl);
 void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
@@ -877,7 +874,7 @@ enum {
 	NVME_SUBMIT_NOWAIT = (__force nvme_submit_flags_t)(1 << 1),
 	/* Set BLK_MQ_REQ_RESERVED when allocating request */
 	NVME_SUBMIT_RESERVED = (__force nvme_submit_flags_t)(1 << 2),
-	/* Retry command when NVME_SC_DNR is not set in the result */
+	/* Retry command when NVME_STATUS_DNR is not set in the result */
 	NVME_SUBMIT_RETRY = (__force nvme_submit_flags_t)(1 << 3),
 };
 
@@ -1062,6 +1059,9 @@ static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
 }
 #endif /* CONFIG_NVME_MULTIPATH */
 
+int nvme_ns_get_unique_id(struct nvme_ns *ns, u8 id[16],
+		enum blk_unique_id type);
+
 struct nvme_zone_info {
 	u64 zone_size;
 	unsigned int max_open_zones;
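With the new ->subsystem_reset hook, nvme_reset_subsystem() collapses to a capability test plus an indirect call, and transports that cannot perform NSSR simply leave the hook NULL to keep returning -ENOTTY. The shape of that dispatch, reduced to standalone C (all names here are illustrative, not kernel API):

#include <stdio.h>
#include <errno.h>
#include <stddef.h>

struct ctrl;

struct ctrl_ops {
	/* optional hook, like ->subsystem_reset in the diff */
	int (*subsystem_reset)(struct ctrl *ctrl);
};

struct ctrl {
	int subsystem;	/* controller advertises subsystem-reset support */
	const struct ctrl_ops *ops;
};

/* Mirrors the rewritten nvme_reset_subsystem() inline. */
static int reset_subsystem(struct ctrl *ctrl)
{
	if (!ctrl->subsystem || !ctrl->ops->subsystem_reset)
		return -ENOTTY;
	return ctrl->ops->subsystem_reset(ctrl);
}

static int fake_pci_subsystem_reset(struct ctrl *ctrl)
{
	(void)ctrl;
	puts("NSSR written");
	return 0;
}

static const struct ctrl_ops pci_like_ops = {
	.subsystem_reset = fake_pci_subsystem_reset,
};
static const struct ctrl_ops no_nssr_ops = {
	.subsystem_reset = NULL,	/* transport opts out */
};

int main(void)
{
	struct ctrl pci = { 1, &pci_like_ops };
	struct ctrl loop = { 1, &no_nssr_ops };

	printf("pci:  %d\n", reset_subsystem(&pci));	/* 0 */
	printf("loop: %d\n", reset_subsystem(&loop));	/* -ENOTTY */
	return 0;
}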
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
@@ -1143,6 +1143,41 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
 	spin_unlock(&nvmeq->sq_lock);
 }
 
+static int nvme_pci_subsystem_reset(struct nvme_ctrl *ctrl)
+{
+	struct nvme_dev *dev = to_nvme_dev(ctrl);
+	int ret = 0;
+
+	/*
+	 * Taking the shutdown_lock ensures the BAR mapping is not being
+	 * altered by reset_work. Holding this lock before the RESETTING state
+	 * change, if successful, also ensures nvme_remove won't be able to
+	 * proceed to iounmap until we're done.
+	 */
+	mutex_lock(&dev->shutdown_lock);
+	if (!dev->bar_mapped_size) {
+		ret = -ENODEV;
+		goto unlock;
+	}
+
+	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
+		ret = -EBUSY;
+		goto unlock;
+	}
+
+	writel(NVME_SUBSYS_RESET, dev->bar + NVME_REG_NSSR);
+	nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE);
+
+	/*
+	 * Read controller status to flush the previous write and trigger a
+	 * pcie read error.
+	 */
+	readl(dev->bar + NVME_REG_CSTS);
+unlock:
+	mutex_unlock(&dev->shutdown_lock);
+	return ret;
+}
+
 static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
 {
 	struct nvme_command c = { };
@@ -2859,6 +2894,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
 	.reg_read64		= nvme_pci_reg_read64,
 	.free_ctrl		= nvme_pci_free_ctrl,
 	.submit_async_event	= nvme_pci_submit_async_event,
+	.subsystem_reset	= nvme_pci_subsystem_reset,
 	.get_address		= nvme_pci_get_address,
 	.print_device_info	= nvme_pci_print_device_info,
 	.supports_pci_p2pdma	= nvme_pci_supports_pci_p2pdma,
@@ -3015,6 +3051,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (IS_ERR(dev))
 		return PTR_ERR(dev);
 
+	result = nvme_add_ctrl(&dev->ctrl);
+	if (result)
+		goto out_put_ctrl;
+
 	result = nvme_dev_map(dev);
 	if (result)
 		goto out_uninit_ctrl;
@@ -3101,6 +3141,7 @@ out_dev_unmap:
 	nvme_dev_unmap(dev);
 out_uninit_ctrl:
 	nvme_uninit_ctrl(&dev->ctrl);
+out_put_ctrl:
 	nvme_put_ctrl(&dev->ctrl);
 	return result;
 }
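Design note on nvme_pci_subsystem_reset(): unlike the fabrics version, which can route the NSSR write through reg_write32(), the PCIe implementation has to assume the device falls off the bus the moment the reset takes effect. That is why it holds shutdown_lock to pin the BAR mapping, and why it reads CSTS immediately after the write: the read flushes the posted NSSR write and deliberately provokes the PCIe read error that hands the fallout to the normal error-recovery path, instead of the driver tripping over a dead mapping later.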
diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
@@ -72,12 +72,12 @@ static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
 	return nvme_submit_sync_cmd(ns->queue, c, data, data_len);
 }
 
-static int nvme_sc_to_pr_err(int nvme_sc)
+static int nvme_status_to_pr_err(int status)
 {
-	if (nvme_is_path_error(nvme_sc))
+	if (nvme_is_path_error(status))
 		return PR_STS_PATH_FAILED;
 
-	switch (nvme_sc & 0x7ff) {
+	switch (status & NVME_SCT_SC_MASK) {
 	case NVME_SC_SUCCESS:
 		return PR_STS_SUCCESS;
 	case NVME_SC_RESERVATION_CONFLICT:
@@ -121,7 +121,7 @@ static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
 	if (ret < 0)
 		return ret;
 
-	return nvme_sc_to_pr_err(ret);
+	return nvme_status_to_pr_err(ret);
 }
 
 static int nvme_pr_register(struct block_device *bdev, u64 old,
@@ -196,7 +196,7 @@ retry:
 	if (ret < 0)
 		return ret;
 
-	return nvme_sc_to_pr_err(ret);
+	return nvme_status_to_pr_err(ret);
 }
 
 static int nvme_pr_read_keys(struct block_device *bdev,
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
@@ -2201,6 +2201,7 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
 	.reg_read32		= nvmf_reg_read32,
 	.reg_read64		= nvmf_reg_read64,
 	.reg_write32		= nvmf_reg_write32,
+	.subsystem_reset	= nvmf_subsystem_reset,
 	.free_ctrl		= nvme_rdma_free_ctrl,
 	.submit_async_event	= nvme_rdma_submit_async_event,
 	.delete_ctrl		= nvme_rdma_delete_ctrl,
@@ -2237,12 +2238,11 @@ nvme_rdma_existing_controller(struct nvmf_ctrl_options *opts)
 	return found;
 }
 
-static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
+static struct nvme_rdma_ctrl *nvme_rdma_alloc_ctrl(struct device *dev,
 		struct nvmf_ctrl_options *opts)
 {
 	struct nvme_rdma_ctrl *ctrl;
 	int ret;
-	bool changed;
 
 	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
 	if (!ctrl)
@@ -2304,6 +2304,30 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 	if (ret)
 		goto out_kfree_queues;
 
+	return ctrl;
+
+out_kfree_queues:
+	kfree(ctrl->queues);
+out_free_ctrl:
+	kfree(ctrl);
+	return ERR_PTR(ret);
+}
+
+static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
+		struct nvmf_ctrl_options *opts)
+{
+	struct nvme_rdma_ctrl *ctrl;
+	bool changed;
+	int ret;
+
+	ctrl = nvme_rdma_alloc_ctrl(dev, opts);
+	if (IS_ERR(ctrl))
+		return ERR_CAST(ctrl);
+
+	ret = nvme_add_ctrl(&ctrl->ctrl);
+	if (ret)
+		goto out_put_ctrl;
+
 	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
 	WARN_ON_ONCE(!changed);
 
@@ -2322,15 +2346,11 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 
 out_uninit_ctrl:
 	nvme_uninit_ctrl(&ctrl->ctrl);
+out_put_ctrl:
 	nvme_put_ctrl(&ctrl->ctrl);
 	if (ret > 0)
 		ret = -EIO;
 	return ERR_PTR(ret);
-out_kfree_queues:
-	kfree(ctrl->queues);
-out_free_ctrl:
-	kfree(ctrl);
-	return ERR_PTR(ret);
 }
 
 static struct nvmf_transport_ops nvme_rdma_transport = {
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
@@ -2662,6 +2662,7 @@ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
 	.reg_read32		= nvmf_reg_read32,
 	.reg_read64		= nvmf_reg_read64,
 	.reg_write32		= nvmf_reg_write32,
+	.subsystem_reset	= nvmf_subsystem_reset,
 	.free_ctrl		= nvme_tcp_free_ctrl,
 	.submit_async_event	= nvme_tcp_submit_async_event,
 	.delete_ctrl		= nvme_tcp_delete_ctrl,
@@ -2686,7 +2687,7 @@ nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
 	return found;
 }
 
-static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
+static struct nvme_tcp_ctrl *nvme_tcp_alloc_ctrl(struct device *dev,
 		struct nvmf_ctrl_options *opts)
 {
 	struct nvme_tcp_ctrl *ctrl;
@@ -2761,6 +2762,28 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
 	if (ret)
 		goto out_kfree_queues;
 
+	return ctrl;
+out_kfree_queues:
+	kfree(ctrl->queues);
+out_free_ctrl:
+	kfree(ctrl);
+	return ERR_PTR(ret);
+}
+
+static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
+		struct nvmf_ctrl_options *opts)
+{
+	struct nvme_tcp_ctrl *ctrl;
+	int ret;
+
+	ctrl = nvme_tcp_alloc_ctrl(dev, opts);
+	if (IS_ERR(ctrl))
+		return ERR_CAST(ctrl);
+
+	ret = nvme_add_ctrl(&ctrl->ctrl);
+	if (ret)
+		goto out_put_ctrl;
+
 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
 		WARN_ON_ONCE(1);
 		ret = -EINTR;
@@ -2782,15 +2805,11 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
 
 out_uninit_ctrl:
 	nvme_uninit_ctrl(&ctrl->ctrl);
+out_put_ctrl:
 	nvme_put_ctrl(&ctrl->ctrl);
 	if (ret > 0)
 		ret = -EIO;
 	return ERR_PTR(ret);
-out_kfree_queues:
-	kfree(ctrl->queues);
-out_free_ctrl:
-	kfree(ctrl);
-	return ERR_PTR(ret);
 }
 
 static struct nvmf_transport_ops nvme_tcp_transport = {
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -17,6 +17,15 @@ config NVME_TARGET
 	  To configure the NVMe target you probably want to use the nvmetcli
 	  tool from http://git.infradead.org/users/hch/nvmetcli.git.
 
+config NVME_TARGET_DEBUGFS
+	bool "NVMe Target debugfs support"
+	depends on NVME_TARGET
+	help
+	  This enables debugfs support to display the connected controllers
+	  to each subsystem
+
+	  If unsure, say N.
+
 config NVME_TARGET_PASSTHRU
 	bool "NVMe Target Passthrough support"
 	depends on NVME_TARGET
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_NVME_TARGET_TCP) += nvmet-tcp.o
 
 nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \
 			discovery.o io-cmd-file.o io-cmd-bdev.o
+nvmet-$(CONFIG_NVME_TARGET_DEBUGFS) += debugfs.o
 nvmet-$(CONFIG_NVME_TARGET_PASSTHRU) += passthru.o
 nvmet-$(CONFIG_BLK_DEV_ZONED) += zns.o
 nvmet-$(CONFIG_NVME_TARGET_AUTH) += fabrics-cmd-auth.o auth.o
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -344,7 +344,7 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
 	pr_debug("unhandled lid %d on qid %d\n",
 			req->cmd->get_log_page.lid, req->sq->qid);
 	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
-	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
 }
 
 static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
@@ -496,7 +496,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
 
 	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
 		req->error_loc = offsetof(struct nvme_identify, nsid);
-		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+		status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
 		goto out;
 	}
 
@@ -662,7 +662,7 @@ static void nvmet_execute_identify_desclist(struct nvmet_req *req)
 
 	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
 			off) != NVME_IDENTIFY_DATA_SIZE - off)
-		status = NVME_SC_INTERNAL | NVME_SC_DNR;
+		status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
 
 out:
 	nvmet_req_complete(req, status);
@@ -724,7 +724,7 @@ static void nvmet_execute_identify(struct nvmet_req *req)
 	pr_debug("unhandled identify cns %d on qid %d\n",
 			req->cmd->identify.cns, req->sq->qid);
 	req->error_loc = offsetof(struct nvme_identify, cns);
-	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
 }
 
 /*
@@ -807,7 +807,7 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
 
 	if (val32 & ~mask) {
 		req->error_loc = offsetof(struct nvme_common_command, cdw11);
-		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 	}
 
 	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
@@ -833,7 +833,7 @@ void nvmet_execute_set_features(struct nvmet_req *req)
 		ncqr = (cdw11 >> 16) & 0xffff;
 		nsqr = cdw11 & 0xffff;
 		if (ncqr == 0xffff || nsqr == 0xffff) {
-			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 			break;
 		}
 		nvmet_set_result(req,
@@ -846,14 +846,14 @@ void nvmet_execute_set_features(struct nvmet_req *req)
 		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
 		break;
 	case NVME_FEAT_HOST_ID:
-		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+		status = NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
 		break;
 	case NVME_FEAT_WRITE_PROTECT:
 		status = nvmet_set_feat_write_protect(req);
 		break;
 	default:
 		req->error_loc = offsetof(struct nvme_common_command, cdw10);
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		break;
 	}
 
@@ -939,7 +939,7 @@ void nvmet_execute_get_features(struct nvmet_req *req)
 		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
 			req->error_loc =
 				offsetof(struct nvme_common_command, cdw11);
-			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 			break;
 		}
 
@@ -952,7 +952,7 @@ void nvmet_execute_get_features(struct nvmet_req *req)
 	default:
 		req->error_loc =
 			offsetof(struct nvme_common_command, cdw10);
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		break;
 	}
 
@@ -969,7 +969,7 @@ void nvmet_execute_async_event(struct nvmet_req *req)
 	mutex_lock(&ctrl->lock);
 	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
 		mutex_unlock(&ctrl->lock);
-		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
+		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_STATUS_DNR);
 		return;
 	}
 	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
@@ -1006,7 +1006,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
 	if (nvme_is_fabrics(cmd))
 		return nvmet_parse_fabrics_admin_cmd(req);
 	if (unlikely(!nvmet_check_auth_status(req)))
-		return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
+		return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR;
 	if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
 		return nvmet_parse_discovery_cmd(req);
 
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -314,7 +314,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
 				req->sq->dhchap_c1,
 				challenge, shash_len);
 		if (ret)
-			goto out_free_response;
+			goto out_free_challenge;
 	}
 
 	pr_debug("ctrl %d qid %d host response seq %u transaction %d\n",
@@ -325,7 +325,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
 			GFP_KERNEL);
 	if (!shash) {
 		ret = -ENOMEM;
-		goto out_free_response;
+		goto out_free_challenge;
 	}
 	shash->tfm = shash_tfm;
 	ret = crypto_shash_init(shash);
@@ -361,9 +361,10 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
 		goto out;
 	ret = crypto_shash_final(shash, response);
 out:
+	kfree(shash);
+out_free_challenge:
 	if (challenge != req->sq->dhchap_c1)
 		kfree(challenge);
-	kfree(shash);
 out_free_response:
 	nvme_auth_free_key(transformed_key);
 out_free_tfm:
@@ -427,14 +428,14 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
 				req->sq->dhchap_c2,
 				challenge, shash_len);
 		if (ret)
-			goto out_free_response;
+			goto out_free_challenge;
 	}
 
 	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm),
 			GFP_KERNEL);
 	if (!shash) {
 		ret = -ENOMEM;
-		goto out_free_response;
+		goto out_free_challenge;
 	}
 	shash->tfm = shash_tfm;
 
@@ -471,9 +472,10 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
 		goto out;
 	ret = crypto_shash_final(shash, response);
 out:
+	kfree(shash);
+out_free_challenge:
 	if (challenge != req->sq->dhchap_c2)
 		kfree(challenge);
-	kfree(shash);
 out_free_response:
 	nvme_auth_free_key(transformed_key);
 out_free_tfm:
|
|||||||
#include "trace.h"
|
#include "trace.h"
|
||||||
|
|
||||||
#include "nvmet.h"
|
#include "nvmet.h"
|
||||||
|
#include "debugfs.h"
|
||||||
|
|
||||||
struct kmem_cache *nvmet_bvec_cache;
|
struct kmem_cache *nvmet_bvec_cache;
|
||||||
struct workqueue_struct *buffered_io_wq;
|
struct workqueue_struct *buffered_io_wq;
|
||||||
@ -55,18 +56,18 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
|
|||||||
return NVME_SC_SUCCESS;
|
return NVME_SC_SUCCESS;
|
||||||
case -ENOSPC:
|
case -ENOSPC:
|
||||||
req->error_loc = offsetof(struct nvme_rw_command, length);
|
req->error_loc = offsetof(struct nvme_rw_command, length);
|
||||||
return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
|
return NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR;
|
||||||
case -EREMOTEIO:
|
case -EREMOTEIO:
|
||||||
req->error_loc = offsetof(struct nvme_rw_command, slba);
|
req->error_loc = offsetof(struct nvme_rw_command, slba);
|
||||||
return NVME_SC_LBA_RANGE | NVME_SC_DNR;
|
return NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
|
||||||
case -EOPNOTSUPP:
|
case -EOPNOTSUPP:
|
||||||
req->error_loc = offsetof(struct nvme_common_command, opcode);
|
req->error_loc = offsetof(struct nvme_common_command, opcode);
|
||||||
switch (req->cmd->common.opcode) {
|
switch (req->cmd->common.opcode) {
|
||||||
case nvme_cmd_dsm:
|
case nvme_cmd_dsm:
|
||||||
case nvme_cmd_write_zeroes:
|
case nvme_cmd_write_zeroes:
|
||||||
return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
|
return NVME_SC_ONCS_NOT_SUPPORTED | NVME_STATUS_DNR;
|
||||||
default:
|
default:
|
||||||
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
|
return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case -ENODATA:
|
case -ENODATA:
|
||||||
@ -76,7 +77,7 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
|
|||||||
fallthrough;
|
fallthrough;
|
||||||
default:
|
default:
|
||||||
req->error_loc = offsetof(struct nvme_common_command, opcode);
|
req->error_loc = offsetof(struct nvme_common_command, opcode);
|
||||||
return NVME_SC_INTERNAL | NVME_SC_DNR;
|
return NVME_SC_INTERNAL | NVME_STATUS_DNR;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -86,7 +87,7 @@ u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
|
|||||||
req->sq->qid);
|
req->sq->qid);
|
||||||
|
|
||||||
req->error_loc = offsetof(struct nvme_common_command, opcode);
|
req->error_loc = offsetof(struct nvme_common_command, opcode);
|
||||||
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
|
return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
|
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
|
||||||
@ -97,7 +98,7 @@ u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
|
|||||||
{
|
{
|
||||||
if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
|
if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
|
||||||
req->error_loc = offsetof(struct nvme_common_command, dptr);
|
req->error_loc = offsetof(struct nvme_common_command, dptr);
|
||||||
return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
|
return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@ -106,7 +107,7 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
|
|||||||
{
|
{
|
||||||
if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
|
if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
|
||||||
req->error_loc = offsetof(struct nvme_common_command, dptr);
|
req->error_loc = offsetof(struct nvme_common_command, dptr);
|
||||||
return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
|
return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@ -115,7 +116,7 @@ u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
|
|||||||
{
|
{
|
||||||
if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
|
if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
|
||||||
req->error_loc = offsetof(struct nvme_common_command, dptr);
|
req->error_loc = offsetof(struct nvme_common_command, dptr);
|
||||||
return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
|
return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@ -145,7 +146,7 @@ static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
|
|||||||
while (ctrl->nr_async_event_cmds) {
|
while (ctrl->nr_async_event_cmds) {
|
||||||
req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
|
req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
|
||||||
mutex_unlock(&ctrl->lock);
|
mutex_unlock(&ctrl->lock);
|
||||||
nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
|
nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_STATUS_DNR);
|
||||||
mutex_lock(&ctrl->lock);
|
mutex_lock(&ctrl->lock);
|
||||||
}
|
}
|
||||||
mutex_unlock(&ctrl->lock);
|
mutex_unlock(&ctrl->lock);
|
||||||
@ -444,7 +445,7 @@ u16 nvmet_req_find_ns(struct nvmet_req *req)
|
|||||||
req->error_loc = offsetof(struct nvme_common_command, nsid);
|
req->error_loc = offsetof(struct nvme_common_command, nsid);
|
||||||
if (nvmet_subsys_nsid_exists(subsys, nsid))
|
if (nvmet_subsys_nsid_exists(subsys, nsid))
|
||||||
return NVME_SC_INTERNAL_PATH_ERROR;
|
return NVME_SC_INTERNAL_PATH_ERROR;
|
||||||
return NVME_SC_INVALID_NS | NVME_SC_DNR;
|
return NVME_SC_INVALID_NS | NVME_STATUS_DNR;
|
||||||
}
|
}
|
||||||
|
|
||||||
percpu_ref_get(&req->ns->ref);
|
percpu_ref_get(&req->ns->ref);
|
||||||
@ -904,7 +905,7 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
|
|||||||
return nvmet_parse_fabrics_io_cmd(req);
|
return nvmet_parse_fabrics_io_cmd(req);
|
||||||
|
|
||||||
if (unlikely(!nvmet_check_auth_status(req)))
|
if (unlikely(!nvmet_check_auth_status(req)))
|
||||||
return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
|
return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR;
|
||||||
|
|
||||||
ret = nvmet_check_ctrl_status(req);
|
ret = nvmet_check_ctrl_status(req);
|
||||||
if (unlikely(ret))
|
if (unlikely(ret))
|
||||||
@ -966,7 +967,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
|
|||||||
/* no support for fused commands yet */
|
/* no support for fused commands yet */
|
||||||
if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
|
if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
|
||||||
req->error_loc = offsetof(struct nvme_common_command, flags);
|
req->error_loc = offsetof(struct nvme_common_command, flags);
|
||||||
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
|
status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
|
||||||
goto fail;
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -977,7 +978,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
|
|||||||
*/
|
*/
|
||||||
if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
|
if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
|
||||||
req->error_loc = offsetof(struct nvme_common_command, flags);
|
req->error_loc = offsetof(struct nvme_common_command, flags);
|
||||||
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
|
status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
|
||||||
goto fail;
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -995,7 +996,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
|
|||||||
trace_nvmet_req_init(req, req->cmd);
|
trace_nvmet_req_init(req, req->cmd);
|
||||||
|
|
||||||
if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
|
if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
|
||||||
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
|
status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
|
||||||
goto fail;
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1022,7 +1023,7 @@ bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
|
|||||||
{
|
{
|
||||||
if (unlikely(len != req->transfer_len)) {
|
if (unlikely(len != req->transfer_len)) {
|
||||||
req->error_loc = offsetof(struct nvme_common_command, dptr);
|
req->error_loc = offsetof(struct nvme_common_command, dptr);
|
||||||
nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
|
nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1034,7 +1035,7 @@ bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
|
|||||||
{
|
{
|
||||||
if (unlikely(data_len > req->transfer_len)) {
|
if (unlikely(data_len > req->transfer_len)) {
|
||||||
req->error_loc = offsetof(struct nvme_common_command, dptr);
|
req->error_loc = offsetof(struct nvme_common_command, dptr);
|
||||||
nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
|
nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1303,18 +1304,18 @@ u16 nvmet_check_ctrl_status(struct nvmet_req *req)
|
|||||||
if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
|
if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
|
||||||
pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
|
pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
|
||||||
req->cmd->common.opcode, req->sq->qid);
|
req->cmd->common.opcode, req->sq->qid);
|
||||||
return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
|
return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
|
if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
|
||||||
pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
|
pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
|
||||||
req->cmd->common.opcode, req->sq->qid);
|
req->cmd->common.opcode, req->sq->qid);
|
||||||
return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
|
return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (unlikely(!nvmet_check_auth_status(req))) {
|
if (unlikely(!nvmet_check_auth_status(req))) {
|
||||||
pr_warn("qid %d not authenticated\n", req->sq->qid);
|
pr_warn("qid %d not authenticated\n", req->sq->qid);
|
||||||
return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
|
return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR;
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@ -1388,7 +1389,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
|
|||||||
int ret;
|
int ret;
|
||||||
u16 status;
|
u16 status;
|
||||||
|
|
||||||
status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
|
status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
|
||||||
subsys = nvmet_find_get_subsys(req->port, subsysnqn);
|
subsys = nvmet_find_get_subsys(req->port, subsysnqn);
|
||||||
if (!subsys) {
|
if (!subsys) {
|
||||||
pr_warn("connect request for invalid subsystem %s!\n",
|
pr_warn("connect request for invalid subsystem %s!\n",
|
||||||
@ -1404,7 +1405,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
|
|||||||
hostnqn, subsysnqn);
|
hostnqn, subsysnqn);
|
||||||
req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
|
req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
|
||||||
up_read(&nvmet_config_sem);
|
up_read(&nvmet_config_sem);
|
||||||
status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
|
status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR;
|
||||||
req->error_loc = offsetof(struct nvme_common_command, dptr);
|
req->error_loc = offsetof(struct nvme_common_command, dptr);
|
||||||
goto out_put_subsystem;
|
goto out_put_subsystem;
|
||||||
}
|
}
|
||||||
@ -1455,7 +1456,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
|
|||||||
subsys->cntlid_min, subsys->cntlid_max,
|
subsys->cntlid_min, subsys->cntlid_max,
|
||||||
GFP_KERNEL);
|
GFP_KERNEL);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
|
status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
|
||||||
goto out_free_sqs;
|
goto out_free_sqs;
|
||||||
}
|
}
|
||||||
ctrl->cntlid = ret;
|
ctrl->cntlid = ret;
|
||||||
@ -1478,6 +1479,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
|
|||||||
mutex_lock(&subsys->lock);
|
mutex_lock(&subsys->lock);
|
||||||
list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
|
list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
|
||||||
nvmet_setup_p2p_ns_map(ctrl, req);
|
nvmet_setup_p2p_ns_map(ctrl, req);
|
||||||
|
nvmet_debugfs_ctrl_setup(ctrl);
|
||||||
mutex_unlock(&subsys->lock);
|
mutex_unlock(&subsys->lock);
|
||||||
|
|
||||||
*ctrlp = ctrl;
|
*ctrlp = ctrl;
|
||||||
@ -1512,6 +1514,8 @@ static void nvmet_ctrl_free(struct kref *ref)
|
|||||||
|
|
||||||
nvmet_destroy_auth(ctrl);
|
nvmet_destroy_auth(ctrl);
|
||||||
|
|
||||||
|
nvmet_debugfs_ctrl_free(ctrl);
|
||||||
|
|
||||||
ida_free(&cntlid_ida, ctrl->cntlid);
|
ida_free(&cntlid_ida, ctrl->cntlid);
|
||||||
|
|
||||||
nvmet_async_events_free(ctrl);
|
nvmet_async_events_free(ctrl);
|
||||||
@ -1538,6 +1542,14 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
|
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
|
||||||
|
|
||||||
|
ssize_t nvmet_ctrl_host_traddr(struct nvmet_ctrl *ctrl,
|
||||||
|
char *traddr, size_t traddr_len)
|
||||||
|
{
|
||||||
|
if (!ctrl->ops->host_traddr)
|
||||||
|
return -EOPNOTSUPP;
|
||||||
|
return ctrl->ops->host_traddr(ctrl, traddr, traddr_len);
|
||||||
|
}
|
||||||
|
|
||||||
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
|
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
|
||||||
const char *subsysnqn)
|
const char *subsysnqn)
|
||||||
{
|
{
|
||||||
@ -1632,8 +1644,14 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
|
|||||||
INIT_LIST_HEAD(&subsys->ctrls);
|
INIT_LIST_HEAD(&subsys->ctrls);
|
||||||
INIT_LIST_HEAD(&subsys->hosts);
|
INIT_LIST_HEAD(&subsys->hosts);
|
||||||
|
|
||||||
|
ret = nvmet_debugfs_subsys_setup(subsys);
|
||||||
|
if (ret)
|
||||||
|
goto free_subsysnqn;
|
||||||
|
|
||||||
return subsys;
|
return subsys;
|
||||||
|
|
||||||
|
free_subsysnqn:
|
||||||
|
kfree(subsys->subsysnqn);
|
||||||
free_fr:
|
free_fr:
|
||||||
kfree(subsys->firmware_rev);
|
kfree(subsys->firmware_rev);
|
||||||
free_mn:
|
free_mn:
|
||||||
@ -1650,6 +1668,8 @@ static void nvmet_subsys_free(struct kref *ref)
|
|||||||
|
|
||||||
WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
|
WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
|
||||||
|
|
||||||
|
nvmet_debugfs_subsys_free(subsys);
|
||||||
|
|
||||||
xa_destroy(&subsys->namespaces);
|
xa_destroy(&subsys->namespaces);
|
||||||
nvmet_passthru_subsys_free(subsys);
|
nvmet_passthru_subsys_free(subsys);
|
||||||
|
|
||||||
@ -1704,11 +1724,18 @@ static int __init nvmet_init(void)
|
|||||||
if (error)
|
if (error)
|
||||||
goto out_free_nvmet_work_queue;
|
goto out_free_nvmet_work_queue;
|
||||||
|
|
||||||
error = nvmet_init_configfs();
|
error = nvmet_init_debugfs();
|
||||||
if (error)
|
if (error)
|
||||||
goto out_exit_discovery;
|
goto out_exit_discovery;
|
||||||
|
|
||||||
|
error = nvmet_init_configfs();
|
||||||
|
if (error)
|
||||||
|
goto out_exit_debugfs;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
out_exit_debugfs:
|
||||||
|
nvmet_exit_debugfs();
|
||||||
out_exit_discovery:
|
out_exit_discovery:
|
||||||
nvmet_exit_discovery();
|
nvmet_exit_discovery();
|
||||||
out_free_nvmet_work_queue:
|
out_free_nvmet_work_queue:
|
||||||
@ -1725,6 +1752,7 @@ out_destroy_bvec_cache:
|
|||||||
static void __exit nvmet_exit(void)
|
static void __exit nvmet_exit(void)
|
||||||
{
|
{
|
||||||
nvmet_exit_configfs();
|
nvmet_exit_configfs();
|
||||||
|
nvmet_exit_debugfs();
|
||||||
nvmet_exit_discovery();
|
nvmet_exit_discovery();
|
||||||
ida_destroy(&cntlid_ida);
|
ida_destroy(&cntlid_ida);
|
||||||
destroy_workqueue(nvmet_wq);
|
destroy_workqueue(nvmet_wq);
|
||||||
|
--- /dev/null
+++ b/drivers/nvme/target/debugfs.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DebugFS interface for the NVMe target.
+ * Copyright (c) 2022-2024 Shadow
+ * Copyright (c) 2024 SUSE LLC
+ */
+
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include "nvmet.h"
+#include "debugfs.h"
+
+struct dentry *nvmet_debugfs;
+
+#define NVMET_DEBUGFS_ATTR(field) \
+	static int field##_open(struct inode *inode, struct file *file) \
+	{ return single_open(file, field##_show, inode->i_private); } \
+	\
+	static const struct file_operations field##_fops = { \
+		.open = field##_open, \
+		.read = seq_read, \
+		.release = single_release, \
+	}
+
+#define NVMET_DEBUGFS_RW_ATTR(field) \
+	static int field##_open(struct inode *inode, struct file *file) \
+	{ return single_open(file, field##_show, inode->i_private); } \
+	\
+	static const struct file_operations field##_fops = { \
+		.open = field##_open, \
+		.read = seq_read, \
+		.write = field##_write, \
+		.release = single_release, \
+	}
+
+static int nvmet_ctrl_hostnqn_show(struct seq_file *m, void *p)
+{
+	struct nvmet_ctrl *ctrl = m->private;
+
+	seq_puts(m, ctrl->hostnqn);
+	return 0;
+}
+NVMET_DEBUGFS_ATTR(nvmet_ctrl_hostnqn);
+
+static int nvmet_ctrl_kato_show(struct seq_file *m, void *p)
+{
+	struct nvmet_ctrl *ctrl = m->private;
+
+	seq_printf(m, "%d\n", ctrl->kato);
+	return 0;
+}
+NVMET_DEBUGFS_ATTR(nvmet_ctrl_kato);
+
+static int nvmet_ctrl_port_show(struct seq_file *m, void *p)
+{
+	struct nvmet_ctrl *ctrl = m->private;
+
+	seq_printf(m, "%d\n", le16_to_cpu(ctrl->port->disc_addr.portid));
+	return 0;
+}
+NVMET_DEBUGFS_ATTR(nvmet_ctrl_port);
+
+static const char *const csts_state_names[] = {
+	[NVME_CSTS_RDY] = "ready",
+	[NVME_CSTS_CFS] = "fatal",
+	[NVME_CSTS_NSSRO] = "reset",
+	[NVME_CSTS_SHST_OCCUR] = "shutdown",
+	[NVME_CSTS_SHST_CMPLT] = "completed",
+	[NVME_CSTS_PP] = "paused",
+};
+
+static int nvmet_ctrl_state_show(struct seq_file *m, void *p)
+{
+	struct nvmet_ctrl *ctrl = m->private;
+	bool sep = false;
+	int i;
+
+	for (i = 0; i < 7; i++) {
+		int state = BIT(i);
+
+		if (!(ctrl->csts & state))
+			continue;
+		if (sep)
+			seq_puts(m, "|");
+		sep = true;
+		if (csts_state_names[state])
+			seq_puts(m, csts_state_names[state]);
+		else
+			seq_printf(m, "%d", state);
+	}
+	if (sep)
+		seq_printf(m, "\n");
+	return 0;
+}
+
+static ssize_t nvmet_ctrl_state_write(struct file *file, const char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct seq_file *m = file->private_data;
+	struct nvmet_ctrl *ctrl = m->private;
+	char reset[16];
+
+	if (count >= sizeof(reset))
+		return -EINVAL;
+	if (copy_from_user(reset, buf, count))
+		return -EFAULT;
+	if (!memcmp(reset, "fatal", 5))
+		nvmet_ctrl_fatal_error(ctrl);
+	else
+		return -EINVAL;
+	return count;
+}
+NVMET_DEBUGFS_RW_ATTR(nvmet_ctrl_state);
+
+static int nvmet_ctrl_host_traddr_show(struct seq_file *m, void *p)
+{
+	struct nvmet_ctrl *ctrl = m->private;
+	ssize_t size;
+	char buf[NVMF_TRADDR_SIZE + 1];
+
+	size = nvmet_ctrl_host_traddr(ctrl, buf, NVMF_TRADDR_SIZE);
+	if (size < 0) {
+		buf[0] = '\0';
+		size = 0;
+	}
+	buf[size] = '\0';
+	seq_printf(m, "%s\n", buf);
+	return 0;
+}
+NVMET_DEBUGFS_ATTR(nvmet_ctrl_host_traddr);
+
+int nvmet_debugfs_ctrl_setup(struct nvmet_ctrl *ctrl)
+{
+	char name[32];
+	struct dentry *parent = ctrl->subsys->debugfs_dir;
+	int ret;
+
+	if (!parent)
+		return -ENODEV;
+	snprintf(name, sizeof(name), "ctrl%d", ctrl->cntlid);
+	ctrl->debugfs_dir = debugfs_create_dir(name, parent);
+	if (IS_ERR(ctrl->debugfs_dir)) {
+		ret = PTR_ERR(ctrl->debugfs_dir);
+		ctrl->debugfs_dir = NULL;
+		return ret;
+	}
+	debugfs_create_file("port", S_IRUSR, ctrl->debugfs_dir, ctrl,
+			&nvmet_ctrl_port_fops);
+	debugfs_create_file("hostnqn", S_IRUSR, ctrl->debugfs_dir, ctrl,
+			&nvmet_ctrl_hostnqn_fops);
+	debugfs_create_file("kato", S_IRUSR, ctrl->debugfs_dir, ctrl,
+			&nvmet_ctrl_kato_fops);
+	debugfs_create_file("state", S_IRUSR | S_IWUSR, ctrl->debugfs_dir, ctrl,
+			&nvmet_ctrl_state_fops);
+	debugfs_create_file("host_traddr", S_IRUSR, ctrl->debugfs_dir, ctrl,
+			&nvmet_ctrl_host_traddr_fops);
+	return 0;
+}
+
+void nvmet_debugfs_ctrl_free(struct nvmet_ctrl *ctrl)
+{
+	debugfs_remove_recursive(ctrl->debugfs_dir);
+}
+
+int nvmet_debugfs_subsys_setup(struct nvmet_subsys *subsys)
+{
+	int ret = 0;
+
+	subsys->debugfs_dir = debugfs_create_dir(subsys->subsysnqn,
+			nvmet_debugfs);
+	if (IS_ERR(subsys->debugfs_dir)) {
+		ret = PTR_ERR(subsys->debugfs_dir);
+		subsys->debugfs_dir = NULL;
+	}
+	return ret;
+}
+
+void nvmet_debugfs_subsys_free(struct nvmet_subsys *subsys)
+{
+	debugfs_remove_recursive(subsys->debugfs_dir);
+}
+
+int __init nvmet_init_debugfs(void)
+{
+	struct dentry *parent;
+
+	parent = debugfs_create_dir("nvmet", NULL);
+	if (IS_ERR(parent)) {
+		pr_warn("%s: failed to create debugfs directory\n", "nvmet");
+		return PTR_ERR(parent);
+	}
+	nvmet_debugfs = parent;
+	return 0;
+}
+
+void nvmet_exit_debugfs(void)
+{
+	debugfs_remove_recursive(nvmet_debugfs);
+}
--- /dev/null
+++ b/drivers/nvme/target/debugfs.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DebugFS interface for the NVMe target.
+ * Copyright (c) 2022-2024 Shadow
+ * Copyright (c) 2024 SUSE LLC
+ */
+#ifndef NVMET_DEBUGFS_H
+#define NVMET_DEBUGFS_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_NVME_TARGET_DEBUGFS
+int nvmet_debugfs_subsys_setup(struct nvmet_subsys *subsys);
+void nvmet_debugfs_subsys_free(struct nvmet_subsys *subsys);
+int nvmet_debugfs_ctrl_setup(struct nvmet_ctrl *ctrl);
+void nvmet_debugfs_ctrl_free(struct nvmet_ctrl *ctrl);
+
+int __init nvmet_init_debugfs(void);
+void nvmet_exit_debugfs(void);
+#else
+static inline int nvmet_debugfs_subsys_setup(struct nvmet_subsys *subsys)
+{
+	return 0;
+}
+static inline void nvmet_debugfs_subsys_free(struct nvmet_subsys *subsys){}
+
+static inline int nvmet_debugfs_ctrl_setup(struct nvmet_ctrl *ctrl)
+{
+	return 0;
+}
+static inline void nvmet_debugfs_ctrl_free(struct nvmet_ctrl *ctrl) {}
+
+static inline int __init nvmet_init_debugfs(void)
+{
+	return 0;
+}
+
+static inline void nvmet_exit_debugfs(void) {}
+
+#endif
+
+#endif /* NVMET_DEBUGFS_H */
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -179,7 +179,7 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
 	if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
 		req->error_loc =
 			offsetof(struct nvme_get_log_page_command, lid);
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		goto out;
 	}
 
@@ -187,7 +187,7 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
 	if (offset & 0x3) {
 		req->error_loc =
 			offsetof(struct nvme_get_log_page_command, lpo);
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		goto out;
 	}
 
@@ -256,7 +256,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
 
 	if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
 		req->error_loc = offsetof(struct nvme_identify, cns);
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		goto out;
 	}
 
@@ -320,7 +320,7 @@ static void nvmet_execute_disc_set_features(struct nvmet_req *req)
 	default:
 		req->error_loc =
 			offsetof(struct nvme_common_command, cdw10);
-		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		stat = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		break;
 	}
 
@@ -345,7 +345,7 @@ static void nvmet_execute_disc_get_features(struct nvmet_req *req)
 	default:
 		req->error_loc =
 			offsetof(struct nvme_common_command, cdw10);
-		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		stat = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		break;
 	}
 
@@ -361,7 +361,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
 			 cmd->common.opcode);
 		req->error_loc =
 			offsetof(struct nvme_common_command, opcode);
-		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
 	}
 
 	switch (cmd->common.opcode) {
@@ -386,7 +386,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
 	default:
 		pr_debug("unhandled cmd %d\n", cmd->common.opcode);
 		req->error_loc = offsetof(struct nvme_common_command, opcode);
-		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
 	}
 
 }
--- a/drivers/nvme/target/fabrics-cmd-auth.c
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -189,26 +189,26 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
 	u8 dhchap_status;
 
 	if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		req->error_loc =
 			offsetof(struct nvmf_auth_send_command, secp);
 		goto done;
 	}
 	if (req->cmd->auth_send.spsp0 != 0x01) {
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		req->error_loc =
 			offsetof(struct nvmf_auth_send_command, spsp0);
 		goto done;
 	}
 	if (req->cmd->auth_send.spsp1 != 0x01) {
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		req->error_loc =
 			offsetof(struct nvmf_auth_send_command, spsp1);
 		goto done;
 	}
 	tl = le32_to_cpu(req->cmd->auth_send.tl);
 	if (!tl) {
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		req->error_loc =
 			offsetof(struct nvmf_auth_send_command, tl);
 		goto done;
@@ -438,26 +438,26 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
 	u16 status = 0;
 
 	if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		req->error_loc =
 			offsetof(struct nvmf_auth_receive_command, secp);
 		goto done;
 	}
 	if (req->cmd->auth_receive.spsp0 != 0x01) {
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		req->error_loc =
 			offsetof(struct nvmf_auth_receive_command, spsp0);
 		goto done;
 	}
 	if (req->cmd->auth_receive.spsp1 != 0x01) {
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		req->error_loc =
 			offsetof(struct nvmf_auth_receive_command, spsp1);
 		goto done;
 	}
 	al = le32_to_cpu(req->cmd->auth_receive.al);
 	if (!al) {
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		req->error_loc =
 			offsetof(struct nvmf_auth_receive_command, al);
 		goto done;
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -18,7 +18,7 @@ static void nvmet_execute_prop_set(struct nvmet_req *req)
 	if (req->cmd->prop_set.attrib & 1) {
 		req->error_loc =
 			offsetof(struct nvmf_property_set_command, attrib);
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		goto out;
 	}
 
@@ -29,7 +29,7 @@ static void nvmet_execute_prop_set(struct nvmet_req *req)
 	default:
 		req->error_loc =
 			offsetof(struct nvmf_property_set_command, offset);
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 	}
 out:
 	nvmet_req_complete(req, status);
@@ -50,7 +50,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
 			val = ctrl->cap;
 			break;
 		default:
-			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 			break;
 		}
 	} else {
@@ -65,7 +65,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
 			val = ctrl->csts;
 			break;
 		default:
-			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 			break;
 		}
 	}
@@ -105,7 +105,7 @@ u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req)
 		pr_debug("received unknown capsule type 0x%x\n",
 			 cmd->fabrics.fctype);
 		req->error_loc = offsetof(struct nvmf_common_command, fctype);
-		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
 	}
 
 	return 0;
@@ -128,7 +128,7 @@ u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req)
 		pr_debug("received unknown capsule type 0x%x\n",
 			 cmd->fabrics.fctype);
 		req->error_loc = offsetof(struct nvmf_common_command, fctype);
-		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
 	}
 
 	return 0;
@@ -147,14 +147,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
 		pr_warn("queue size zero!\n");
 		req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
 		req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
-		ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+		ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
 		goto err;
 	}
 
 	if (ctrl->sqs[qid] != NULL) {
 		pr_warn("qid %u has already been created\n", qid);
 		req->error_loc = offsetof(struct nvmf_connect_command, qid);
-		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+		return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
 	}
 
 	/* for fabrics, this value applies to only the I/O Submission Queues */
@@ -163,14 +163,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
 			sqsize, mqes, ctrl->cntlid);
 		req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
 		req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
-		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+		return NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
 	}
 
 	old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
 	if (old) {
 		pr_warn("queue already connected!\n");
 		req->error_loc = offsetof(struct nvmf_connect_command, opcode);
-		return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
+		return NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
 	}
 
 	/* note: convert queue size from 0's-based value to 1's-based value */
@@ -233,14 +233,14 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 		pr_warn("invalid connect version (%d).\n",
 			le16_to_cpu(c->recfmt));
 		req->error_loc = offsetof(struct nvmf_connect_command, recfmt);
-		status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
+		status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR;
 		goto out;
 	}
 
 	if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
 		pr_warn("connect attempt for invalid controller ID %#x\n",
 			d->cntlid);
-		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 		goto out;
 	}
@@ -260,7 +260,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 			 dhchap_status);
 		nvmet_ctrl_put(ctrl);
 		if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED)
-			status = (NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR);
+			status = (NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR);
 		else
 			status = NVME_SC_INTERNAL;
 		goto out;
@@ -311,7 +311,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
 	if (c->recfmt != 0) {
 		pr_warn("invalid connect version (%d).\n",
 			le16_to_cpu(c->recfmt));
-		status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
+		status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR;
 		goto out;
 	}
 
@@ -320,13 +320,13 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
 	ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
 			le16_to_cpu(d->cntlid), req);
 	if (!ctrl) {
-		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
 		goto out;
 	}
 
 	if (unlikely(qid > ctrl->subsys->max_qid)) {
 		pr_warn("invalid queue id (%d)\n", qid);
-		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
 		req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
 		goto out_ctrl_put;
 	}
@@ -356,13 +356,13 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
 		pr_debug("invalid command 0x%x on unconnected queue.\n",
 			cmd->fabrics.opcode);
 		req->error_loc = offsetof(struct nvme_common_command, opcode);
-		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
 	}
 	if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
 		pr_debug("invalid capsule type 0x%x on unconnected queue.\n",
 			cmd->fabrics.fctype);
 		req->error_loc = offsetof(struct nvmf_common_command, fctype);
-		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
 	}
 
 	if (cmd->connect.qid == 0)
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -2934,6 +2934,38 @@ nvmet_fc_discovery_chg(struct nvmet_port *port)
 		tgtport->ops->discovery_event(&tgtport->fc_target_port);
 }
 
+static ssize_t
+nvmet_fc_host_traddr(struct nvmet_ctrl *ctrl,
+		char *traddr, size_t traddr_size)
+{
+	struct nvmet_sq *sq = ctrl->sqs[0];
+	struct nvmet_fc_tgt_queue *queue =
+		container_of(sq, struct nvmet_fc_tgt_queue, nvme_sq);
+	struct nvmet_fc_tgtport *tgtport = queue->assoc ? queue->assoc->tgtport : NULL;
+	struct nvmet_fc_hostport *hostport = queue->assoc ? queue->assoc->hostport : NULL;
+	u64 wwnn, wwpn;
+	ssize_t ret = 0;
+
+	if (!tgtport || !nvmet_fc_tgtport_get(tgtport))
+		return -ENODEV;
+	if (!hostport || !nvmet_fc_hostport_get(hostport)) {
+		ret = -ENODEV;
+		goto out_put;
+	}
+
+	if (tgtport->ops->host_traddr) {
+		ret = tgtport->ops->host_traddr(hostport->hosthandle, &wwnn, &wwpn);
+		if (ret)
+			goto out_put_host;
+		ret = snprintf(traddr, traddr_size, "nn-0x%llx:pn-0x%llx", wwnn, wwpn);
+	}
+out_put_host:
+	nvmet_fc_hostport_put(hostport);
+out_put:
+	nvmet_fc_tgtport_put(tgtport);
+	return ret;
+}
+
 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
 	.owner = THIS_MODULE,
 	.type = NVMF_TRTYPE_FC,
@@ -2943,6 +2975,7 @@ static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
 	.queue_response = nvmet_fc_fcp_nvme_cmd_done,
 	.delete_ctrl = nvmet_fc_delete_ctrl,
 	.discovery_chg = nvmet_fc_discovery_chg,
+	.host_traddr = nvmet_fc_host_traddr,
 };
 
 static int __init nvmet_fc_init_module(void)
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -492,6 +492,16 @@ fcloop_t2h_host_release(void *hosthandle)
 	/* host handle ignored for now */
 }
 
+static int
+fcloop_t2h_host_traddr(void *hosthandle, u64 *wwnn, u64 *wwpn)
+{
+	struct fcloop_rport *rport = hosthandle;
+
+	*wwnn = rport->lport->localport->node_name;
+	*wwpn = rport->lport->localport->port_name;
+	return 0;
+}
+
 /*
  * Simulate reception of RSCN and converting it to a initiator transport
  * call to rescan a remote port.
@@ -1074,6 +1084,7 @@ static struct nvmet_fc_target_template tgttemplate = {
 	.ls_req = fcloop_t2h_ls_req,
 	.ls_abort = fcloop_t2h_ls_abort,
 	.host_release = fcloop_t2h_host_release,
+	.host_traddr = fcloop_t2h_host_traddr,
 	.max_hw_queues = FCLOOP_HW_QUEUES,
 	.max_sgl_segments = FCLOOP_SGL_SEGS,
 	.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
@@ -137,11 +137,11 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
 	 */
 	switch (blk_sts) {
 	case BLK_STS_NOSPC:
-		status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
+		status = NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR;
 		req->error_loc = offsetof(struct nvme_rw_command, length);
 		break;
 	case BLK_STS_TARGET:
-		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+		status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
 		req->error_loc = offsetof(struct nvme_rw_command, slba);
 		break;
 	case BLK_STS_NOTSUPP:
@@ -149,10 +149,10 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
 		switch (req->cmd->common.opcode) {
 		case nvme_cmd_dsm:
 		case nvme_cmd_write_zeroes:
-			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
+			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_STATUS_DNR;
 			break;
 		default:
-			status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+			status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
 		}
 		break;
 	case BLK_STS_MEDIUM:
@@ -161,7 +161,7 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
 		break;
 	case BLK_STS_IOERR:
 	default:
-		status = NVME_SC_INTERNAL | NVME_SC_DNR;
+		status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
 		req->error_loc = offsetof(struct nvme_common_command, opcode);
 	}
 
@@ -358,7 +358,7 @@ u16 nvmet_bdev_flush(struct nvmet_req *req)
 		return 0;
 
 	if (blkdev_issue_flush(req->ns->bdev))
-		return NVME_SC_INTERNAL | NVME_SC_DNR;
+		return NVME_SC_INTERNAL | NVME_STATUS_DNR;
 	return 0;
 }
 
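The rename leaves the composition unchanged: blk_to_nvme_status() still ORs a status code with the Do Not Retry bit into one u16. A user-space sketch of the mapping shape, with the enum values copied from include/linux/nvme.h and the blk_status codes reduced to stand-ins:

/* Minimal user-space sketch of the mapping above; NVME_* values are
 * copied from include/linux/nvme.h, BLK_STS_* are simplified stand-ins. */
#include <stdint.h>
#include <stdio.h>

enum { NVME_SC_INTERNAL = 0x6, NVME_SC_LBA_RANGE = 0x80,
       NVME_SC_CAP_EXCEEDED = 0x81, NVME_STATUS_DNR = 0x4000 };
enum { BLK_STS_OK, BLK_STS_NOSPC, BLK_STS_TARGET, BLK_STS_IOERR };

static uint16_t blk_to_nvme_status_sketch(int blk_sts)
{
	switch (blk_sts) {
	case BLK_STS_NOSPC:
		return NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR;
	case BLK_STS_TARGET:
		return NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
	default:
		return NVME_SC_INTERNAL | NVME_STATUS_DNR;
	}
}

int main(void)
{
	printf("0x%04x\n", blk_to_nvme_status_sketch(BLK_STS_NOSPC)); /* 0x4081 */
	return 0;
}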
@@ -555,6 +555,10 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
 		goto out;
 	}
 
+	ret = nvme_add_ctrl(&ctrl->ctrl);
+	if (ret)
+		goto out_put_ctrl;
+
 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
 		WARN_ON_ONCE(1);
 
@@ -611,6 +615,7 @@ out_free_queues:
 	kfree(ctrl->queues);
 out_uninit_ctrl:
 	nvme_uninit_ctrl(&ctrl->ctrl);
+out_put_ctrl:
 	nvme_put_ctrl(&ctrl->ctrl);
 out:
 	if (ret > 0)
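The new out_put_ctrl label exists because nvme_add_ctrl() can now fail after nvme_init_ctrl() has taken a reference: that failure needs only a put, not a full uninit. A toy model of the unwind ordering (names illustrative, failure simulated):

/* Toy model of the split bring-up: init, then add; if add fails we only
 * drop the reference (out_put_ctrl), we do not uninit a live ctrl. */
#include <stdio.h>

static int init_ctrl(void)  { puts("init ctrl"); return 0; }
static int add_ctrl(void)   { puts("add ctrl");  return -1; /* simulated failure */ }
static void put_ctrl(void)  { puts("put ctrl (out_put_ctrl)"); }

int main(void)
{
	int ret = init_ctrl();

	if (ret)
		return ret;
	ret = add_ctrl();
	if (ret)
		goto out_put_ctrl;
	return 0;
out_put_ctrl:
	put_ctrl();
	return ret;
}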
@@ -230,7 +230,9 @@ struct nvmet_ctrl {
 
 	struct device *p2p_client;
 	struct radix_tree_root p2p_ns_map;
-
+#ifdef CONFIG_NVME_TARGET_DEBUGFS
+	struct dentry *debugfs_dir;
+#endif
 	spinlock_t error_lock;
 	u64 err_counter;
 	struct nvme_error_slot slots[NVMET_ERROR_LOG_SLOTS];
@@ -262,7 +264,9 @@ struct nvmet_subsys {
 
 	struct list_head hosts;
 	bool allow_any_host;
-
+#ifdef CONFIG_NVME_TARGET_DEBUGFS
+	struct dentry *debugfs_dir;
+#endif
 	u16 max_qid;
 
 	u64 ver;
@@ -350,6 +354,8 @@ struct nvmet_fabrics_ops {
 	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
 	void (*disc_traddr)(struct nvmet_req *req,
 			struct nvmet_port *port, char *traddr);
+	ssize_t (*host_traddr)(struct nvmet_ctrl *ctrl,
+			char *traddr, size_t traddr_len);
 	u16 (*install_queue)(struct nvmet_sq *nvme_sq);
 	void (*discovery_chg)(struct nvmet_port *port);
 	u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
@@ -498,6 +504,8 @@ struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
 		struct nvmet_req *req);
 void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
 u16 nvmet_check_ctrl_status(struct nvmet_req *req);
+ssize_t nvmet_ctrl_host_traddr(struct nvmet_ctrl *ctrl,
+		char *traddr, size_t traddr_len);
 
 struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
 		enum nvme_subsys_type type);
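Transports fill in ops->host_traddr, and consumers such as the new debugfs code reach it through the nvmet_ctrl_host_traddr() declaration added above. A user-space sketch of that dispatch pattern, with simplified stand-in types rather than the kernel definitions:

/* User-space sketch of the ops dispatch declared above: the core layer
 * forwards to the transport's host_traddr only when it is implemented.
 * Types and the sample address are simplified stand-ins. */
#include <stdio.h>
#include <errno.h>

struct ctrl;
struct fabrics_ops {
	long (*host_traddr)(struct ctrl *c, char *buf, size_t len);
};
struct ctrl { const struct fabrics_ops *ops; };

static long ctrl_host_traddr(struct ctrl *c, char *buf, size_t len)
{
	if (!c->ops->host_traddr)
		return -EOPNOTSUPP;	/* transport does not report it */
	return c->ops->host_traddr(c, buf, len);
}

static long tcp_host_traddr(struct ctrl *c, char *buf, size_t len)
{
	return snprintf(buf, len, "192.168.1.10");	/* made-up peer */
}

int main(void)
{
	struct fabrics_ops ops = { .host_traddr = tcp_host_traddr };
	struct ctrl c = { .ops = &ops };
	char buf[64];

	if (ctrl_host_traddr(&c, buf, sizeof(buf)) > 0)
		printf("host traddr: %s\n", buf);
	return 0;
}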
@@ -306,7 +306,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
 		ns = nvme_find_get_ns(ctrl, nsid);
 		if (unlikely(!ns)) {
 			pr_err("failed to get passthru ns nsid:%u\n", nsid);
-			status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+			status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
 			goto out;
 		}
 
@@ -426,7 +426,7 @@ u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
 		 * emulated in the future if regular targets grow support for
 		 * this feature.
 		 */
-		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
 	}
 
 	return nvmet_setup_passthru_command(req);
@@ -478,7 +478,7 @@ static u16 nvmet_passthru_get_set_features(struct nvmet_req *req)
 	case NVME_FEAT_RESV_PERSIST:
 		/* No reservations, see nvmet_parse_passthru_io_cmd() */
 	default:
-		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
 	}
 }
 
@@ -546,7 +546,7 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
 			req->p.use_workqueue = true;
 			return NVME_SC_SUCCESS;
 		}
-		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
 	case NVME_ID_CNS_NS:
 		req->execute = nvmet_passthru_execute_cmd;
 		req->p.use_workqueue = true;
@@ -558,7 +558,7 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
 			req->p.use_workqueue = true;
 			return NVME_SC_SUCCESS;
 		}
-		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
 	default:
 		return nvmet_setup_passthru_command(req);
 	}
@@ -852,12 +852,12 @@ static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
 	if (!nvme_is_write(rsp->req.cmd)) {
 		rsp->req.error_loc =
			offsetof(struct nvme_common_command, opcode);
-		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 	}
 
 	if (off + len > rsp->queue->dev->inline_data_size) {
 		pr_err("invalid inline data offset!\n");
-		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
+		return NVME_SC_SGL_INVALID_OFFSET | NVME_STATUS_DNR;
 	}
 
 	/* no data command? */
@@ -919,7 +919,7 @@ static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
 			pr_err("invalid SGL subtype: %#x\n", sgl->type);
 			rsp->req.error_loc =
				offsetof(struct nvme_common_command, dptr);
-			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+			return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		}
 	case NVME_KEY_SGL_FMT_DATA_DESC:
 		switch (sgl->type & 0xf) {
@@ -931,12 +931,12 @@ static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
 			pr_err("invalid SGL subtype: %#x\n", sgl->type);
 			rsp->req.error_loc =
				offsetof(struct nvme_common_command, dptr);
-			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+			return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		}
 	default:
 		pr_err("invalid SGL type: %#x\n", sgl->type);
 		rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
-		return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
+		return NVME_SC_SGL_INVALID_TYPE | NVME_STATUS_DNR;
 	}
 }
 
@@ -2000,6 +2000,17 @@ static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
 	}
 }
 
+static ssize_t nvmet_rdma_host_port_addr(struct nvmet_ctrl *ctrl,
+		char *traddr, size_t traddr_len)
+{
+	struct nvmet_sq *nvme_sq = ctrl->sqs[0];
+	struct nvmet_rdma_queue *queue =
+		container_of(nvme_sq, struct nvmet_rdma_queue, nvme_sq);
+
+	return snprintf(traddr, traddr_len, "%pISc",
+			(struct sockaddr *)&queue->cm_id->route.addr.dst_addr);
+}
+
 static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
 {
 	if (ctrl->pi_support)
@@ -2024,6 +2035,7 @@ static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
 	.queue_response = nvmet_rdma_queue_response,
 	.delete_ctrl = nvmet_rdma_delete_ctrl,
 	.disc_traddr = nvmet_rdma_disc_port_addr,
+	.host_traddr = nvmet_rdma_host_port_addr,
 	.get_mdts = nvmet_rdma_get_mdts,
 	.get_max_queue_size = nvmet_rdma_get_max_queue_size,
 };
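"%pISc" is the kernel's printk extension for formatting a struct sockaddr as a bare address, without the port. A rough user-space analogue using inet_ntop(), with a made-up peer address:

/* Rough user-space analogue of "%pISc": print the address portion of a
 * sockaddr, no port. The sample address is invented. */
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	struct sockaddr_in peer = { .sin_family = AF_INET };
	char traddr[INET6_ADDRSTRLEN];

	inet_pton(AF_INET, "192.168.1.10", &peer.sin_addr);
	inet_ntop(AF_INET, &peer.sin_addr, traddr, sizeof(traddr));
	printf("%s\n", traddr);	/* 192.168.1.10 */
	return 0;
}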
@@ -416,10 +416,10 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
 	if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
			  NVME_SGL_FMT_OFFSET)) {
 		if (!nvme_is_write(cmd->req.cmd))
-			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+			return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 
 		if (len > cmd->req.port->inline_data_size)
-			return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
+			return NVME_SC_SGL_INVALID_OFFSET | NVME_STATUS_DNR;
 		cmd->pdu_len = len;
 	}
 	cmd->req.transfer_len += len;
@@ -2167,6 +2167,19 @@ static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
 	}
 }
 
+static ssize_t nvmet_tcp_host_port_addr(struct nvmet_ctrl *ctrl,
+		char *traddr, size_t traddr_len)
+{
+	struct nvmet_sq *sq = ctrl->sqs[0];
+	struct nvmet_tcp_queue *queue =
+		container_of(sq, struct nvmet_tcp_queue, nvme_sq);
+
+	if (queue->sockaddr_peer.ss_family == AF_UNSPEC)
+		return -EINVAL;
+	return snprintf(traddr, traddr_len, "%pISc",
+			(struct sockaddr *)&queue->sockaddr_peer);
+}
+
 static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
 	.owner = THIS_MODULE,
 	.type = NVMF_TRTYPE_TCP,
@@ -2177,6 +2190,7 @@ static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
 	.delete_ctrl = nvmet_tcp_delete_ctrl,
 	.install_queue = nvmet_tcp_install_queue,
 	.disc_traddr = nvmet_tcp_disc_port_addr,
+	.host_traddr = nvmet_tcp_host_port_addr,
 };
 
 static int __init nvmet_tcp_init(void)
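Unlike the RDMA variant, the TCP queue may not have recorded a peer yet, hence the AF_UNSPEC guard: a zeroed sockaddr_storage has ss_family == AF_UNSPEC (0), meaning "no peer address learned". A small check of that assumption:

/* Why the AF_UNSPEC check above works: a zeroed sockaddr_storage has
 * ss_family == AF_UNSPEC (0). */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
	struct sockaddr_storage peer;

	memset(&peer, 0, sizeof(peer));
	printf("family=%d unspec=%d\n", peer.ss_family,
	       peer.ss_family == AF_UNSPEC);	/* family=0 unspec=1 */
	return 0;
}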
@@ -100,7 +100,7 @@ void nvmet_execute_identify_ns_zns(struct nvmet_req *req)
 
 	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
 		req->error_loc = offsetof(struct nvme_identify, nsid);
-		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+		status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
 		goto out;
 	}
 
@@ -121,7 +121,7 @@ void nvmet_execute_identify_ns_zns(struct nvmet_req *req)
 	}
 
 	if (!bdev_is_zoned(req->ns->bdev)) {
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		req->error_loc = offsetof(struct nvme_identify, nsid);
 		goto out;
 	}
@@ -158,17 +158,17 @@ static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
 
 	if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
 		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba);
-		return NVME_SC_LBA_RANGE | NVME_SC_DNR;
+		return NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
 	}
 
 	if (out_bufsize < sizeof(struct nvme_zone_report)) {
 		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd);
-		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 	}
 
 	if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) {
 		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra);
-		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 	}
 
 	switch (req->cmd->zmr.pr) {
@@ -177,7 +177,7 @@ static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
 		break;
 	default:
 		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr);
-		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 	}
 
 	switch (req->cmd->zmr.zrasf) {
@@ -193,7 +193,7 @@ static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
 	default:
 		req->error_loc =
			offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf);
-		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 	}
 
 	return NVME_SC_SUCCESS;
@@ -341,7 +341,7 @@ static u16 blkdev_zone_mgmt_errno_to_nvme_status(int ret)
 		return NVME_SC_SUCCESS;
 	case -EINVAL:
 	case -EIO:
-		return NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
+		return NVME_SC_ZONE_INVALID_TRANSITION | NVME_STATUS_DNR;
 	default:
 		return NVME_SC_INTERNAL;
 	}
@@ -463,7 +463,7 @@ static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req)
 	default:
 		/* this is needed to quiet compiler warning */
 		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
-		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 	}
 
 	return NVME_SC_SUCCESS;
@@ -481,7 +481,7 @@ static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
 
 	if (op == REQ_OP_LAST) {
 		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
-		status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
+		status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_STATUS_DNR;
 		goto out;
 	}
 
@@ -493,13 +493,13 @@ static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
 
 	if (sect >= get_capacity(bdev->bd_disk)) {
 		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
-		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+		status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
 		goto out;
 	}
 
 	if (sect & (zone_sectors - 1)) {
 		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		goto out;
 	}
 
@@ -551,13 +551,13 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
 
 	if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
 		req->error_loc = offsetof(struct nvme_rw_command, slba);
-		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+		status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
 		goto out;
 	}
 
 	if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) {
 		req->error_loc = offsetof(struct nvme_rw_command, slba);
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		goto out;
 	}
 
@@ -590,7 +590,7 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
 	}
 
 	if (total_len != nvmet_rw_data_len(req)) {
-		status = NVME_SC_INTERNAL | NVME_SC_DNR;
+		status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
 		goto out_put_bio;
 	}
 
@@ -1363,6 +1363,16 @@ lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport,
 		atomic_inc(&lpfc_nvmet->xmt_ls_abort);
 }
 
+static int
+lpfc_nvmet_host_traddr(void *hosthandle, u64 *wwnn, u64 *wwpn)
+{
+	struct lpfc_nodelist *ndlp = hosthandle;
+
+	*wwnn = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
+	*wwpn = wwn_to_u64(ndlp->nlp_portname.u.wwn);
+	return 0;
+}
+
 static void
 lpfc_nvmet_host_release(void *hosthandle)
 {
@@ -1413,6 +1423,7 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = {
 	.ls_req = lpfc_nvmet_ls_req,
 	.ls_abort = lpfc_nvmet_ls_abort,
 	.host_release = lpfc_nvmet_host_release,
+	.host_traddr = lpfc_nvmet_host_traddr,
 
 	.max_hw_queues = 1,
 	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
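lpfc stores WWNs as 8-byte big-endian arrays; wwn_to_u64() (from the SCSI FC transport headers) folds them into the u64 the generic formatting expects. A user-space equivalent, with made-up bytes:

/* User-space equivalent of the wwn_to_u64() helper used above: fold an
 * 8-byte big-endian WWN into a u64. Example bytes are invented. */
#include <stdio.h>
#include <stdint.h>

static uint64_t wwn_to_u64(const uint8_t wwn[8])
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | wwn[i];
	return v;
}

int main(void)
{
	const uint8_t wwpn[8] = { 0x10, 0x00, 0x00, 0x90,
				  0xfa, 0x94, 0x27, 0x79 };

	printf("pn-0x%llx\n", (unsigned long long)wwn_to_u64(wwpn));
	/* pn-0x10000090fa942779 */
	return 0;
}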
@@ -920,6 +920,9 @@ struct nvmet_fc_target_port {
  *       further references to hosthandle.
  *       Entrypoint is Mandatory if the lldd calls nvmet_fc_invalidate_host().
  *
+ * @host_traddr: called by the transport to retrieve the node name and
+ *       port name of the host port address.
+ *
  * @max_hw_queues: indicates the maximum number of hw queues the LLDD
  *       supports for cpu affinitization.
  *       Value is Mandatory. Must be at least 1.
@@ -975,6 +978,7 @@ struct nvmet_fc_target_template {
 	void (*ls_abort)(struct nvmet_fc_target_port *targetport,
			void *hosthandle, struct nvmefc_ls_req *lsreq);
 	void (*host_release)(void *hosthandle);
+	int (*host_traddr)(void *hosthandle, u64 *wwnn, u64 *wwpn);
 
 	u32 max_hw_queues;
 	u16 max_sgl_segments;
@@ -25,6 +25,9 @@
 
 #define NVME_NSID_ALL 0xffffffff
 
+/* Special NSSR value, 'NVMe' */
+#define NVME_SUBSYS_RESET 0x4E564D65
+
 enum nvme_subsys_type {
 	/* Referral to another discovery type target subsystem */
 	NVME_NQN_DISC = 1,
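The NSSR magic is simply ASCII "NVMe" packed into a 32-bit word, as a quick check shows:

/* The NSSR magic is ASCII "NVMe": 'N'=0x4E 'V'=0x56 'M'=0x4D 'e'=0x65. */
#include <stdio.h>

int main(void)
{
	unsigned int v = ('N' << 24) | ('V' << 16) | ('M' << 8) | 'e';

	printf("0x%08X\n", v);	/* 0x4E564D65, i.e. NVME_SUBSYS_RESET */
	return 0;
}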
@@ -1846,6 +1849,7 @@ enum {
 	/*
 	 * Generic Command Status:
 	 */
+	NVME_SCT_GENERIC = 0x0,
 	NVME_SC_SUCCESS = 0x0,
 	NVME_SC_INVALID_OPCODE = 0x1,
 	NVME_SC_INVALID_FIELD = 0x2,
@@ -1893,6 +1897,7 @@ enum {
 	/*
 	 * Command Specific Status:
 	 */
+	NVME_SCT_COMMAND_SPECIFIC = 0x100,
 	NVME_SC_CQ_INVALID = 0x100,
 	NVME_SC_QID_INVALID = 0x101,
 	NVME_SC_QUEUE_SIZE = 0x102,
@@ -1966,6 +1971,7 @@ enum {
 	/*
 	 * Media and Data Integrity Errors:
 	 */
+	NVME_SCT_MEDIA_ERROR = 0x200,
 	NVME_SC_WRITE_FAULT = 0x280,
 	NVME_SC_READ_ERROR = 0x281,
 	NVME_SC_GUARD_CHECK = 0x282,
@@ -1978,6 +1984,7 @@ enum {
 	/*
 	 * Path-related Errors:
 	 */
+	NVME_SCT_PATH = 0x300,
 	NVME_SC_INTERNAL_PATH_ERROR = 0x300,
 	NVME_SC_ANA_PERSISTENT_LOSS = 0x301,
 	NVME_SC_ANA_INACCESSIBLE = 0x302,
@@ -1986,11 +1993,17 @@ enum {
 	NVME_SC_HOST_PATH_ERROR = 0x370,
 	NVME_SC_HOST_ABORTED_CMD = 0x371,
 
-	NVME_SC_CRD = 0x1800,
-	NVME_SC_MORE = 0x2000,
-	NVME_SC_DNR = 0x4000,
+	NVME_SC_MASK = 0x00ff,		/* Status Code */
+	NVME_SCT_MASK = 0x0700,		/* Status Code Type */
+	NVME_SCT_SC_MASK = NVME_SCT_MASK | NVME_SC_MASK,
+
+	NVME_STATUS_CRD = 0x1800,	/* Command Retry Delayed */
+	NVME_STATUS_MORE = 0x2000,
+	NVME_STATUS_DNR = 0x4000,	/* Do Not Retry */
 };
 
+#define NVME_SCT(status) ((status) >> 8 & 7)
+
 struct nvme_completion {
 	/*
 	 * Used by Admin and Fabrics commands to return data:
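With the masks and the NVME_SCT() helper in place, a status word splits cleanly into type, code, and retry bits. A user-space sketch, with the values copied from the hunk above:

/* Decoding a status word with the new masks; the constants below are
 * copied from the hunk above. */
#include <stdio.h>

enum {
	NVME_SC_MASK     = 0x00ff,	/* Status Code */
	NVME_SCT_MASK    = 0x0700,	/* Status Code Type */
	NVME_SCT_SC_MASK = NVME_SCT_MASK | NVME_SC_MASK,

	NVME_STATUS_CRD  = 0x1800,	/* Command Retry Delayed */
	NVME_STATUS_MORE = 0x2000,
	NVME_STATUS_DNR  = 0x4000,	/* Do Not Retry */
};

#define NVME_SCT(status) ((status) >> 8 & 7)

int main(void)
{
	/* e.g. a path error (SCT 0x3, SC 0x70) with DNR set */
	unsigned int status = 0x370 | NVME_STATUS_DNR;

	printf("sct=%#x sc=%#x sct_sc=%#x dnr=%d\n",
	       NVME_SCT(status), status & NVME_SC_MASK,
	       status & NVME_SCT_SC_MASK, !!(status & NVME_STATUS_DNR));
	/* sct=0x3 sc=0x70 sct_sc=0x370 dnr=1 */
	return 0;
}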