nvme updates for Linux 5.14:

 - move the ACPI StorageD3 code to drivers/acpi/ and add quirks for
   certain AMD CPUs (Mario Limonciello)
 - zoned device support for nvmet (Chaitanya Kulkarni)
 - fix the rules for changing the serial number in nvmet (Noam Gottlieb)
 - various small fixes and cleanups (Dan Carpenter, JK Kim,
   Chaitanya Kulkarni, Hannes Reinecke, Wesley Sheng, Geert Uytterhoeven,
   Daniel Wagner)

-----BEGIN PGP SIGNATURE-----

iQI/BAABCgApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAmDRiswLHGhjaEBsc3Qu
ZGUACgkQD55TZVIEUYOz6g//aalapvqbZJpEUQJUVDRmZM9af6Js1reFYy6AW8D8
h0rDi3nE7sq76ku6kJDleg5141j9CJJpIpCkxrC2Hc8V41mKEjYbXoHwy0qYTYFY
iroVq9/pL+qDzXolWu8epWk2bYrxA+UjksvQ1UPMk4cz3fCmlSOe/fr3DrCLQ/Vy
vE9YhisTGH9HoJzGITp/YG2seNJWql3tSh3dgbmPNZuMaW4+nu3VPjLbSKhJFT7Z
WZzxopBQdkyyhO+0odPI1qqoYpAmXtKA5k+5Qbk6Cfa6kC8J4xHsmdbQ7MS5P/lc
zphtHjU8q90vU6Js37Gy2n+IMbeut2feseYVou715jt8j2I14wvotI7w9wuEPUMQ
BTW3CXmXK0XkS55i/8k1GCrkrZaNyF8Xps3zzC6ZYFlAJGYbur1XEyPFgjPNRmCZ
G8B/dUG1qQCJd8C0TPm7TGjDKkiPtRu+ICx+rhD82EEIDBOWKm41O4MBLlWSwb3Y
P9W1+t5LkDKhVPf3C4aZTRJakqAKfeBFCQkXAg98kwiuSFhkw3DelJaaCEUMB8KS
8pGU9fpsp9nVOmrQjTAkLdVGQyzw7Jj5P7tZmAsjkzu+LT31AmW/4WjPzI2tWCia
S22sPXRxYH6la2pHFiTH4ksPIsSGEKGhNkYd+cfHpstL9T4c/0cHzQVKoRc9mitR
Tg0=
=Gfad
-----END PGP SIGNATURE-----

Merge tag 'nvme-5.14-2021-06-22' of git://git.infradead.org/nvme into for-5.14/drivers

Pull NVMe updates from Christoph:

"nvme updates for Linux 5.14:

 - move the ACPI StorageD3 code to drivers/acpi/ and add quirks for
   certain AMD CPUs (Mario Limonciello)
 - zoned device support for nvmet (Chaitanya Kulkarni)
 - fix the rules for changing the serial number in nvmet (Noam Gottlieb)
 - various small fixes and cleanups (Dan Carpenter, JK Kim,
   Chaitanya Kulkarni, Hannes Reinecke, Wesley Sheng, Geert Uytterhoeven,
   Daniel Wagner)"

* tag 'nvme-5.14-2021-06-22' of git://git.infradead.org/nvme: (38 commits)
  nvmet: use NVMET_MAX_NAMESPACES to set nn value
  nvme.h: add missing nvme_lba_range_type endianness annotations
  nvme: remove zeroout memset call for struct
  nvme-pci: remove zeroout memset call for struct
  nvmet: remove zeroout memset call for struct
  nvmet: add ZBD over ZNS backend support
  nvmet: add Command Set Identifier support
  nvmet: add nvmet_req_bio put helper for backends
  nvmet: add req cns error complete helper
  block: export blk_next_bio()
  nvmet: remove local variable
  nvmet: use nvme status value directly
  nvmet: use u32 type for the local variable nsid
  nvmet: use u32 for nvmet_subsys max_nsid
  nvmet: use req->cmd directly in file-ns fast path
  nvmet: use req->cmd directly in bdev-ns fast path
  nvmet: make ver stable once connection established
  nvmet: allow mn change if subsys not discovered
  nvmet: make sn stable once connection was established
  nvmet: change sn size and check validity
  ...
commit 5ed9b35702
@@ -21,6 +21,7 @@ struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
 
 	return new;
 }
+EXPORT_SYMBOL_GPL(blk_next_bio);
 
 int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, int flags,
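For orientation, the export above is what lets the new nvmet zoned-namespace backend later in this series build chained multi-bio requests the same way the block discard/write-zeroes helpers do. The following is only an illustrative caller sketch, not code from this series; the function name, the fixed chunk size and the choice of REQ_OP_WRITE_ZEROES are placeholders:

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * blk_next_bio() allocates a fresh bio; if a previous bio is passed in, it
 * chains that bio to the new one and submits it.  The caller therefore only
 * ever fills in the bio it got back and submits the final one itself.
 */
static void example_submit_chained(struct block_device *bdev, sector_t sector,
				   sector_t nr_sects)
{
	struct bio *bio = NULL;

	while (nr_sects) {
		sector_t chunk = min_t(sector_t, nr_sects, 1 << 10);

		bio = blk_next_bio(bio, 0, GFP_KERNEL);
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = chunk << 9;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;

		sector += chunk;
		nr_sects -= chunk;
	}
	if (bio)
		submit_bio_wait(bio);	/* waits for the whole chain */
}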
@@ -1340,4 +1340,36 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
 	return 1;
 }
 EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
+
+/**
+ * acpi_storage_d3 - Check if D3 should be used in the suspend path
+ * @dev: Device to check
+ *
+ * Return %true if the platform firmware wants @dev to be programmed
+ * into D3hot or D3cold (if supported) in the suspend path, or %false
+ * when there is no specific preference. On some platforms, if this
+ * hint is ignored, @dev may remain unresponsive after suspending the
+ * platform as a whole.
+ *
+ * Although the property has storage in the name it actually is
+ * applied to the PCIe slot and plugging in a non-storage device the
+ * same platform restrictions will likely apply.
+ */
+bool acpi_storage_d3(struct device *dev)
+{
+	struct acpi_device *adev = ACPI_COMPANION(dev);
+	u8 val;
+
+	if (force_storage_d3())
+		return true;
+
+	if (!adev)
+		return false;
+	if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
+			&val))
+		return false;
+	return val == 1;
+}
+EXPORT_SYMBOL_GPL(acpi_storage_d3);
+
 #endif /* CONFIG_PM */
@@ -236,6 +236,15 @@ static inline int suspend_nvs_save(void) { return 0; }
 static inline void suspend_nvs_restore(void) {}
 #endif
 
+#ifdef CONFIG_X86
+bool force_storage_d3(void);
+#else
+static inline bool force_storage_d3(void)
+{
+	return false;
+}
+#endif
+
 /*--------------------------------------------------------------------------
 				Device properties
   -------------------------------------------------------------------------- */
@@ -135,3 +135,28 @@ bool acpi_device_always_present(struct acpi_device *adev)
 
 	return ret;
 }
+
+/*
+ * AMD systems from Renoir and Lucienne *require* that the NVME controller
+ * is put into D3 over a Modern Standby / suspend-to-idle cycle.
+ *
+ * This is "typically" accomplished using the `StorageD3Enable`
+ * property in the _DSD that is checked via the `acpi_storage_d3` function
+ * but this property was introduced after many of these systems launched
+ * and most OEM systems don't have it in their BIOS.
+ *
+ * The Microsoft documentation for StorageD3Enable mentioned that Windows has
+ * a hardcoded allowlist for D3 support, which was used for these platforms.
+ *
+ * This allows quirking on Linux in a similar fashion.
+ */
+static const struct x86_cpu_id storage_d3_cpu_ids[] = {
+	X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL),	/* Renoir */
+	X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL),	/* Lucienne */
+	{}
+};
+
+bool force_storage_d3(void)
+{
+	return x86_match_cpu(storage_d3_cpu_ids);
+}
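Together with acpi_storage_d3() added above, this gives drivers a single question to ask at probe time: does the platform (via the StorageD3Enable _DSD property or this CPU allowlist) require D3 across suspend-to-idle? The nvme-pci hunk later in this diff does exactly that. The sketch below illustrates the consumer side only; the helper name and the quirk flag are made-up placeholders, not part of this series:

#include <linux/acpi.h>
#include <linux/pci.h>

#define EXAMPLE_QUIRK_SIMPLE_SUSPEND	(1UL << 0)	/* placeholder flag */

/* Probe-time check modelled on the nvme_probe() change in this series. */
static void example_probe_d3_check(struct pci_dev *pdev, unsigned long *quirks)
{
	if (acpi_storage_d3(&pdev->dev)) {
		dev_info(&pdev->dev,
			 "platform requires D3 across suspend-to-idle\n");
		*quirks |= EXAMPLE_QUIRK_SIMPLE_SUSPEND;
	}
}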
@@ -21,7 +21,7 @@ config NVME_MULTIPATH
 	help
 	  This option enables support for multipath access to NVMe
 	  subsystems. If this option is enabled only a single
-	  /dev/nvmeXnY device will show up for each NVMe namespaces,
+	  /dev/nvmeXnY device will show up for each NVMe namespace,
 	  even if it is accessible through multiple controllers.
 
 config NVME_HWMON
@@ -721,9 +721,7 @@ EXPORT_SYMBOL_GPL(__nvme_check_ready);
 
 static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
 {
-	struct nvme_command c;
-
-	memset(&c, 0, sizeof(c));
+	struct nvme_command c = { };
 
 	c.directive.opcode = nvme_admin_directive_send;
 	c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
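The hunk above is representative of all the "remove zeroout memset call for struct" commits in this pull: an empty designated initializer zeroes every member of the on-stack command, making the separate memset() redundant. Below is a stand-alone, user-space illustration of the equivalence with a toy struct (not kernel code); note that `{ }` with no `0` inside is a GNU C extension, and that it guarantees zeroed members, not necessarily zeroed padding bytes:

#include <assert.h>
#include <string.h>

struct toy_cmd {
	unsigned char opcode;
	unsigned int nsid;
	unsigned long long prp1;
};

int main(void)
{
	struct toy_cmd a;
	struct toy_cmd b = { };		/* all members zero-initialized */

	memset(&a, 0, sizeof(a));	/* the pattern being removed */

	assert(a.opcode == b.opcode);
	assert(a.nsid == b.nsid);
	assert(a.prp1 == b.prp1);
	return 0;
}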
@ -748,9 +746,8 @@ static int nvme_enable_streams(struct nvme_ctrl *ctrl)
|
||||
static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
|
||||
struct streams_directive_params *s, u32 nsid)
|
||||
{
|
||||
struct nvme_command c;
|
||||
struct nvme_command c = { };
|
||||
|
||||
memset(&c, 0, sizeof(c));
|
||||
memset(s, 0, sizeof(*s));
|
||||
|
||||
c.directive.opcode = nvme_admin_directive_recv;
|
||||
@ -1460,10 +1457,9 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
|
||||
unsigned int dword11, void *buffer, size_t buflen, u32 *result)
|
||||
{
|
||||
union nvme_result res = { 0 };
|
||||
struct nvme_command c;
|
||||
struct nvme_command c = { };
|
||||
int ret;
|
||||
|
||||
memset(&c, 0, sizeof(c));
|
||||
c.features.opcode = op;
|
||||
c.features.fid = cpu_to_le32(fid);
|
||||
c.features.dword11 = cpu_to_le32(dword11);
|
||||
@ -1591,9 +1587,8 @@ int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
|
||||
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
|
||||
u32 max_integrity_segments)
|
||||
{
|
||||
struct blk_integrity integrity;
|
||||
struct blk_integrity integrity = { };
|
||||
|
||||
memset(&integrity, 0, sizeof(integrity));
|
||||
switch (pi_type) {
|
||||
case NVME_NS_DPS_PI_TYPE3:
|
||||
integrity.profile = &t10_pi_type3_crc;
|
||||
@ -1964,13 +1959,12 @@ static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
|
||||
static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
|
||||
u64 key, u64 sa_key, u8 op)
|
||||
{
|
||||
struct nvme_command c;
|
||||
struct nvme_command c = { };
|
||||
u8 data[16] = { 0, };
|
||||
|
||||
put_unaligned_le64(key, &data[0]);
|
||||
put_unaligned_le64(sa_key, &data[8]);
|
||||
|
||||
memset(&c, 0, sizeof(c));
|
||||
c.common.opcode = op;
|
||||
c.common.cdw10 = cpu_to_le32(cdw10);
|
||||
|
||||
@ -2042,9 +2036,8 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
|
||||
bool send)
|
||||
{
|
||||
struct nvme_ctrl *ctrl = data;
|
||||
struct nvme_command cmd;
|
||||
struct nvme_command cmd = { };
|
||||
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
if (send)
|
||||
cmd.common.opcode = nvme_admin_security_send;
|
||||
else
|
||||
|
@ -190,11 +190,10 @@ EXPORT_SYMBOL_GPL(nvmf_reg_read32);
|
||||
*/
|
||||
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
|
||||
{
|
||||
struct nvme_command cmd;
|
||||
struct nvme_command cmd = { };
|
||||
union nvme_result res;
|
||||
int ret;
|
||||
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
cmd.prop_get.opcode = nvme_fabrics_command;
|
||||
cmd.prop_get.fctype = nvme_fabrics_type_property_get;
|
||||
cmd.prop_get.attrib = 1;
|
||||
@ -236,10 +235,9 @@ EXPORT_SYMBOL_GPL(nvmf_reg_read64);
|
||||
*/
|
||||
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
|
||||
{
|
||||
struct nvme_command cmd;
|
||||
struct nvme_command cmd = { };
|
||||
int ret;
|
||||
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
cmd.prop_set.opcode = nvme_fabrics_command;
|
||||
cmd.prop_set.fctype = nvme_fabrics_type_property_set;
|
||||
cmd.prop_set.attrib = 0;
|
||||
@ -364,12 +362,11 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
|
||||
*/
|
||||
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
|
||||
{
|
||||
struct nvme_command cmd;
|
||||
struct nvme_command cmd = { };
|
||||
union nvme_result res;
|
||||
struct nvmf_connect_data *data;
|
||||
int ret;
|
||||
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
cmd.connect.opcode = nvme_fabrics_command;
|
||||
cmd.connect.fctype = nvme_fabrics_type_connect;
|
||||
cmd.connect.qid = 0;
|
||||
@ -432,12 +429,11 @@ EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
|
||||
*/
|
||||
int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
|
||||
{
|
||||
struct nvme_command cmd;
|
||||
struct nvme_command cmd = { };
|
||||
struct nvmf_connect_data *data;
|
||||
union nvme_result res;
|
||||
int ret;
|
||||
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
cmd.connect.opcode = nvme_fabrics_command;
|
||||
cmd.connect.fctype = nvme_fabrics_type_connect;
|
||||
cmd.connect.qid = cpu_to_le16(qid);
|
||||
|
@ -3111,7 +3111,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
|
||||
}
|
||||
|
||||
/* FC-NVME supports normal SGL Data Block Descriptors */
|
||||
if (!(ctrl->ctrl.sgls & ((1 << 0) | (1 << 1)))) {
|
||||
if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) {
|
||||
dev_err(ctrl->ctrl.device,
|
||||
"Mandatory sgls are not supported!\n");
|
||||
goto out_disconnect_admin_queue;
|
||||
|
@ -177,6 +177,20 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
|
||||
metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
|
||||
}
|
||||
|
||||
static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
|
||||
struct nvme_ns *ns, __u32 nsid)
|
||||
{
|
||||
if (ns && nsid != ns->head->ns_id) {
|
||||
dev_err(ctrl->device,
|
||||
"%s: nsid (%u) in cmd does not match nsid (%u)"
|
||||
"of namespace\n",
|
||||
current->comm, nsid, ns->head->ns_id);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
|
||||
struct nvme_passthru_cmd __user *ucmd)
|
||||
{
|
||||
@ -192,12 +206,8 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
|
||||
return -EFAULT;
|
||||
if (cmd.flags)
|
||||
return -EINVAL;
|
||||
if (ns && cmd.nsid != ns->head->ns_id) {
|
||||
dev_err(ctrl->device,
|
||||
"%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
|
||||
current->comm, cmd.nsid, ns->head->ns_id);
|
||||
if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
memset(&c, 0, sizeof(c));
|
||||
c.common.opcode = cmd.opcode;
|
||||
@ -242,12 +252,8 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
|
||||
return -EFAULT;
|
||||
if (cmd.flags)
|
||||
return -EINVAL;
|
||||
if (ns && cmd.nsid != ns->head->ns_id) {
|
||||
dev_err(ctrl->device,
|
||||
"%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
|
||||
current->comm, cmd.nsid, ns->head->ns_id);
|
||||
if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
memset(&c, 0, sizeof(c));
|
||||
c.common.opcode = cmd.opcode;
|
||||
|
@ -435,11 +435,6 @@ static void nvme_requeue_work(struct work_struct *work)
|
||||
next = bio->bi_next;
|
||||
bio->bi_next = NULL;
|
||||
|
||||
/*
|
||||
* Reset disk to the mpath node and resubmit to select a new
|
||||
* path.
|
||||
*/
|
||||
bio_set_dev(bio, head->disk->part0);
|
||||
submit_bio_noacct(bio);
|
||||
}
|
||||
}
|
||||
@ -818,6 +813,13 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
|
||||
!(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA))
|
||||
return 0;
|
||||
|
||||
if (!ctrl->max_namespaces ||
|
||||
ctrl->max_namespaces > le32_to_cpu(id->nn)) {
|
||||
dev_err(ctrl->device,
|
||||
"Invalid MNAN value %u\n", ctrl->max_namespaces);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ctrl->anacap = id->anacap;
|
||||
ctrl->anatt = id->anatt;
|
||||
ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
|
||||
|
@ -869,6 +869,11 @@ static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
|
||||
{
|
||||
return ctrl->sgls & ((1 << 0) | (1 << 1));
|
||||
}
|
||||
|
||||
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
|
||||
u8 opcode);
|
||||
void nvme_execute_passthru_rq(struct request *rq);
|
||||
|
@ -307,13 +307,12 @@ static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
|
||||
|
||||
static void nvme_dbbuf_set(struct nvme_dev *dev)
|
||||
{
|
||||
struct nvme_command c;
|
||||
struct nvme_command c = { };
|
||||
unsigned int i;
|
||||
|
||||
if (!dev->dbbuf_dbs)
|
||||
return;
|
||||
|
||||
memset(&c, 0, sizeof(c));
|
||||
c.dbbuf.opcode = nvme_admin_dbbuf;
|
||||
c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
|
||||
c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);
|
||||
@ -536,7 +535,7 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
|
||||
|
||||
avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
|
||||
|
||||
if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
|
||||
if (!nvme_ctrl_sgl_supported(&dev->ctrl))
|
||||
return false;
|
||||
if (!iod->nvmeq->qid)
|
||||
return false;
|
||||
@ -559,7 +558,6 @@ static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
|
||||
dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
|
||||
dma_addr = next_dma_addr;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
|
||||
@ -576,7 +574,6 @@ static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
|
||||
dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
|
||||
dma_addr = next_dma_addr;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
|
||||
@ -855,7 +852,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
|
||||
&cmnd->rw, &bv);
|
||||
|
||||
if (iod->nvmeq->qid && sgl_threshold &&
|
||||
dev->ctrl.sgls & ((1 << 0) | (1 << 1)))
|
||||
nvme_ctrl_sgl_supported(&dev->ctrl))
|
||||
return nvme_setup_sgl_simple(dev, req,
|
||||
&cmnd->rw, &bv);
|
||||
}
|
||||
@ -1032,7 +1029,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
|
||||
|
||||
static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
|
||||
{
|
||||
u16 tmp = nvmeq->cq_head + 1;
|
||||
u32 tmp = nvmeq->cq_head + 1;
|
||||
|
||||
if (tmp == nvmeq->q_depth) {
|
||||
nvmeq->cq_head = 0;
|
||||
@ -1114,9 +1111,8 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
|
||||
{
|
||||
struct nvme_dev *dev = to_nvme_dev(ctrl);
|
||||
struct nvme_queue *nvmeq = &dev->queues[0];
|
||||
struct nvme_command c;
|
||||
struct nvme_command c = { };
|
||||
|
||||
memset(&c, 0, sizeof(c));
|
||||
c.common.opcode = nvme_admin_async_event;
|
||||
c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
|
||||
nvme_submit_cmd(nvmeq, &c, true);
|
||||
@ -1124,9 +1120,8 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
|
||||
|
||||
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
|
||||
{
|
||||
struct nvme_command c;
|
||||
struct nvme_command c = { };
|
||||
|
||||
memset(&c, 0, sizeof(c));
|
||||
c.delete_queue.opcode = opcode;
|
||||
c.delete_queue.qid = cpu_to_le16(id);
|
||||
|
||||
@ -1136,7 +1131,7 @@ static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
|
||||
static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
|
||||
struct nvme_queue *nvmeq, s16 vector)
|
||||
{
|
||||
struct nvme_command c;
|
||||
struct nvme_command c = { };
|
||||
int flags = NVME_QUEUE_PHYS_CONTIG;
|
||||
|
||||
if (!test_bit(NVMEQ_POLLED, &nvmeq->flags))
|
||||
@ -1146,7 +1141,6 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
|
||||
* Note: we (ab)use the fact that the prp fields survive if no data
|
||||
* is attached to the request.
|
||||
*/
|
||||
memset(&c, 0, sizeof(c));
|
||||
c.create_cq.opcode = nvme_admin_create_cq;
|
||||
c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
|
||||
c.create_cq.cqid = cpu_to_le16(qid);
|
||||
@ -1161,7 +1155,7 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
|
||||
struct nvme_queue *nvmeq)
|
||||
{
|
||||
struct nvme_ctrl *ctrl = &dev->ctrl;
|
||||
struct nvme_command c;
|
||||
struct nvme_command c = { };
|
||||
int flags = NVME_QUEUE_PHYS_CONTIG;
|
||||
|
||||
/*
|
||||
@ -1176,7 +1170,6 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
|
||||
* Note: we (ab)use the fact that the prp fields survive if no data
|
||||
* is attached to the request.
|
||||
*/
|
||||
memset(&c, 0, sizeof(c));
|
||||
c.create_sq.opcode = nvme_admin_create_sq;
|
||||
c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
|
||||
c.create_sq.sqid = cpu_to_le16(qid);
|
||||
@ -1257,7 +1250,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
|
||||
struct nvme_queue *nvmeq = iod->nvmeq;
|
||||
struct nvme_dev *dev = nvmeq->dev;
|
||||
struct request *abort_req;
|
||||
struct nvme_command cmd;
|
||||
struct nvme_command cmd = { };
|
||||
u32 csts = readl(dev->bar + NVME_REG_CSTS);
|
||||
|
||||
/* If PCI error recovery process is happening, we cannot reset or
|
||||
@ -1337,7 +1330,6 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
|
||||
}
|
||||
iod->aborted = 1;
|
||||
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
cmd.abort.opcode = nvme_admin_abort_cmd;
|
||||
cmd.abort.cid = req->tag;
|
||||
cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
|
||||
@ -1888,10 +1880,9 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
|
||||
{
|
||||
u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT;
|
||||
u64 dma_addr = dev->host_mem_descs_dma;
|
||||
struct nvme_command c;
|
||||
struct nvme_command c = { };
|
||||
int ret;
|
||||
|
||||
memset(&c, 0, sizeof(c));
|
||||
c.features.opcode = nvme_admin_set_features;
|
||||
c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
|
||||
c.features.dword11 = cpu_to_le32(bits);
|
||||
@ -2265,9 +2256,8 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
|
||||
{
|
||||
struct request_queue *q = nvmeq->dev->ctrl.admin_q;
|
||||
struct request *req;
|
||||
struct nvme_command cmd;
|
||||
struct nvme_command cmd = { };
|
||||
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
cmd.delete_queue.opcode = opcode;
|
||||
cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
|
||||
|
||||
@ -2828,32 +2818,6 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ACPI
|
||||
static bool nvme_acpi_storage_d3(struct pci_dev *dev)
|
||||
{
|
||||
struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
|
||||
u8 val;
|
||||
|
||||
/*
|
||||
* Look for _DSD property specifying that the storage device on the port
|
||||
* must use D3 to support deep platform power savings during
|
||||
* suspend-to-idle.
|
||||
*/
|
||||
|
||||
if (!adev)
|
||||
return false;
|
||||
if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
|
||||
&val))
|
||||
return false;
|
||||
return val == 1;
|
||||
}
|
||||
#else
|
||||
static inline bool nvme_acpi_storage_d3(struct pci_dev *dev)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif /* CONFIG_ACPI */
|
||||
|
||||
static void nvme_async_probe(void *data, async_cookie_t cookie)
|
||||
{
|
||||
struct nvme_dev *dev = data;
|
||||
@ -2903,7 +2867,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
|
||||
quirks |= check_vendor_combination_bug(pdev);
|
||||
|
||||
if (!noacpi && nvme_acpi_storage_d3(pdev)) {
|
||||
if (!noacpi && acpi_storage_d3(&pdev->dev)) {
|
||||
/*
|
||||
* Some systems use a bios work around to ask for D3 on
|
||||
* platforms that support kernel managed suspend.
|
||||
|
@ -1988,11 +1988,13 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
|
||||
return ret;
|
||||
|
||||
if (ctrl->icdoff) {
|
||||
ret = -EOPNOTSUPP;
|
||||
dev_err(ctrl->device, "icdoff is not supported!\n");
|
||||
goto destroy_admin;
|
||||
}
|
||||
|
||||
if (!(ctrl->sgls & ((1 << 0) | (1 << 1)))) {
|
||||
if (!nvme_ctrl_sgl_supported(ctrl)) {
|
||||
ret = -EOPNOTSUPP;
|
||||
dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
|
||||
goto destroy_admin;
|
||||
}
|
||||
|
@ -12,6 +12,7 @@ obj-$(CONFIG_NVME_TARGET_TCP) += nvmet-tcp.o
|
||||
nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \
|
||||
discovery.o io-cmd-file.o io-cmd-bdev.o
|
||||
nvmet-$(CONFIG_NVME_TARGET_PASSTHRU) += passthru.o
|
||||
nvmet-$(CONFIG_BLK_DEV_ZONED) += zns.o
|
||||
nvme-loop-y += loop.o
|
||||
nvmet-rdma-y += rdma.o
|
||||
nvmet-fc-y += fc.o
|
||||
|
@ -162,15 +162,8 @@ out:
|
||||
nvmet_req_complete(req, status);
|
||||
}
|
||||
|
||||
static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
|
||||
static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
|
||||
{
|
||||
u16 status = NVME_SC_INTERNAL;
|
||||
struct nvme_effects_log *log;
|
||||
|
||||
log = kzalloc(sizeof(*log), GFP_KERNEL);
|
||||
if (!log)
|
||||
goto out;
|
||||
|
||||
log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
|
||||
log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
|
||||
log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
|
||||
@ -184,9 +177,45 @@ static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
|
||||
log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
|
||||
log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
|
||||
log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);
|
||||
}
|
||||
|
||||
static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
|
||||
{
|
||||
log->iocs[nvme_cmd_zone_append] = cpu_to_le32(1 << 0);
|
||||
log->iocs[nvme_cmd_zone_mgmt_send] = cpu_to_le32(1 << 0);
|
||||
log->iocs[nvme_cmd_zone_mgmt_recv] = cpu_to_le32(1 << 0);
|
||||
}
|
||||
|
||||
static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
|
||||
{
|
||||
struct nvme_effects_log *log;
|
||||
u16 status = NVME_SC_SUCCESS;
|
||||
|
||||
log = kzalloc(sizeof(*log), GFP_KERNEL);
|
||||
if (!log) {
|
||||
status = NVME_SC_INTERNAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
switch (req->cmd->get_log_page.csi) {
|
||||
case NVME_CSI_NVM:
|
||||
nvmet_get_cmd_effects_nvm(log);
|
||||
break;
|
||||
case NVME_CSI_ZNS:
|
||||
if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
|
||||
status = NVME_SC_INVALID_IO_CMD_SET;
|
||||
goto free;
|
||||
}
|
||||
nvmet_get_cmd_effects_nvm(log);
|
||||
nvmet_get_cmd_effects_zns(log);
|
||||
break;
|
||||
default:
|
||||
status = NVME_SC_INVALID_LOG_PAGE;
|
||||
goto free;
|
||||
}
|
||||
|
||||
status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
|
||||
|
||||
free:
|
||||
kfree(log);
|
||||
out:
|
||||
nvmet_req_complete(req, status);
|
||||
@ -313,22 +342,6 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
|
||||
nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
|
||||
}
|
||||
|
||||
static u16 nvmet_set_model_number(struct nvmet_subsys *subsys)
|
||||
{
|
||||
u16 status = 0;
|
||||
|
||||
mutex_lock(&subsys->lock);
|
||||
if (!subsys->model_number) {
|
||||
subsys->model_number =
|
||||
kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
|
||||
if (!subsys->model_number)
|
||||
status = NVME_SC_INTERNAL;
|
||||
}
|
||||
mutex_unlock(&subsys->lock);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
|
||||
{
|
||||
struct nvmet_ctrl *ctrl = req->sq->ctrl;
|
||||
@ -337,14 +350,10 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
|
||||
u32 cmd_capsule_size;
|
||||
u16 status = 0;
|
||||
|
||||
/*
|
||||
* If there is no model number yet, set it now. It will then remain
|
||||
* stable for the life time of the subsystem.
|
||||
*/
|
||||
if (!subsys->model_number) {
|
||||
status = nvmet_set_model_number(subsys);
|
||||
if (status)
|
||||
goto out;
|
||||
if (!subsys->subsys_discovered) {
|
||||
mutex_lock(&subsys->lock);
|
||||
subsys->subsys_discovered = true;
|
||||
mutex_unlock(&subsys->lock);
|
||||
}
|
||||
|
||||
id = kzalloc(sizeof(*id), GFP_KERNEL);
|
||||
@ -357,9 +366,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
|
||||
id->vid = 0;
|
||||
id->ssvid = 0;
|
||||
|
||||
memset(id->sn, ' ', sizeof(id->sn));
|
||||
bin2hex(id->sn, &ctrl->subsys->serial,
|
||||
min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
|
||||
memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
|
||||
memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
|
||||
strlen(subsys->model_number), ' ');
|
||||
memcpy_and_pad(id->fr, sizeof(id->fr),
|
||||
@ -415,7 +422,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
|
||||
/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
|
||||
id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
|
||||
|
||||
id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
|
||||
id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
|
||||
id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
|
||||
id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
|
||||
NVME_CTRL_ONCS_WRITE_ZEROES);
|
||||
@ -635,6 +642,12 @@ static void nvmet_execute_identify_desclist(struct nvmet_req *req)
|
||||
goto out;
|
||||
}
|
||||
|
||||
status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
|
||||
NVME_NIDT_CSI_LEN,
|
||||
&req->ns->csi, &off);
|
||||
if (status)
|
||||
goto out;
|
||||
|
||||
if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
|
||||
off) != NVME_IDENTIFY_DATA_SIZE - off)
|
||||
status = NVME_SC_INTERNAL | NVME_SC_DNR;
|
||||
@ -643,6 +656,23 @@ out:
|
||||
nvmet_req_complete(req, status);
|
||||
}
|
||||
|
||||
static bool nvmet_handle_identify_desclist(struct nvmet_req *req)
|
||||
{
|
||||
switch (req->cmd->identify.csi) {
|
||||
case NVME_CSI_NVM:
|
||||
nvmet_execute_identify_desclist(req);
|
||||
return true;
|
||||
case NVME_CSI_ZNS:
|
||||
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
|
||||
nvmet_execute_identify_desclist(req);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
static void nvmet_execute_identify(struct nvmet_req *req)
|
||||
{
|
||||
if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
|
||||
@ -650,19 +680,54 @@ static void nvmet_execute_identify(struct nvmet_req *req)
|
||||
|
||||
switch (req->cmd->identify.cns) {
|
||||
case NVME_ID_CNS_NS:
|
||||
return nvmet_execute_identify_ns(req);
|
||||
switch (req->cmd->identify.csi) {
|
||||
case NVME_CSI_NVM:
|
||||
return nvmet_execute_identify_ns(req);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case NVME_ID_CNS_CS_NS:
|
||||
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
|
||||
switch (req->cmd->identify.csi) {
|
||||
case NVME_CSI_ZNS:
|
||||
return nvmet_execute_identify_cns_cs_ns(req);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
break;
|
||||
case NVME_ID_CNS_CTRL:
|
||||
return nvmet_execute_identify_ctrl(req);
|
||||
switch (req->cmd->identify.csi) {
|
||||
case NVME_CSI_NVM:
|
||||
return nvmet_execute_identify_ctrl(req);
|
||||
}
|
||||
break;
|
||||
case NVME_ID_CNS_CS_CTRL:
|
||||
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
|
||||
switch (req->cmd->identify.csi) {
|
||||
case NVME_CSI_ZNS:
|
||||
return nvmet_execute_identify_cns_cs_ctrl(req);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
break;
|
||||
case NVME_ID_CNS_NS_ACTIVE_LIST:
|
||||
return nvmet_execute_identify_nslist(req);
|
||||
switch (req->cmd->identify.csi) {
|
||||
case NVME_CSI_NVM:
|
||||
return nvmet_execute_identify_nslist(req);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case NVME_ID_CNS_NS_DESC_LIST:
|
||||
return nvmet_execute_identify_desclist(req);
|
||||
if (nvmet_handle_identify_desclist(req) == true)
|
||||
return;
|
||||
break;
|
||||
}
|
||||
|
||||
pr_debug("unhandled identify cns %d on qid %d\n",
|
||||
req->cmd->identify.cns, req->sq->qid);
|
||||
req->error_loc = offsetof(struct nvme_identify, cns);
|
||||
nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
|
||||
nvmet_req_cns_error_complete(req);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1007,13 +1007,26 @@ static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
|
||||
NVME_MINOR(subsys->ver));
|
||||
}
|
||||
|
||||
static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
|
||||
const char *page, size_t count)
|
||||
static ssize_t
|
||||
nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys,
|
||||
const char *page, size_t count)
|
||||
{
|
||||
struct nvmet_subsys *subsys = to_subsys(item);
|
||||
int major, minor, tertiary = 0;
|
||||
int ret;
|
||||
|
||||
if (subsys->subsys_discovered) {
|
||||
if (NVME_TERTIARY(subsys->ver))
|
||||
pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n",
|
||||
NVME_MAJOR(subsys->ver),
|
||||
NVME_MINOR(subsys->ver),
|
||||
NVME_TERTIARY(subsys->ver));
|
||||
else
|
||||
pr_err("Can't set version number. %llu.%llu is already assigned\n",
|
||||
NVME_MAJOR(subsys->ver),
|
||||
NVME_MINOR(subsys->ver));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* passthru subsystems use the underlying controller's version */
|
||||
if (nvmet_passthru_ctrl(subsys))
|
||||
return -EINVAL;
|
||||
@ -1022,35 +1035,84 @@ static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
|
||||
if (ret != 2 && ret != 3)
|
||||
return -EINVAL;
|
||||
|
||||
down_write(&nvmet_config_sem);
|
||||
subsys->ver = NVME_VS(major, minor, tertiary);
|
||||
up_write(&nvmet_config_sem);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
|
||||
const char *page, size_t count)
|
||||
{
|
||||
struct nvmet_subsys *subsys = to_subsys(item);
|
||||
ssize_t ret;
|
||||
|
||||
down_write(&nvmet_config_sem);
|
||||
mutex_lock(&subsys->lock);
|
||||
ret = nvmet_subsys_attr_version_store_locked(subsys, page, count);
|
||||
mutex_unlock(&subsys->lock);
|
||||
up_write(&nvmet_config_sem);
|
||||
|
||||
return ret;
|
||||
}
|
||||
CONFIGFS_ATTR(nvmet_subsys_, attr_version);
|
||||
|
||||
/* See Section 1.5 of NVMe 1.4 */
|
||||
static bool nvmet_is_ascii(const char c)
|
||||
{
|
||||
return c >= 0x20 && c <= 0x7e;
|
||||
}
|
||||
|
||||
static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
|
||||
char *page)
|
||||
{
|
||||
struct nvmet_subsys *subsys = to_subsys(item);
|
||||
|
||||
return snprintf(page, PAGE_SIZE, "%llx\n", subsys->serial);
|
||||
return snprintf(page, PAGE_SIZE, "%s\n", subsys->serial);
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys,
|
||||
const char *page, size_t count)
|
||||
{
|
||||
int pos, len = strcspn(page, "\n");
|
||||
|
||||
if (subsys->subsys_discovered) {
|
||||
pr_err("Can't set serial number. %s is already assigned\n",
|
||||
subsys->serial);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!len || len > NVMET_SN_MAX_SIZE) {
|
||||
pr_err("Serial Number can not be empty or exceed %d Bytes\n",
|
||||
NVMET_SN_MAX_SIZE);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
for (pos = 0; pos < len; pos++) {
|
||||
if (!nvmet_is_ascii(page[pos])) {
|
||||
pr_err("Serial Number must contain only ASCII strings\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
memcpy_and_pad(subsys->serial, NVMET_SN_MAX_SIZE, page, len, ' ');
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
|
||||
const char *page, size_t count)
|
||||
{
|
||||
u64 serial;
|
||||
|
||||
if (sscanf(page, "%llx\n", &serial) != 1)
|
||||
return -EINVAL;
|
||||
struct nvmet_subsys *subsys = to_subsys(item);
|
||||
ssize_t ret;
|
||||
|
||||
down_write(&nvmet_config_sem);
|
||||
to_subsys(item)->serial = serial;
|
||||
mutex_lock(&subsys->lock);
|
||||
ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count);
|
||||
mutex_unlock(&subsys->lock);
|
||||
up_write(&nvmet_config_sem);
|
||||
|
||||
return count;
|
||||
return ret;
|
||||
}
|
||||
CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
|
||||
|
||||
@ -1118,20 +1180,8 @@ static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
|
||||
char *page)
|
||||
{
|
||||
struct nvmet_subsys *subsys = to_subsys(item);
|
||||
int ret;
|
||||
|
||||
mutex_lock(&subsys->lock);
|
||||
ret = snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number ?
|
||||
subsys->model_number : NVMET_DEFAULT_CTRL_MODEL);
|
||||
mutex_unlock(&subsys->lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* See Section 1.5 of NVMe 1.4 */
|
||||
static bool nvmet_is_ascii(const char c)
|
||||
{
|
||||
return c >= 0x20 && c <= 0x7e;
|
||||
return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number);
|
||||
}
|
||||
|
||||
static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
|
||||
@ -1139,7 +1189,7 @@ static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
|
||||
{
|
||||
int pos = 0, len;
|
||||
|
||||
if (subsys->model_number) {
|
||||
if (subsys->subsys_discovered) {
|
||||
pr_err("Can't set model number. %s is already assigned\n",
|
||||
subsys->model_number);
|
||||
return -EINVAL;
|
||||
|
@ -16,6 +16,7 @@
|
||||
#include "nvmet.h"
|
||||
|
||||
struct workqueue_struct *buffered_io_wq;
|
||||
struct workqueue_struct *zbd_wq;
|
||||
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
|
||||
static DEFINE_IDA(cntlid_ida);
|
||||
|
||||
@ -43,43 +44,34 @@ DECLARE_RWSEM(nvmet_ana_sem);
|
||||
|
||||
inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
|
||||
{
|
||||
u16 status;
|
||||
|
||||
switch (errno) {
|
||||
case 0:
|
||||
status = NVME_SC_SUCCESS;
|
||||
break;
|
||||
return NVME_SC_SUCCESS;
|
||||
case -ENOSPC:
|
||||
req->error_loc = offsetof(struct nvme_rw_command, length);
|
||||
status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
|
||||
break;
|
||||
return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
|
||||
case -EREMOTEIO:
|
||||
req->error_loc = offsetof(struct nvme_rw_command, slba);
|
||||
status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
|
||||
break;
|
||||
return NVME_SC_LBA_RANGE | NVME_SC_DNR;
|
||||
case -EOPNOTSUPP:
|
||||
req->error_loc = offsetof(struct nvme_common_command, opcode);
|
||||
switch (req->cmd->common.opcode) {
|
||||
case nvme_cmd_dsm:
|
||||
case nvme_cmd_write_zeroes:
|
||||
status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
|
||||
break;
|
||||
return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
|
||||
default:
|
||||
status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
|
||||
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
|
||||
}
|
||||
break;
|
||||
case -ENODATA:
|
||||
req->error_loc = offsetof(struct nvme_rw_command, nsid);
|
||||
status = NVME_SC_ACCESS_DENIED;
|
||||
break;
|
||||
return NVME_SC_ACCESS_DENIED;
|
||||
case -EIO:
|
||||
fallthrough;
|
||||
default:
|
||||
req->error_loc = offsetof(struct nvme_common_command, opcode);
|
||||
status = NVME_SC_INTERNAL | NVME_SC_DNR;
|
||||
return NVME_SC_INTERNAL | NVME_SC_DNR;
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
|
||||
@ -122,11 +114,11 @@ u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
|
||||
static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
|
||||
{
|
||||
unsigned long nsid = 0;
|
||||
struct nvmet_ns *cur;
|
||||
unsigned long idx;
|
||||
u32 nsid = 0;
|
||||
|
||||
xa_for_each(&subsys->namespaces, idx, cur)
|
||||
nsid = cur->nsid;
|
||||
@ -141,14 +133,13 @@ static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
|
||||
|
||||
static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
|
||||
{
|
||||
u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
|
||||
struct nvmet_req *req;
|
||||
|
||||
mutex_lock(&ctrl->lock);
|
||||
while (ctrl->nr_async_event_cmds) {
|
||||
req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
|
||||
mutex_unlock(&ctrl->lock);
|
||||
nvmet_req_complete(req, status);
|
||||
nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
|
||||
mutex_lock(&ctrl->lock);
|
||||
}
|
||||
mutex_unlock(&ctrl->lock);
|
||||
@ -692,6 +683,7 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
|
||||
|
||||
uuid_gen(&ns->uuid);
|
||||
ns->buffered_io = false;
|
||||
ns->csi = NVME_CSI_NVM;
|
||||
|
||||
return ns;
|
||||
}
|
||||
@ -887,10 +879,18 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (req->ns->file)
|
||||
return nvmet_file_parse_io_cmd(req);
|
||||
|
||||
return nvmet_bdev_parse_io_cmd(req);
|
||||
switch (req->ns->csi) {
|
||||
case NVME_CSI_NVM:
|
||||
if (req->ns->file)
|
||||
return nvmet_file_parse_io_cmd(req);
|
||||
return nvmet_bdev_parse_io_cmd(req);
|
||||
case NVME_CSI_ZNS:
|
||||
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
|
||||
return nvmet_bdev_zns_parse_io_cmd(req);
|
||||
return NVME_SC_INVALID_IO_CMD_SET;
|
||||
default:
|
||||
return NVME_SC_INVALID_IO_CMD_SET;
|
||||
}
|
||||
}
|
||||
|
||||
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
|
||||
@ -1112,6 +1112,17 @@ static inline u8 nvmet_cc_iocqes(u32 cc)
|
||||
return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
|
||||
}
|
||||
|
||||
static inline bool nvmet_css_supported(u8 cc_css)
|
||||
{
|
||||
switch (cc_css <<= NVME_CC_CSS_SHIFT) {
|
||||
case NVME_CC_CSS_NVM:
|
||||
case NVME_CC_CSS_CSI:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
|
||||
{
|
||||
lockdep_assert_held(&ctrl->lock);
|
||||
@ -1131,7 +1142,7 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
|
||||
|
||||
if (nvmet_cc_mps(ctrl->cc) != 0 ||
|
||||
nvmet_cc_ams(ctrl->cc) != 0 ||
|
||||
nvmet_cc_css(ctrl->cc) != 0) {
|
||||
!nvmet_css_supported(nvmet_cc_css(ctrl->cc))) {
|
||||
ctrl->csts = NVME_CSTS_CFS;
|
||||
return;
|
||||
}
|
||||
@ -1182,6 +1193,8 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
|
||||
{
|
||||
/* command sets supported: NVMe command set: */
|
||||
ctrl->cap = (1ULL << 37);
|
||||
/* Controller supports one or more I/O Command Sets */
|
||||
ctrl->cap |= (1ULL << 43);
|
||||
/* CC.EN timeout in 500msec units: */
|
||||
ctrl->cap |= (15ULL << 24);
|
||||
/* maximum queue entries supported: */
|
||||
@ -1493,6 +1506,8 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
|
||||
enum nvme_subsys_type type)
|
||||
{
|
||||
struct nvmet_subsys *subsys;
|
||||
char serial[NVMET_SN_MAX_SIZE / 2];
|
||||
int ret;
|
||||
|
||||
subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
|
||||
if (!subsys)
|
||||
@ -1500,7 +1515,14 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
|
||||
|
||||
subsys->ver = NVMET_DEFAULT_VS;
|
||||
/* generate a random serial number as our controllers are ephemeral: */
|
||||
get_random_bytes(&subsys->serial, sizeof(subsys->serial));
|
||||
get_random_bytes(&serial, sizeof(serial));
|
||||
bin2hex(subsys->serial, &serial, sizeof(serial));
|
||||
|
||||
subsys->model_number = kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
|
||||
if (!subsys->model_number) {
|
||||
ret = -ENOMEM;
|
||||
goto free_subsys;
|
||||
}
|
||||
|
||||
switch (type) {
|
||||
case NVME_NQN_NVME:
|
||||
@ -1511,15 +1533,15 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
|
||||
break;
|
||||
default:
|
||||
pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
|
||||
kfree(subsys);
|
||||
return ERR_PTR(-EINVAL);
|
||||
ret = -EINVAL;
|
||||
goto free_mn;
|
||||
}
|
||||
subsys->type = type;
|
||||
subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
|
||||
GFP_KERNEL);
|
||||
if (!subsys->subsysnqn) {
|
||||
kfree(subsys);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
ret = -ENOMEM;
|
||||
goto free_mn;
|
||||
}
|
||||
subsys->cntlid_min = NVME_CNTLID_MIN;
|
||||
subsys->cntlid_max = NVME_CNTLID_MAX;
|
||||
@ -1531,6 +1553,12 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
|
||||
INIT_LIST_HEAD(&subsys->hosts);
|
||||
|
||||
return subsys;
|
||||
|
||||
free_mn:
|
||||
kfree(subsys->model_number);
|
||||
free_subsys:
|
||||
kfree(subsys);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
static void nvmet_subsys_free(struct kref *ref)
|
||||
@ -1569,11 +1597,15 @@ static int __init nvmet_init(void)
|
||||
|
||||
nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
|
||||
|
||||
zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
|
||||
if (!zbd_wq)
|
||||
return -ENOMEM;
|
||||
|
||||
buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
|
||||
WQ_MEM_RECLAIM, 0);
|
||||
if (!buffered_io_wq) {
|
||||
error = -ENOMEM;
|
||||
goto out;
|
||||
goto out_free_zbd_work_queue;
|
||||
}
|
||||
|
||||
error = nvmet_init_discovery();
|
||||
@ -1589,7 +1621,8 @@ out_exit_discovery:
|
||||
nvmet_exit_discovery();
|
||||
out_free_work_queue:
|
||||
destroy_workqueue(buffered_io_wq);
|
||||
out:
|
||||
out_free_zbd_work_queue:
|
||||
destroy_workqueue(zbd_wq);
|
||||
return error;
|
||||
}
|
||||
|
||||
@ -1599,6 +1632,7 @@ static void __exit nvmet_exit(void)
|
||||
nvmet_exit_discovery();
|
||||
ida_destroy(&cntlid_ida);
|
||||
destroy_workqueue(buffered_io_wq);
|
||||
destroy_workqueue(zbd_wq);
|
||||
|
||||
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
|
||||
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
|
||||
|
@ -244,7 +244,6 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
|
||||
{
|
||||
struct nvmet_ctrl *ctrl = req->sq->ctrl;
|
||||
struct nvme_id_ctrl *id;
|
||||
const char model[] = "Linux";
|
||||
u16 status = 0;
|
||||
|
||||
if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
|
||||
@ -262,11 +261,10 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
|
||||
goto out;
|
||||
}
|
||||
|
||||
memset(id->sn, ' ', sizeof(id->sn));
|
||||
bin2hex(id->sn, &ctrl->subsys->serial,
|
||||
min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
|
||||
memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
|
||||
memset(id->fr, ' ', sizeof(id->fr));
|
||||
memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
|
||||
memcpy_and_pad(id->mn, sizeof(id->mn), ctrl->subsys->model_number,
|
||||
strlen(ctrl->subsys->model_number), ' ');
|
||||
memcpy_and_pad(id->fr, sizeof(id->fr),
|
||||
UTS_RELEASE, strlen(UTS_RELEASE), ' ');
|
||||
|
||||
|
@ -2510,13 +2510,6 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
|
||||
u32 xfrlen = be32_to_cpu(cmdiu->data_len);
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* if there is no nvmet mapping to the targetport there
|
||||
* shouldn't be requests. just terminate them.
|
||||
*/
|
||||
if (!tgtport->pe)
|
||||
goto transport_error;
|
||||
|
||||
/*
|
||||
* Fused commands are currently not supported in the linux
|
||||
* implementation.
|
||||
@ -2544,7 +2537,8 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
|
||||
|
||||
fod->req.cmd = &fod->cmdiubuf.sqe;
|
||||
fod->req.cqe = &fod->rspiubuf.cqe;
|
||||
fod->req.port = tgtport->pe->port;
|
||||
if (tgtport->pe)
|
||||
fod->req.port = tgtport->pe->port;
|
||||
|
||||
/* clear any response payload */
|
||||
memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
|
||||
|
@ -47,6 +47,14 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
|
||||
id->nows = to0based(ql->io_opt / ql->logical_block_size);
|
||||
}
|
||||
|
||||
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
|
||||
{
|
||||
if (ns->bdev) {
|
||||
blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
|
||||
ns->bdev = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
|
||||
{
|
||||
struct blk_integrity *bi = bdev_get_integrity(ns->bdev);
|
||||
@ -86,15 +94,15 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
|
||||
if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY_T10))
|
||||
nvmet_bdev_ns_enable_integrity(ns);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
|
||||
{
|
||||
if (ns->bdev) {
|
||||
blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
|
||||
ns->bdev = NULL;
|
||||
if (bdev_is_zoned(ns->bdev)) {
|
||||
if (!nvmet_bdev_zns_enable(ns)) {
|
||||
nvmet_bdev_ns_disable(ns);
|
||||
return -EINVAL;
|
||||
}
|
||||
ns->csi = NVME_CSI_ZNS;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns)
|
||||
@ -102,7 +110,7 @@ void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns)
|
||||
ns->size = i_size_read(ns->bdev->bd_inode);
|
||||
}
|
||||
|
||||
static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
|
||||
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
|
||||
{
|
||||
u16 status = NVME_SC_SUCCESS;
|
||||
|
||||
@ -164,8 +172,7 @@ static void nvmet_bio_done(struct bio *bio)
|
||||
struct nvmet_req *req = bio->bi_private;
|
||||
|
||||
nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
|
||||
if (bio != &req->b.inline_bio)
|
||||
bio_put(bio);
|
||||
nvmet_req_bio_put(req, bio);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INTEGRITY
|
||||
@ -429,9 +436,7 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
|
||||
|
||||
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
|
||||
{
|
||||
struct nvme_command *cmd = req->cmd;
|
||||
|
||||
switch (cmd->common.opcode) {
|
||||
switch (req->cmd->common.opcode) {
|
||||
case nvme_cmd_read:
|
||||
case nvme_cmd_write:
|
||||
req->execute = nvmet_bdev_execute_rw;
|
||||
|
@ -385,9 +385,7 @@ static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
|
||||
|
||||
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
|
||||
{
|
||||
struct nvme_command *cmd = req->cmd;
|
||||
|
||||
switch (cmd->common.opcode) {
|
||||
switch (req->cmd->common.opcode) {
|
||||
case nvme_cmd_read:
|
||||
case nvme_cmd_write:
|
||||
req->execute = nvmet_file_execute_rw;
|
||||
|
@ -28,6 +28,7 @@
|
||||
#define NVMET_NO_ERROR_LOC ((u16)-1)
|
||||
#define NVMET_DEFAULT_CTRL_MODEL "Linux"
|
||||
#define NVMET_MN_MAX_SIZE 40
|
||||
#define NVMET_SN_MAX_SIZE 20
|
||||
|
||||
/*
|
||||
* Supported optional AENs:
|
||||
@ -82,6 +83,7 @@ struct nvmet_ns {
|
||||
struct pci_dev *p2p_dev;
|
||||
int pi_type;
|
||||
int metadata_size;
|
||||
u8 csi;
|
||||
};
|
||||
|
||||
static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
|
||||
@ -217,7 +219,7 @@ struct nvmet_subsys {
|
||||
|
||||
struct xarray namespaces;
|
||||
unsigned int nr_namespaces;
|
||||
unsigned int max_nsid;
|
||||
u32 max_nsid;
|
||||
u16 cntlid_min;
|
||||
u16 cntlid_max;
|
||||
|
||||
@ -229,7 +231,8 @@ struct nvmet_subsys {
|
||||
u16 max_qid;
|
||||
|
||||
u64 ver;
|
||||
u64 serial;
|
||||
char serial[NVMET_SN_MAX_SIZE];
|
||||
bool subsys_discovered;
|
||||
char *subsysnqn;
|
||||
bool pi_support;
|
||||
|
||||
@ -247,6 +250,10 @@ struct nvmet_subsys {
|
||||
unsigned int admin_timeout;
|
||||
unsigned int io_timeout;
|
||||
#endif /* CONFIG_NVME_TARGET_PASSTHRU */
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_ZONED
|
||||
u8 zasl;
|
||||
#endif /* CONFIG_BLK_DEV_ZONED */
|
||||
};
|
||||
|
||||
static inline struct nvmet_subsys *to_subsys(struct config_item *item)
|
||||
@ -332,6 +339,12 @@ struct nvmet_req {
|
||||
struct work_struct work;
|
||||
bool use_workqueue;
|
||||
} p;
|
||||
#ifdef CONFIG_BLK_DEV_ZONED
|
||||
struct {
|
||||
struct bio inline_bio;
|
||||
struct work_struct zmgmt_work;
|
||||
} z;
|
||||
#endif /* CONFIG_BLK_DEV_ZONED */
|
||||
};
|
||||
int sg_cnt;
|
||||
int metadata_sg_cnt;
|
||||
@ -351,6 +364,7 @@ struct nvmet_req {
|
||||
};
|
||||
|
||||
extern struct workqueue_struct *buffered_io_wq;
|
||||
extern struct workqueue_struct *zbd_wq;
|
||||
|
||||
static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
|
||||
{
|
||||
@ -400,6 +414,7 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
|
||||
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
|
||||
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
|
||||
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
|
||||
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
|
||||
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
|
||||
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
|
||||
u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
|
||||
@ -527,6 +542,14 @@ void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
|
||||
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
|
||||
int nvmet_file_ns_revalidate(struct nvmet_ns *ns);
|
||||
void nvmet_ns_revalidate(struct nvmet_ns *ns);
|
||||
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);
|
||||
|
||||
bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
|
||||
void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req);
|
||||
void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req);
|
||||
void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
|
||||
void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
|
||||
void nvmet_bdev_execute_zone_append(struct nvmet_req *req);
|
||||
|
||||
static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
|
||||
{
|
||||
@ -622,4 +645,18 @@ static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
|
||||
req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
|
||||
}
|
||||
|
||||
static inline void nvmet_req_cns_error_complete(struct nvmet_req *req)
|
||||
{
|
||||
pr_debug("unhandled identify cns %d on qid %d\n",
|
||||
req->cmd->identify.cns, req->sq->qid);
|
||||
req->error_loc = offsetof(struct nvme_identify, cns);
|
||||
nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
|
||||
}
|
||||
|
||||
static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
|
||||
{
|
||||
if (bio != &req->b.inline_bio)
|
||||
bio_put(bio);
|
||||
}
|
||||
|
||||
#endif /* _NVMET_H */
|
||||
|
@ -206,8 +206,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
|
||||
for_each_sg(req->sg, sg, req->sg_cnt, i) {
|
||||
if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
|
||||
sg->offset) < sg->length) {
|
||||
if (bio != &req->p.inline_bio)
|
||||
bio_put(bio);
|
||||
nvmet_req_bio_put(req, bio);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
@ -1257,7 +1257,7 @@ out_err:
|
||||
|
||||
static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
|
||||
{
|
||||
struct ib_qp_init_attr qp_attr;
|
||||
struct ib_qp_init_attr qp_attr = { };
|
||||
struct nvmet_rdma_device *ndev = queue->dev;
|
||||
int nr_cqe, ret, i, factor;
|
||||
|
||||
@ -1275,7 +1275,6 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
|
||||
goto out;
|
||||
}
|
||||
|
||||
memset(&qp_attr, 0, sizeof(qp_attr));
|
||||
qp_attr.qp_context = queue;
|
||||
qp_attr.event_handler = nvmet_rdma_qp_event;
|
||||
qp_attr.send_cq = queue->cq;
|
||||
|
drivers/nvme/target/zns.c (new file, 615 lines)
@@ -0,0 +1,615 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe ZNS-ZBD command implementation.
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/nvme.h>
+#include <linux/blkdev.h>
+#include "nvmet.h"
+
+/*
+ * We set the Memory Page Size Minimum (MPSMIN) for target controller to 0
+ * which gets added by 12 in the nvme_enable_ctrl() which results in 2^12 = 4k
+ * as page_shift value. When calculating the ZASL use shift by 12.
+ */
+#define NVMET_MPSMIN_SHIFT	12
+
+static inline u8 nvmet_zasl(unsigned int zone_append_sects)
+{
+	/*
+	 * Zone Append Size Limit (zasl) is expressed as a power of 2 value
+	 * with the minimum memory page size (i.e. 12) as unit.
+	 */
+	return ilog2(zone_append_sects >> (NVMET_MPSMIN_SHIFT - 9));
+}
+
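A quick worked example of the encoding that nvmet_zasl() above produces; the device numbers are illustrative only, not taken from any particular drive:

/*
 * Suppose the zoned block device reports max_zone_append_sectors = 1024
 * (512-byte sectors), i.e. 512 KiB per zone append:
 *
 *   1024 >> (NVMET_MPSMIN_SHIFT - 9) = 1024 >> 3 = 128 pages of 4 KiB
 *   ilog2(128) = 7, so ZASL = 7 and 2^7 * 4 KiB = 512 KiB, as expected.
 *
 * Because ilog2() rounds down, a limit that is not a power of two is
 * reported as the next lower power of two.
 */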
static int validate_conv_zones_cb(struct blk_zone *z,
|
||||
unsigned int i, void *data)
|
||||
{
|
||||
if (z->type == BLK_ZONE_TYPE_CONVENTIONAL)
|
||||
return -EOPNOTSUPP;
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
|
||||
{
|
||||
struct request_queue *q = ns->bdev->bd_disk->queue;
|
||||
u8 zasl = nvmet_zasl(queue_max_zone_append_sectors(q));
|
||||
struct gendisk *bd_disk = ns->bdev->bd_disk;
|
||||
int ret;
|
||||
|
||||
if (ns->subsys->zasl) {
|
||||
if (ns->subsys->zasl > zasl)
|
||||
return false;
|
||||
}
|
||||
ns->subsys->zasl = zasl;
|
||||
|
||||
/*
|
||||
* Generic zoned block devices may have a smaller last zone which is
|
||||
* not supported by ZNS. Exclude zoned drives that have such smaller
|
||||
* last zone.
|
||||
*/
|
||||
if (get_capacity(bd_disk) & (bdev_zone_sectors(ns->bdev) - 1))
|
||||
return false;
|
||||
/*
|
||||
* ZNS does not define a conventional zone type. If the underlying
|
||||
* device has a bitmap set indicating the existence of conventional
|
||||
* zones, reject the device. Otherwise, use report zones to detect if
|
||||
* the device has conventional zones.
|
||||
*/
|
||||
if (ns->bdev->bd_disk->queue->conv_zones_bitmap)
|
||||
return false;
|
||||
|
||||
ret = blkdev_report_zones(ns->bdev, 0, blkdev_nr_zones(bd_disk),
|
||||
validate_conv_zones_cb, NULL);
|
||||
if (ret < 0)
|
||||
return false;
|
||||
|
||||
ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req)
|
||||
{
|
||||
u8 zasl = req->sq->ctrl->subsys->zasl;
|
||||
struct nvmet_ctrl *ctrl = req->sq->ctrl;
|
||||
struct nvme_id_ctrl_zns *id;
|
||||
u16 status;
|
||||
|
||||
id = kzalloc(sizeof(*id), GFP_KERNEL);
|
||||
if (!id) {
|
||||
status = NVME_SC_INTERNAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (ctrl->ops->get_mdts)
|
||||
id->zasl = min_t(u8, ctrl->ops->get_mdts(ctrl), zasl);
|
||||
else
|
||||
id->zasl = zasl;
|
||||
|
||||
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
|
||||
|
||||
kfree(id);
|
||||
out:
|
||||
nvmet_req_complete(req, status);
|
||||
}
|
||||
|
||||
void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
|
||||
{
|
||||
struct nvme_id_ns_zns *id_zns;
|
||||
u64 zsze;
|
||||
u16 status;
|
||||
|
||||
if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
|
||||
req->error_loc = offsetof(struct nvme_identify, nsid);
|
||||
status = NVME_SC_INVALID_NS | NVME_SC_DNR;
|
||||
goto out;
|
||||
}
|
||||
|
||||
id_zns = kzalloc(sizeof(*id_zns), GFP_KERNEL);
|
||||
if (!id_zns) {
|
||||
status = NVME_SC_INTERNAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
status = nvmet_req_find_ns(req);
|
||||
if (status) {
|
||||
status = NVME_SC_INTERNAL;
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (!bdev_is_zoned(req->ns->bdev)) {
|
||||
req->error_loc = offsetof(struct nvme_identify, nsid);
|
||||
status = NVME_SC_INVALID_NS | NVME_SC_DNR;
|
||||
goto done;
|
||||
}
|
||||
|
||||
nvmet_ns_revalidate(req->ns);
|
||||
zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
|
||||
req->ns->blksize_shift;
|
||||
id_zns->lbafe[0].zsze = cpu_to_le64(zsze);
|
||||
id_zns->mor = cpu_to_le32(bdev_max_open_zones(req->ns->bdev));
|
||||
id_zns->mar = cpu_to_le32(bdev_max_active_zones(req->ns->bdev));
|
||||
|
||||
done:
|
||||
status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));
|
||||
kfree(id_zns);
|
||||
out:
|
||||
nvmet_req_complete(req, status);
|
||||
}
|
||||
|
||||
static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
|
||||
{
|
||||
sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
|
||||
u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
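	/*
	 * NUMD is a 0's based count of dwords, so the buffer the host
	 * described is (numd + 1) * 4 bytes long; e.g. numd = 0x3ff
	 * describes a 4 KiB buffer.
	 */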

	if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba);
		return NVME_SC_LBA_RANGE | NVME_SC_DNR;
	}

	if (out_bufsize < sizeof(struct nvme_zone_report)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	switch (req->cmd->zmr.pr) {
	case 0:
	case 1:
		break;
	default:
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	switch (req->cmd->zmr.zrasf) {
	case NVME_ZRASF_ZONE_REPORT_ALL:
	case NVME_ZRASF_ZONE_STATE_EMPTY:
	case NVME_ZRASF_ZONE_STATE_IMP_OPEN:
	case NVME_ZRASF_ZONE_STATE_EXP_OPEN:
	case NVME_ZRASF_ZONE_STATE_CLOSED:
	case NVME_ZRASF_ZONE_STATE_FULL:
	case NVME_ZRASF_ZONE_STATE_READONLY:
	case NVME_ZRASF_ZONE_STATE_OFFLINE:
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	return NVME_SC_SUCCESS;
}

struct nvmet_report_zone_data {
	struct nvmet_req *req;
	u64 out_buf_offset;
	u64 out_nr_zones;
	u64 nr_zones;
	u8 zrasf;
};

static int nvmet_bdev_report_zone_cb(struct blk_zone *z, unsigned i, void *d)
{
	static const unsigned int nvme_zrasf_to_blk_zcond[] = {
		[NVME_ZRASF_ZONE_STATE_EMPTY] = BLK_ZONE_COND_EMPTY,
		[NVME_ZRASF_ZONE_STATE_IMP_OPEN] = BLK_ZONE_COND_IMP_OPEN,
		[NVME_ZRASF_ZONE_STATE_EXP_OPEN] = BLK_ZONE_COND_EXP_OPEN,
		[NVME_ZRASF_ZONE_STATE_CLOSED] = BLK_ZONE_COND_CLOSED,
		[NVME_ZRASF_ZONE_STATE_READONLY] = BLK_ZONE_COND_READONLY,
		[NVME_ZRASF_ZONE_STATE_FULL] = BLK_ZONE_COND_FULL,
		[NVME_ZRASF_ZONE_STATE_OFFLINE] = BLK_ZONE_COND_OFFLINE,
	};
	struct nvmet_report_zone_data *rz = d;

	if (rz->zrasf != NVME_ZRASF_ZONE_REPORT_ALL &&
	    z->cond != nvme_zrasf_to_blk_zcond[rz->zrasf])
		return 0;

	if (rz->nr_zones < rz->out_nr_zones) {
		struct nvme_zone_descriptor zdesc = { };
		u16 status;
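
		/*
		 * Fill the ZNS zone descriptor: the zone state is reported
		 * in the upper nibble of the ZS byte (hence the << 4 below),
		 * and bit 2 of the zone attributes byte is Reset Zone
		 * Recommended, mapped from the block layer's "reset" hint.
		 */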
		zdesc.zcap = nvmet_sect_to_lba(rz->req->ns, z->capacity);
		zdesc.zslba = nvmet_sect_to_lba(rz->req->ns, z->start);
		zdesc.wp = nvmet_sect_to_lba(rz->req->ns, z->wp);
		zdesc.za = z->reset ? 1 << 2 : 0;
		zdesc.zs = z->cond << 4;
		zdesc.zt = z->type;

		status = nvmet_copy_to_sgl(rz->req, rz->out_buf_offset, &zdesc,
					   sizeof(zdesc));
		if (status)
			return -EINVAL;

		rz->out_buf_offset += sizeof(zdesc);
	}

	rz->nr_zones++;

	return 0;
}

static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req)
{
	unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);

	return blkdev_nr_zones(req->ns->bdev->bd_disk) -
		(sect >> ilog2(bdev_zone_sectors(req->ns->bdev)));
}
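
/*
 * The zone report placed in the host buffer is a 64-byte header followed by
 * 64-byte zone descriptors, so e.g. a 4 KiB buffer has room for
 * (4096 - 64) / 64 = 63 descriptors.
 */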
static unsigned long get_nr_zones_from_buf(struct nvmet_req *req, u32 bufsize)
{
	if (bufsize <= sizeof(struct nvme_zone_report))
		return 0;

	return (bufsize - sizeof(struct nvme_zone_report)) /
		sizeof(struct nvme_zone_descriptor);
}

static void nvmet_bdev_zone_zmgmt_recv_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
	sector_t start_sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
	unsigned long req_slba_nr_zones = nvmet_req_nr_zones_from_slba(req);
	u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
	__le64 nr_zones;
	u16 status;
	int ret;
	struct nvmet_report_zone_data rz_data = {
		.out_nr_zones = get_nr_zones_from_buf(req, out_bufsize),
		/* leave room for the report zone header */
		.out_buf_offset = sizeof(struct nvme_zone_report),
		.zrasf = req->cmd->zmr.zrasf,
		.nr_zones = 0,
		.req = req,
	};

	status = nvmet_bdev_validate_zone_mgmt_recv(req);
	if (status)
		goto out;

	if (!req_slba_nr_zones) {
		status = NVME_SC_SUCCESS;
		goto out;
	}

	ret = blkdev_report_zones(req->ns->bdev, start_sect, req_slba_nr_zones,
				  nvmet_bdev_report_zone_cb, &rz_data);
	if (ret < 0) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/*
	 * When the partial bit is set, nr_zones must indicate the number of
	 * zone descriptors actually transferred.
	 */
	if (req->cmd->zmr.pr)
		rz_data.nr_zones = min(rz_data.nr_zones, rz_data.out_nr_zones);

	nr_zones = cpu_to_le64(rz_data.nr_zones);
	status = nvmet_copy_to_sgl(req, 0, &nr_zones, sizeof(nr_zones));

out:
	nvmet_req_complete(req, status);
}
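
/*
 * The zone management handlers are deferred to the zbd workqueue because
 * blkdev_report_zones() and blkdev_zone_mgmt() issue synchronous I/O to the
 * backing device and may block for a long time.
 */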
void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req)
{
	INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zone_zmgmt_recv_work);
	queue_work(zbd_wq, &req->z.zmgmt_work);
}

static inline enum req_opf zsa_req_op(u8 zsa)
{
	switch (zsa) {
	case NVME_ZONE_OPEN:
		return REQ_OP_ZONE_OPEN;
	case NVME_ZONE_CLOSE:
		return REQ_OP_ZONE_CLOSE;
	case NVME_ZONE_FINISH:
		return REQ_OP_ZONE_FINISH;
	case NVME_ZONE_RESET:
		return REQ_OP_ZONE_RESET;
	default:
		return REQ_OP_LAST;
	}
}

static u16 blkdev_zone_mgmt_errno_to_nvme_status(int ret)
{
	switch (ret) {
	case 0:
		return NVME_SC_SUCCESS;
	case -EINVAL:
	case -EIO:
		return NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
	default:
		return NVME_SC_INTERNAL;
	}
}

struct nvmet_zone_mgmt_send_all_data {
	unsigned long *zbitmap;
	struct nvmet_req *req;
};

static int zmgmt_send_scan_cb(struct blk_zone *z, unsigned i, void *d)
{
	struct nvmet_zone_mgmt_send_all_data *data = d;

	switch (zsa_req_op(data->req->cmd->zms.zsa)) {
	case REQ_OP_ZONE_OPEN:
		switch (z->cond) {
		case BLK_ZONE_COND_CLOSED:
			break;
		default:
			return 0;
		}
		break;
	case REQ_OP_ZONE_CLOSE:
		switch (z->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
		case BLK_ZONE_COND_EXP_OPEN:
			break;
		default:
			return 0;
		}
		break;
	case REQ_OP_ZONE_FINISH:
		switch (z->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
		case BLK_ZONE_COND_EXP_OPEN:
		case BLK_ZONE_COND_CLOSED:
			break;
		default:
			return 0;
		}
		break;
	default:
		return -EINVAL;
	}

	set_bit(i, data->zbitmap);

	return 0;
}

static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
{
	struct block_device *bdev = req->ns->bdev;
	unsigned int nr_zones = blkdev_nr_zones(bdev->bd_disk);
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = NULL;
	sector_t sector = 0;
	int ret;
	struct nvmet_zone_mgmt_send_all_data d = {
		.req = req,
	};

	d.zbitmap = kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(*(d.zbitmap)),
				 GFP_NOIO, q->node);
	if (!d.zbitmap) {
		ret = -ENOMEM;
		goto out;
	}

	/* Scan and build bitmap of the eligible zones */
	ret = blkdev_report_zones(bdev, 0, nr_zones, zmgmt_send_scan_cb, &d);
	if (ret != nr_zones) {
		if (ret > 0)
			ret = -EIO;
		goto out;
	} else {
		/* We scanned all the zones */
		ret = 0;
	}

	while (sector < get_capacity(bdev->bd_disk)) {
		if (test_bit(blk_queue_zone_no(q, sector), d.zbitmap)) {
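			/*
			 * blk_next_bio() chains and submits the previous bio
			 * (if any) and returns a fresh one, so only the last
			 * bio in the chain needs submit_bio_wait() below.
			 */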
			bio = blk_next_bio(bio, 0, GFP_KERNEL);
			bio->bi_opf = zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC;
			bio->bi_iter.bi_sector = sector;
			bio_set_dev(bio, bdev);
			/* This may take a while, so be nice to others */
			cond_resched();
		}
		sector += blk_queue_zone_sectors(q);
	}

	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}

out:
	kfree(d.zbitmap);

	return blkdev_zone_mgmt_errno_to_nvme_status(ret);
}

static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req)
{
	int ret;

	switch (zsa_req_op(req->cmd->zms.zsa)) {
	case REQ_OP_ZONE_RESET:
		ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0,
				       get_capacity(req->ns->bdev->bd_disk),
				       GFP_KERNEL);
		if (ret < 0)
			return blkdev_zone_mgmt_errno_to_nvme_status(ret);
		break;
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return nvmet_bdev_zone_mgmt_emulate_all(req);
	default:
		/* this is needed to quiet a compiler warning */
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	return NVME_SC_SUCCESS;
}

static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);
	enum req_opf op = zsa_req_op(req->cmd->zms.zsa);
	struct block_device *bdev = req->ns->bdev;
	sector_t zone_sectors = bdev_zone_sectors(bdev);
	u16 status = NVME_SC_SUCCESS;
	int ret;

	if (op == REQ_OP_LAST) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
		status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
		goto out;
	}

	/* when the select all bit is set, the slba field is ignored */
	if (req->cmd->zms.select_all) {
		status = nvmet_bdev_execute_zmgmt_send_all(req);
		goto out;
	}

	if (sect >= get_capacity(bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		goto out;
	}

	if (sect & (zone_sectors - 1)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors, GFP_KERNEL);
	if (ret < 0)
		status = blkdev_zone_mgmt_errno_to_nvme_status(ret);

out:
	nvmet_req_complete(req, status);
}

void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req)
{
	INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zmgmt_send_work);
	queue_work(zbd_wq, &req->z.zmgmt_work);
}

static void nvmet_bdev_zone_append_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	if (bio->bi_status == BLK_STS_OK) {
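		/*
		 * For a successful REQ_OP_ZONE_APPEND the block layer returns
		 * the sector the data was actually written to in bi_sector;
		 * convert it to an LBA and return it to the host in the
		 * completion entry.
		 */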
		req->cqe->result.u64 =
			nvmet_sect_to_lba(req->ns, bio->bi_iter.bi_sector);
	}

	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
	nvmet_req_bio_put(req, bio);
}

void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
{
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
	u16 status = NVME_SC_SUCCESS;
	unsigned int total_len = 0;
	struct scatterlist *sg;
	struct bio *bio;
	int sg_cnt;

	/* Request is completed on len mismatch in nvmet_check_transfer_len() */
	if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
		return;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		goto out;
	}

	if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) {
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}
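
	/*
	 * Small transfers that fit in the request's preallocated inline
	 * bvec array use the embedded bio to avoid an allocation in the
	 * I/O path; larger ones allocate a bio sized for the SG list.
	 */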
	if (nvmet_use_inline_bvec(req)) {
		bio = &req->z.inline_bio;
		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	} else {
		bio = bio_alloc(GFP_KERNEL, req->sg_cnt);
	}

	bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
	bio->bi_end_io = nvmet_bdev_zone_append_bio_done;
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_iter.bi_sector = sect;
	bio->bi_private = req;
	if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
		bio->bi_opf |= REQ_FUA;

	for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
		struct page *p = sg_page(sg);
		unsigned int l = sg->length;
		unsigned int o = sg->offset;
		unsigned int ret;

		ret = bio_add_zone_append_page(bio, p, l, o);
		if (ret != sg->length) {
			status = NVME_SC_INTERNAL;
			goto out_put_bio;
		}
		total_len += sg->length;
	}

	if (total_len != nvmet_rw_data_len(req)) {
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
		goto out_put_bio;
	}

	submit_bio(bio);
	return;

out_put_bio:
	nvmet_req_bio_put(req, bio);
out:
	nvmet_req_complete(req, status);
}

u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_zone_append:
		req->execute = nvmet_bdev_execute_zone_append;
		return 0;
	case nvme_cmd_zone_mgmt_recv:
		req->execute = nvmet_bdev_execute_zone_mgmt_recv;
		return 0;
	case nvme_cmd_zone_mgmt_send:
		req->execute = nvmet_bdev_execute_zone_mgmt_send;
		return 0;
	default:
		return nvmet_bdev_parse_io_cmd(req);
	}
}
@ -1004,6 +1004,7 @@ int acpi_dev_resume(struct device *dev);
int acpi_subsys_runtime_suspend(struct device *dev);
int acpi_subsys_runtime_resume(struct device *dev);
int acpi_dev_pm_attach(struct device *dev, bool power_on);
bool acpi_storage_d3(struct device *dev);
#else
static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
@ -1011,6 +1012,10 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
{
	return 0;
}
static inline bool acpi_storage_d3(struct device *dev)
{
	return false;
}
#endif

#if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)

@ -822,4 +822,6 @@ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
	bio->bi_opf |= REQ_NOWAIT;
}

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

#endif /* __LINUX_BIO_H */

@ -636,8 +636,8 @@ struct nvme_lba_range_type {
	__u8			type;
	__u8			attributes;
	__u8			rsvd2[14];
	__u64			slba;
	__u64			nlb;
	__le64			slba;
	__le64			nlb;
	__u8			guid[16];
	__u8			rsvd48[16];
};
@ -944,6 +944,13 @@ struct nvme_zone_mgmt_recv_cmd {
enum {
	NVME_ZRA_ZONE_REPORT		= 0,
	NVME_ZRASF_ZONE_REPORT_ALL	= 0,
	NVME_ZRASF_ZONE_STATE_EMPTY	= 0x01,
	NVME_ZRASF_ZONE_STATE_IMP_OPEN	= 0x02,
	NVME_ZRASF_ZONE_STATE_EXP_OPEN	= 0x03,
	NVME_ZRASF_ZONE_STATE_CLOSED	= 0x04,
	NVME_ZRASF_ZONE_STATE_READONLY	= 0x05,
	NVME_ZRASF_ZONE_STATE_FULL	= 0x06,
	NVME_ZRASF_ZONE_STATE_OFFLINE	= 0x07,
	NVME_REPORT_ZONE_PARTIAL	= 1,
};

@ -1504,6 +1511,7 @@ enum {
	NVME_SC_NS_WRITE_PROTECTED	= 0x20,
	NVME_SC_CMD_INTERRUPTED		= 0x21,
	NVME_SC_TRANSIENT_TR_ERR	= 0x22,
	NVME_SC_INVALID_IO_CMD_SET	= 0x2C,

	NVME_SC_LBA_RANGE		= 0x80,
	NVME_SC_CAP_EXCEEDED		= 0x81,