nvmet: fix discover log page when offsets are used

The nvme target hasn't been taking the Get Log Page offset parameter
into consideration, and so has been returning corrupted log pages when
offsets are used. Since many tools, including nvme-cli, split log page
requests into 4k transfers, discovery log responses have been broken
whenever more than 3 subsystems exist.
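
Purely for illustration, not part of the patch: the arithmetic behind the
"more than 3 subsystems" threshold, assuming the 1024-byte discovery log
header and 1024-byte entries defined by NVMe over Fabrics, and a
hypothetical read_chunk() callback standing in for a real Get Log Page
submission. A single 4k transfer only covers the header plus three
entries, so anything beyond that has to be fetched with a non-zero
offset, which the target used to ignore.

#include <stddef.h>
#include <stdint.h>

#define DISC_LOG_HDR_SIZE   1024u  /* sizeof(struct nvmf_disc_rsp_page_hdr)   */
#define DISC_LOG_ENTRY_SIZE 1024u  /* sizeof(struct nvmf_disc_rsp_page_entry) */
#define MAX_XFER            4096u  /* typical per-command cap, e.g. in nvme-cli */

/* read_chunk: hypothetical host-side helper that issues one Get Log Page
 * command for the discovery log at the given byte offset and length. */
typedef int (*read_chunk_fn)(uint64_t offset, void *dst, uint32_t len);

static int fetch_discovery_log(uint64_t numrec, uint8_t *buf,
                               read_chunk_fn read_chunk)
{
        uint64_t total = DISC_LOG_HDR_SIZE + numrec * DISC_LOG_ENTRY_SIZE;
        uint64_t off;

        for (off = 0; off < total; off += MAX_XFER) {
                uint32_t len = (uint32_t)(total - off < MAX_XFER ?
                                          total - off : MAX_XFER);
                int ret;

                /* Every chunk after the first one uses a non-zero offset;
                 * before this fix the target returned data from offset 0
                 * regardless, corrupting entries four and up. */
                ret = read_chunk(off, buf + off, len);
                if (ret)
                        return ret;
        }
        return 0;
}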

Fix the returned data by internally generating the entire discovery
log page and copying only the requested bytes into the user buffer. The
command's log page offset fields are now also accessible as a native
__le64 so the value is easier to extract from the command.
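
Also illustrative only: what the __le64 change buys. With the two 32-bit
offset dwords overlaid by a single little-endian 64-bit field in a union
(see the include/linux/nvme.h hunk below), the target can read the whole
offset with one le64_to_cpu() instead of shifting and or-ing the halves
together. The struct below is a trimmed, hypothetical stand-in for the
real nvme_get_log_page_command, using plain uint32_t/uint64_t and
assuming a little-endian view.

#include <stdint.h>

/* Trimmed stand-in for the offset fields of the Get Log Page SQE;
 * the field names follow the patch. */
struct log_page_offset_fields {
        union {
                struct {
                        uint32_t lpol;  /* log page offset, lower dword */
                        uint32_t lpou;  /* log page offset, upper dword */
                };
                uint64_t lpo;           /* the same 8 bytes as one field */
        };
};

/* Old style: assemble the 64-bit byte offset from the two dwords. */
static uint64_t offset_from_dwords(const struct log_page_offset_fields *c)
{
        return ((uint64_t)c->lpou << 32) | c->lpol;
}

/* New style, mirroring nvmet_get_log_page_offset() minus the
 * le64_to_cpu() handling: read the union member directly. */
static uint64_t offset_from_lpo(const struct log_page_offset_fields *c)
{
        return c->lpo;
}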

Signed-off-by: Keith Busch <keith.busch@intel.com>
Tested-by: Minwoo Im <minwoo.im@samsung.com>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Commit d808b7f759 (parent 67f471b6ed)
Author: Keith Busch, 2019-04-09 10:03:59 -06:00; committed by Christoph Hellwig
4 changed files with 57 additions and 24 deletions

drivers/nvme/target/admin-cmd.c

@@ -24,6 +24,11 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd)
 	return len;
 }
 
+u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
+{
+	return le64_to_cpu(cmd->get_log_page.lpo);
+}
+
 static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
 {
 	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
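
For context, paraphrased rather than taken from this diff: the new offset
helper sits next to the existing nvmet_get_log_page_len(), which derives
the transfer length in bytes from the zero-based NUMDL/NUMDU dword-count
fields, roughly as sketched below. Together the pair gives the target
both the length and, now, the starting offset of the window the host
asked for.

/* Rough sketch of the existing length helper for comparison; treat the
 * details as illustrative rather than a verbatim copy. NUMDL/NUMDU form
 * a 0's based dword count, so add one and convert dwords to bytes. */
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	len += 1;		/* NUMD is a 0's based value */
	len *= sizeof(u32);	/* dwords -> bytes */

	return len;
}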

drivers/nvme/target/discovery.c

@@ -131,54 +131,76 @@ static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port
 	memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
 }
 
+static size_t discovery_log_entries(struct nvmet_req *req)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	struct nvmet_subsys_link *p;
+	struct nvmet_port *r;
+	size_t entries = 0;
+
+	list_for_each_entry(p, &req->port->subsystems, entry) {
+		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
+			continue;
+		entries++;
+	}
+	list_for_each_entry(r, &req->port->referrals, entry)
+		entries++;
+	return entries;
+}
+
 static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
 {
 	const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 	struct nvmf_disc_rsp_page_hdr *hdr;
+	u64 offset = nvmet_get_log_page_offset(req->cmd);
 	size_t data_len = nvmet_get_log_page_len(req->cmd);
-	size_t alloc_len = max(data_len, sizeof(*hdr));
-	int residual_len = data_len - sizeof(*hdr);
+	size_t alloc_len;
 	struct nvmet_subsys_link *p;
 	struct nvmet_port *r;
 	u32 numrec = 0;
 	u16 status = 0;
+	void *buffer;
+
+	/* Spec requires dword aligned offsets */
+	if (offset & 0x3) {
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		goto out;
+	}
 
 	/*
 	 * Make sure we're passing at least a buffer of response header size.
 	 * If host provided data len is less than the header size, only the
 	 * number of bytes requested by host will be sent to host.
 	 */
-	hdr = kzalloc(alloc_len, GFP_KERNEL);
-	if (!hdr) {
+	down_read(&nvmet_config_sem);
+	alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
+	buffer = kzalloc(alloc_len, GFP_KERNEL);
+	if (!buffer) {
+		up_read(&nvmet_config_sem);
 		status = NVME_SC_INTERNAL;
 		goto out;
 	}
 
-	down_read(&nvmet_config_sem);
+	hdr = buffer;
 	list_for_each_entry(p, &req->port->subsystems, entry) {
+		char traddr[NVMF_TRADDR_SIZE];
+
 		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
 			continue;
-		if (residual_len >= entry_size) {
-			char traddr[NVMF_TRADDR_SIZE];
 
-			nvmet_set_disc_traddr(req, req->port, traddr);
-			nvmet_format_discovery_entry(hdr, req->port,
-					p->subsys->subsysnqn, traddr,
-					NVME_NQN_NVME, numrec);
-			residual_len -= entry_size;
-		}
+		nvmet_set_disc_traddr(req, req->port, traddr);
+		nvmet_format_discovery_entry(hdr, req->port,
+				p->subsys->subsysnqn, traddr,
+				NVME_NQN_NVME, numrec);
 		numrec++;
 	}
 
 	list_for_each_entry(r, &req->port->referrals, entry) {
-		if (residual_len >= entry_size) {
-			nvmet_format_discovery_entry(hdr, r,
-					NVME_DISC_SUBSYS_NAME,
-					r->disc_addr.traddr,
-					NVME_NQN_DISC, numrec);
-			residual_len -= entry_size;
-		}
+		nvmet_format_discovery_entry(hdr, r,
+				NVME_DISC_SUBSYS_NAME,
+				r->disc_addr.traddr,
+				NVME_NQN_DISC, numrec);
 		numrec++;
 	}
 
@@ -190,8 +212,8 @@ static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
 
 	up_read(&nvmet_config_sem);
 
-	status = nvmet_copy_to_sgl(req, 0, hdr, data_len);
-	kfree(hdr);
+	status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
+	kfree(buffer);
 out:
 	nvmet_req_complete(req, status);
 }

drivers/nvme/target/nvmet.h

@@ -428,6 +428,7 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
 		size_t len);
 u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);
 u32 nvmet_get_log_page_len(struct nvme_command *cmd);
+u64 nvmet_get_log_page_offset(struct nvme_command *cmd);
 
 extern struct list_head *nvmet_ports;
 void nvmet_port_disc_changed(struct nvmet_port *port,

include/linux/nvme.h

@@ -967,8 +967,13 @@ struct nvme_get_log_page_command {
 	__le16			numdl;
 	__le16			numdu;
 	__u16			rsvd11;
-	__le32			lpol;
-	__le32			lpou;
+	union {
+		struct {
+			__le32 lpol;
+			__le32 lpou;
+		};
+		__le64 lpo;
+	};
 	__u32			rsvd14[2];
 };
 