block-6.2-2023-01-13
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmPBsFAQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpqgnEAC0OqxnMsOPNbkLO7k6FsSrG7ZoENkOIMCt
Grk3D1cPkM13I0xc+WiaOBezMriPzfdXvt5AGDn9fd53Ih47qpSY4eU6pCqoCk5y
HWdn8KXZvhJGZsSy0Nz+cfPDW/8diJON8YBpJwWM/DfDdP8XibtjlIMTVTtJab6h
aGWjmy3leNfghOJ0cZ1wjL6maWFoowQASs52PZfajSc0mQ5X0i8BgQb1WOHNu89C
vEir9PYlTmdMnYlAKLsyEL3KoGUPm++zSLtJeyWYavlCMGK5WTyNkzmeXqsQhAGf
b1LjovQASe//1t2wvCzQviRf4cae0pE9JhiaYt2oxoDdHrfQj/WPndVS4yE9c+0O
BnLVTCFHNv86TRXNCbEUzI+Ftj6m9qt4MrHz8YpstX7FxGxYC+T5RqTwYClWZQ0j
llBuJUHj+kkAv6kBMJCHTyat6pxIDgcb52QMJr5mFWuEaTloraBIJC70hMtxBQV/
j5mrBYqCngCHVs+hAl9UQ4zqQVSvkeT11QFvwFolxIfs7qtfLqeGzYxvaeomqO3V
sA+H5NY50OEuPfFFmCpcNUJXeUKg7wP39iNHdz6P5cCDBCfUwbNbgKKKNmBovaC+
KhPd8Xo1MmzDuF+cylvTcjOBDte4425GN7PBj4vP1xbuHYcjg6AEFLawgqE9Y4XX
xyNlgJXPOg==
=ujiw
-----END PGP SIGNATURE-----

Merge tag 'block-6.2-2023-01-13' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:
 "Nothing major in here, just a collection of NVMe fixes and dropping a
  wrong might_sleep() that static checkers tripped over but which isn't
  valid"

* tag 'block-6.2-2023-01-13' of git://git.kernel.dk/linux:
  MAINTAINERS: stop nvme matching for nvmem files
  nvme: don't allow unprivileged passthrough on partitions
  nvme: replace the "bool vec" arguments with flags in the ioctl path
  nvme: remove __nvme_ioctl
  nvme-pci: fix error handling in nvme_pci_enable()
  nvme-pci: add NVME_QUIRK_IDENTIFY_CNS quirk to Apple T2 controllers
  nvme-apple: add NVME_QUIRK_IDENTIFY_CNS quirk to fix regression
  block: Drop spurious might_sleep() from blk_put_queue()
commit 97ec4d559d
MAINTAINERS
@@ -14919,7 +14919,8 @@ T: git://git.infradead.org/nvme.git
 F: Documentation/nvme/
 F: drivers/nvme/host/
 F: drivers/nvme/common/
-F: include/linux/nvme*
+F: include/linux/nvme.h
+F: include/linux/nvme-*.h
 F: include/uapi/linux/nvme_ioctl.h

 NVM EXPRESS FABRICS AUTHENTICATION
block/blk-core.c
@@ -283,12 +283,9 @@ static void blk_free_queue(struct request_queue *q)
  *
  * Decrements the refcount of the request_queue and free it when the refcount
  * reaches 0.
- *
- * Context: Can sleep.
  */
 void blk_put_queue(struct request_queue *q)
 {
-	might_sleep();
 	if (refcount_dec_and_test(&q->refs))
 		blk_free_queue(q);
 }
drivers/nvme/host/apple.c
@@ -1493,7 +1493,7 @@ static int apple_nvme_probe(struct platform_device *pdev)
 	}

 	ret = nvme_init_ctrl(&anv->ctrl, anv->dev, &nvme_ctrl_ops,
-				NVME_QUIRK_SKIP_CID_GEN);
+				NVME_QUIRK_SKIP_CID_GEN | NVME_QUIRK_IDENTIFY_CNS);
 	if (ret) {
 		dev_err_probe(dev, ret, "Failed to initialize nvme_ctrl");
 		goto put_dev;
drivers/nvme/host/ioctl.c
@@ -8,14 +8,26 @@
 #include <linux/io_uring.h>
 #include "nvme.h"

+enum {
+	NVME_IOCTL_VEC		= (1 << 0),
+	NVME_IOCTL_PARTITION	= (1 << 1),
+};
+
 static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
-		fmode_t mode)
+		unsigned int flags, fmode_t mode)
 {
 	u32 effects;

 	if (capable(CAP_SYS_ADMIN))
 		return true;

+	/*
+	 * Do not allow unprivileged passthrough on partitions, as that allows an
+	 * escape from the containment of the partition.
+	 */
+	if (flags & NVME_IOCTL_PARTITION)
+		return false;
+
 	/*
 	 * Do not allow unprivileged processes to send vendor specific or fabrics
 	 * commands as we can't be sure about their effects.
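For illustration only, and not part of this commit: a minimal userspace sketch of what the new NVME_IOCTL_PARTITION check enforces. The device path /dev/nvme0n1p1 and the read parameters are assumptions; run without CAP_SYS_ADMIN against a partition node, the passthrough ioctl is expected to fail with EACCES.

/* partition_passthru.c - hedged sketch; build with: cc -o partition_passthru partition_passthru.c */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	const char *dev = "/dev/nvme0n1p1";	/* assumed partition device node */
	int fd = open(dev, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	int nsid = ioctl(fd, NVME_IOCTL_ID);	/* namespace ID of the parent namespace */
	if (nsid < 0) {
		perror("NVME_IOCTL_ID");
		return 1;
	}

	void *buf;
	if (posix_memalign(&buf, 4096, 4096))
		return 1;

	struct nvme_passthru_cmd cmd = {
		.opcode   = 0x02,		/* NVMe Read, one illustrative I/O command */
		.nsid     = nsid,
		.addr     = (unsigned long)buf,
		.data_len = 4096,
		.cdw12    = 7,			/* NLB, zero-based: 8 LBAs, assuming 512-byte LBAs */
	};

	if (ioctl(fd, NVME_IOCTL_IO_CMD, &cmd) < 0)
		/* unprivileged callers on a partition are expected to see EACCES here */
		fprintf(stderr, "passthrough rejected: %s\n", strerror(errno));
	else
		printf("passthrough completed, status in cmd.result\n");

	free(buf);
	close(fd);
	return 0;
}

Run against the whole-namespace node instead (for example /dev/nvme0n1) and the partition check no longer applies, though the remaining capability and opcode checks in nvme_cmd_allowed() still do.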
@@ -150,7 +162,7 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
 static int nvme_map_user_request(struct request *req, u64 ubuffer,
 		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
 		u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd,
-		bool vec)
+		unsigned int flags)
 {
 	struct request_queue *q = req->q;
 	struct nvme_ns *ns = q->queuedata;
@@ -163,7 +175,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 		struct iov_iter iter;

 		/* fixedbufs is only for non-vectored io */
-		if (WARN_ON_ONCE(vec))
+		if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC))
 			return -EINVAL;
 		ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
 				rq_data_dir(req), &iter, ioucmd);
@@ -172,8 +184,8 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 		ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
 	} else {
 		ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
-				bufflen, GFP_KERNEL, vec, 0, 0,
-				rq_data_dir(req));
+				bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
+				0, rq_data_dir(req));
 	}

 	if (ret)
@@ -203,9 +215,9 @@ out:
 }

 static int nvme_submit_user_cmd(struct request_queue *q,
-		struct nvme_command *cmd, u64 ubuffer,
-		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
-		u32 meta_seed, u64 *result, unsigned timeout, bool vec)
+		struct nvme_command *cmd, u64 ubuffer, unsigned bufflen,
+		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
+		u64 *result, unsigned timeout, unsigned int flags)
 {
 	struct nvme_ctrl *ctrl;
 	struct request *req;
@@ -221,7 +233,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 	req->timeout = timeout;
 	if (ubuffer && bufflen) {
 		ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
-				meta_len, meta_seed, &meta, NULL, vec);
+				meta_len, meta_seed, &meta, NULL, flags);
 		if (ret)
 			return ret;
 	}
@@ -304,10 +316,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	c.rw.apptag = cpu_to_le16(io.apptag);
 	c.rw.appmask = cpu_to_le16(io.appmask);

-	return nvme_submit_user_cmd(ns->queue, &c,
-			io.addr, length,
-			metadata, meta_len, lower_32_bits(io.slba), NULL, 0,
-			false);
+	return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata,
+			meta_len, lower_32_bits(io.slba), NULL, 0, 0);
 }

 static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
@@ -325,7 +335,8 @@ static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
 }

 static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
-		struct nvme_passthru_cmd __user *ucmd, fmode_t mode)
+		struct nvme_passthru_cmd __user *ucmd, unsigned int flags,
+		fmode_t mode)
 {
 	struct nvme_passthru_cmd cmd;
 	struct nvme_command c;
@@ -353,16 +364,15 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
 	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

-	if (!nvme_cmd_allowed(ns, &c, mode))
+	if (!nvme_cmd_allowed(ns, &c, 0, mode))
 		return -EACCES;

 	if (cmd.timeout_ms)
 		timeout = msecs_to_jiffies(cmd.timeout_ms);

 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
-			cmd.addr, cmd.data_len,
-			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
-			0, &result, timeout, false);
+			cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
+			cmd.metadata_len, 0, &result, timeout, 0);

 	if (status >= 0) {
 		if (put_user(result, &ucmd->result))
@@ -373,8 +383,8 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 }

 static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
-		struct nvme_passthru_cmd64 __user *ucmd, bool vec,
-		fmode_t mode)
+		struct nvme_passthru_cmd64 __user *ucmd, unsigned int flags,
+		fmode_t mode)
 {
 	struct nvme_passthru_cmd64 cmd;
 	struct nvme_command c;
@@ -401,16 +411,15 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
 	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

-	if (!nvme_cmd_allowed(ns, &c, mode))
+	if (!nvme_cmd_allowed(ns, &c, flags, mode))
 		return -EACCES;

 	if (cmd.timeout_ms)
 		timeout = msecs_to_jiffies(cmd.timeout_ms);

 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
-			cmd.addr, cmd.data_len,
-			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
-			0, &cmd.result, timeout, vec);
+			cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
+			cmd.metadata_len, 0, &cmd.result, timeout, flags);

 	if (status >= 0) {
 		if (put_user(cmd.result, &ucmd->result))
@@ -571,7 +580,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
 	c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));

-	if (!nvme_cmd_allowed(ns, &c, ioucmd->file->f_mode))
+	if (!nvme_cmd_allowed(ns, &c, 0, ioucmd->file->f_mode))
 		return -EACCES;

 	d.metadata = READ_ONCE(cmd->metadata);
@@ -641,9 +650,9 @@ static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
 {
 	switch (cmd) {
 	case NVME_IOCTL_ADMIN_CMD:
-		return nvme_user_cmd(ctrl, NULL, argp, mode);
+		return nvme_user_cmd(ctrl, NULL, argp, 0, mode);
 	case NVME_IOCTL_ADMIN64_CMD:
-		return nvme_user_cmd64(ctrl, NULL, argp, false, mode);
+		return nvme_user_cmd64(ctrl, NULL, argp, 0, mode);
 	default:
 		return sed_ioctl(ctrl->opal_dev, cmd, argp);
 	}
@@ -668,14 +677,14 @@ struct nvme_user_io32 {
 #endif /* COMPAT_FOR_U64_ALIGNMENT */

 static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
-		void __user *argp, fmode_t mode)
+		void __user *argp, unsigned int flags, fmode_t mode)
 {
 	switch (cmd) {
 	case NVME_IOCTL_ID:
 		force_successful_syscall_return();
 		return ns->head->ns_id;
 	case NVME_IOCTL_IO_CMD:
-		return nvme_user_cmd(ns->ctrl, ns, argp, mode);
+		return nvme_user_cmd(ns->ctrl, ns, argp, flags, mode);
 	/*
 	 * struct nvme_user_io can have different padding on some 32-bit ABIs.
 	 * Just accept the compat version as all fields that are used are the
@@ -686,37 +695,40 @@ static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
 #endif
 	case NVME_IOCTL_SUBMIT_IO:
 		return nvme_submit_io(ns, argp);
-	case NVME_IOCTL_IO64_CMD:
-		return nvme_user_cmd64(ns->ctrl, ns, argp, false, mode);
 	case NVME_IOCTL_IO64_CMD_VEC:
-		return nvme_user_cmd64(ns->ctrl, ns, argp, true, mode);
+		flags |= NVME_IOCTL_VEC;
+		fallthrough;
+	case NVME_IOCTL_IO64_CMD:
+		return nvme_user_cmd64(ns->ctrl, ns, argp, flags, mode);
 	default:
 		return -ENOTTY;
 	}
 }

-static int __nvme_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *arg,
-		fmode_t mode)
-{
-	if (is_ctrl_ioctl(cmd))
-		return nvme_ctrl_ioctl(ns->ctrl, cmd, arg, mode);
-	return nvme_ns_ioctl(ns, cmd, arg, mode);
-}
-
 int nvme_ioctl(struct block_device *bdev, fmode_t mode,
 		unsigned int cmd, unsigned long arg)
 {
 	struct nvme_ns *ns = bdev->bd_disk->private_data;
+	void __user *argp = (void __user *)arg;
+	unsigned int flags = 0;

-	return __nvme_ioctl(ns, cmd, (void __user *)arg, mode);
+	if (bdev_is_partition(bdev))
+		flags |= NVME_IOCTL_PARTITION;
+
+	if (is_ctrl_ioctl(cmd))
+		return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, mode);
+	return nvme_ns_ioctl(ns, cmd, argp, flags, mode);
 }

 long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct nvme_ns *ns =
 		container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);
+	void __user *argp = (void __user *)arg;

-	return __nvme_ioctl(ns, cmd, (void __user *)arg, file->f_mode);
+	if (is_ctrl_ioctl(cmd))
+		return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, file->f_mode);
+	return nvme_ns_ioctl(ns, cmd, argp, 0, file->f_mode);
 }

 static int nvme_uring_cmd_checks(unsigned int issue_flags)
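The refactor above replaces a boolean parameter with a small flag word, so per-call attributes (vectored I/O, partition origin) can travel through one argument and new ones can be added without touching every signature again. A standalone sketch of the same pattern, in plain userspace C with invented demo_* names, purely for illustration:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical flag bits, mirroring the style of NVME_IOCTL_VEC / NVME_IOCTL_PARTITION */
enum {
	DEMO_F_VECTORED  = (1 << 0),
	DEMO_F_PARTITION = (1 << 1),
};

static bool demo_cmd_allowed(unsigned int flags)
{
	/* a single flag word travels through the call chain instead of N bools */
	if (flags & DEMO_F_PARTITION)
		return false;
	return true;
}

static int demo_submit(unsigned int flags)
{
	if (!demo_cmd_allowed(flags))
		return -1;
	printf("submitting %s I/O\n",
	       (flags & DEMO_F_VECTORED) ? "vectored" : "linear");
	return 0;
}

int main(void)
{
	unsigned int flags = 0;

	flags |= DEMO_F_VECTORED;	/* like the NVME_IOCTL_IO64_CMD_VEC fallthrough */
	if (demo_submit(flags))
		fprintf(stderr, "rejected\n");

	flags |= DEMO_F_PARTITION;	/* like bdev_is_partition() in nvme_ioctl() */
	if (demo_submit(flags))
		fprintf(stderr, "rejected\n");
	return 0;
}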
@@ -806,6 +818,10 @@ int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
 	void __user *argp = (void __user *)arg;
 	struct nvme_ns *ns;
 	int srcu_idx, ret = -EWOULDBLOCK;
+	unsigned int flags = 0;
+
+	if (bdev_is_partition(bdev))
+		flags |= NVME_IOCTL_PARTITION;

 	srcu_idx = srcu_read_lock(&head->srcu);
 	ns = nvme_find_path(head);
@@ -821,7 +837,7 @@ int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
 		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
 					       mode);

-	ret = nvme_ns_ioctl(ns, cmd, argp, mode);
+	ret = nvme_ns_ioctl(ns, cmd, argp, flags, mode);
 out_unlock:
 	srcu_read_unlock(&head->srcu, srcu_idx);
 	return ret;
@@ -846,7 +862,7 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
 		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
 					       file->f_mode);

-	ret = nvme_ns_ioctl(ns, cmd, argp, file->f_mode);
+	ret = nvme_ns_ioctl(ns, cmd, argp, 0, file->f_mode);
 out_unlock:
 	srcu_read_unlock(&head->srcu, srcu_idx);
 	return ret;
@@ -945,7 +961,7 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
 	kref_get(&ns->kref);
 	up_read(&ctrl->namespaces_rwsem);

-	ret = nvme_user_cmd(ctrl, ns, argp, mode);
+	ret = nvme_user_cmd(ctrl, ns, argp, 0, mode);
 	nvme_put_ns(ns);
 	return ret;

@@ -962,9 +978,9 @@ long nvme_dev_ioctl(struct file *file, unsigned int cmd,

 	switch (cmd) {
 	case NVME_IOCTL_ADMIN_CMD:
-		return nvme_user_cmd(ctrl, NULL, argp, file->f_mode);
+		return nvme_user_cmd(ctrl, NULL, argp, 0, file->f_mode);
 	case NVME_IOCTL_ADMIN64_CMD:
-		return nvme_user_cmd64(ctrl, NULL, argp, false, file->f_mode);
+		return nvme_user_cmd64(ctrl, NULL, argp, 0, file->f_mode);
 	case NVME_IOCTL_IO_CMD:
 		return nvme_dev_user_cmd(ctrl, argp, file->f_mode);
 	case NVME_IOCTL_RESET:
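For reference, a minimal userspace sketch of driving the NVME_IOCTL_ADMIN_CMD path shown above: an Identify Controller admin command (opcode 0x06, CNS 01h) against the controller character device. The /dev/nvme0 path is an assumption; depending on the command and the caller's privileges, nvme_cmd_allowed() may still reject it with EACCES.

/* identify_ctrl.c - hedged sketch; build with: cc -o identify_ctrl identify_ctrl.c */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	int fd = open("/dev/nvme0", O_RDONLY);	/* assumed controller char device */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	unsigned char *id = aligned_alloc(4096, 4096);
	memset(id, 0, 4096);

	struct nvme_passthru_cmd cmd = {
		.opcode   = 0x06,		/* Identify */
		.nsid     = 0,
		.addr     = (unsigned long)id,
		.data_len = 4096,
		.cdw10    = 1,			/* CNS 01h: Identify Controller data structure */
	};

	int err = ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
	if (err) {
		fprintf(stderr, "identify failed: %s\n",
			err < 0 ? strerror(errno) : "NVMe status returned");
		return 1;
	}

	/* the model number field occupies bytes 24..63 of the Identify Controller data */
	printf("model: %.40s\n", id + 24);

	free(id);
	close(fd);
	return 0;
}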
drivers/nvme/host/pci.c
@@ -2533,7 +2533,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 	 */
 	result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
 	if (result < 0)
-		return result;
+		goto disable;

 	dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);

@@ -2586,8 +2586,13 @@
 	pci_enable_pcie_error_reporting(pdev);
 	pci_save_state(pdev);

-	return nvme_pci_configure_admin_queue(dev);
+	result = nvme_pci_configure_admin_queue(dev);
+	if (result)
+		goto free_irq;
+	return result;

+free_irq:
+	pci_free_irq_vectors(pdev);
 disable:
 	pci_disable_device(pdev);
 	return result;
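The fix above follows the usual kernel error-unwind idiom: every step after the first jumps to a label that releases what was already acquired, in reverse order, so no early return can leak a resource (here the IRQ vectors and the enabled PCI device). A self-contained userspace sketch of the same shape, with invented resources, for illustration only:

#include <stdio.h>
#include <stdlib.h>

/* stand-ins for "enable device", "allocate vectors", "configure queue" */
static int acquire_a(void **a) { *a = malloc(16); return *a ? 0 : -1; }
static int acquire_b(void **b) { *b = malloc(16); return *b ? 0 : -1; }
static int configure(void)     { return 0; }	/* pretend this can fail */

static int demo_enable(void)
{
	void *a, *b;
	int result;

	result = acquire_a(&a);
	if (result < 0)
		return result;		/* nothing to undo yet */

	result = acquire_b(&b);
	if (result < 0)
		goto release_a;		/* undo only what was acquired so far */

	result = configure();
	if (result)
		goto release_b;
	return 0;

release_b:
	free(b);
release_a:
	free(a);
	return result;
}

int main(void)
{
	printf("demo_enable() -> %d\n", demo_enable());
	return 0;
}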
@@ -3495,7 +3500,8 @@
 		.driver_data = NVME_QUIRK_SINGLE_VECTOR |
 				NVME_QUIRK_128_BYTES_SQES |
 				NVME_QUIRK_SHARED_TAGS |
-				NVME_QUIRK_SKIP_CID_GEN },
+				NVME_QUIRK_SKIP_CID_GEN |
+				NVME_QUIRK_IDENTIFY_CNS },
 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 	{ 0, }
 };