block-5.9-2020-09-25
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAl9upXAQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgplZPD/9cIgt7FM7O1MZYCpp7TH+Da8887UxFDIJ4
VWZOs7JzV0BPHsfonMEYBSsEYvJxA+w2vtD+aTTwBK/+QwvvCNRyPNjEGZRgb8+n
o41qRCuuQho1OO9ivGI2C/sGmt7mI9LRZ+ik0yHYVSzW8V9z1Z0D/KB5258pwPEN
mhjC+haAX0fjzSckh7Qr+5p8RdO/yxfzR6rugB84qzmwSxiFPdDI0v2bT1paNXPy
cHx45ov3Z0UjfDnzpMcldnKznUScayFZ5rkOVaC1G7M7daJbAYnT0pZPAvbE4C9G
koMdcIDqX4xsNGsmRePjvAcb2la6Oo0N0tKg8IB0syhyozQBbLH76RfUaybWZpbK
JJZNJnGY6KwmrAYYw94uUH/EQ2YMweSp+x2MN503D4gBmFtc3oz6X6cgxXKMB/OH
Z0l2D7nRSiVZAEPf/b/RY7N3vkxq1feTQTBgW/lheYU1LPc9w4uWDlpdmQFY+Agn
biSZIFspn/WAbtXtRouKbm1fygHnUYqx7PQpyXRwvENFk15wz5174OrO4Doo5r9R
1t9CYzxQFxnfVSukLFFdQxOUU78t9DQDYwTsCZXvTNNuEgv+3sOHQ8iYU7sCQiZh
EAz97kqETUf/Av1+5ItzneZTaI22OU6DF2LBmkjxbKp7W+19yO15oo9gOjIR1l+r
8Nr3DMOc3Q==
=e8oA
-----END PGP SIGNATURE-----

Merge tag 'block-5.9-2020-09-25' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "NVMe pull request from Christoph, and removal of a dead define.

  - fix an error during controller probe that causes a double free of
    irqs (Keith Busch)

  - FC connection establishment fix (James Smart)

  - properly handle completions for invalid tags (Xianting Tian)

  - pass the correct nsid to the command effects and supported log
    (Chaitanya Kulkarni)"

* tag 'block-5.9-2020-09-25' of git://git.kernel.dk/linux-block:
  block: remove unused BLK_QC_T_EAGAIN flag
  nvme-core: don't use NVME_NSID_ALL for command effects and supported log
  nvme-fc: fail new connections to a deleted host or remote port
  nvme-pci: fix NULL req in completion handler
  nvme: return errors for hwmon init
commit 9d2fbaefb3
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3041,7 +3041,7 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
 	if (!cel)
 		return -ENOMEM;
 
-	ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0, csi,
+	ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
 			&cel->log, sizeof(cel->log), 0);
 	if (ret) {
 		kfree(cel);
@@ -3236,8 +3236,11 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 	if (ret < 0)
 		return ret;
 
-	if (!ctrl->identified)
-		nvme_hwmon_init(ctrl);
+	if (!ctrl->identified) {
+		ret = nvme_hwmon_init(ctrl);
+		if (ret < 0)
+			return ret;
+	}
 
 	ctrl->identified = true;
 
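Note on the two core.c hunks above: the first passes nsid 0x00 instead of NVME_NSID_ALL when fetching the command-effects log, and the second stops ignoring nvme_hwmon_init()'s return value during the first identify pass. Below is a minimal standalone C sketch of that second pattern (error propagation from a once-only init step); struct controller, fake_hwmon_init() and init_identify() are invented stand-ins, not kernel code.

#include <stdbool.h>
#include <stdio.h>

struct controller {
	bool identified;
};

/* Stand-in for nvme_hwmon_init(): may fail with a negative errno-style code. */
static int fake_hwmon_init(struct controller *ctrl)
{
	(void)ctrl;
	return -12; /* pretend -ENOMEM to exercise the error path */
}

/* Mirrors the reworked nvme_init_identify() flow: run the hwmon setup only on
 * the first identify pass, and abort initialization if it reports an error
 * instead of silently continuing. */
static int init_identify(struct controller *ctrl)
{
	if (!ctrl->identified) {
		int ret = fake_hwmon_init(ctrl);

		if (ret < 0)
			return ret;
	}

	ctrl->identified = true;
	return 0;
}

int main(void)
{
	struct controller ctrl = { .identified = false };

	printf("init_identify() = %d\n", init_identify(&ctrl));
	return 0;
}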
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -3671,12 +3671,14 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
 	spin_lock_irqsave(&nvme_fc_lock, flags);
 	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
 		if (lport->localport.node_name != laddr.nn ||
-		    lport->localport.port_name != laddr.pn)
+		    lport->localport.port_name != laddr.pn ||
+		    lport->localport.port_state != FC_OBJSTATE_ONLINE)
 			continue;
 
 		list_for_each_entry(rport, &lport->endp_list, endp_list) {
 			if (rport->remoteport.node_name != raddr.nn ||
-			    rport->remoteport.port_name != raddr.pn)
+			    rport->remoteport.port_name != raddr.pn ||
+			    rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
 				continue;
 
 			/* if fail to get reference fall through. Will error */
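The fc.c hunk above makes connection setup skip local and remote ports that are no longer in FC_OBJSTATE_ONLINE, so a new association can never latch onto a port that is being deleted. A small standalone sketch of that "match names, but only while online" filter follows; the port struct, find_online_port() and the array-based lookup are illustrative stand-ins for the driver's list walks.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum port_state { PORT_ONLINE, PORT_DELETED };

struct port {
	uint64_t node_name;
	uint64_t port_name;
	enum port_state state;
};

/* Return the first port matching both names, ignoring ports that are no
 * longer online -- the same filtering the hunk adds to the lport and rport
 * loops. */
static const struct port *find_online_port(const struct port *ports, size_t n,
					   uint64_t nn, uint64_t pn)
{
	for (size_t i = 0; i < n; i++) {
		if (ports[i].node_name != nn ||
		    ports[i].port_name != pn ||
		    ports[i].state != PORT_ONLINE)
			continue;
		return &ports[i];
	}
	return NULL;
}

int main(void)
{
	const struct port ports[] = {
		{ 0x10, 0x20, PORT_DELETED },
		{ 0x10, 0x20, PORT_ONLINE },
	};

	/* The deleted entry is skipped even though its names match. */
	printf("found the online entry? %s\n",
	       find_online_port(ports, 2, 0x10, 0x20) == &ports[1] ? "yes" : "no");
	return 0;
}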
--- a/drivers/nvme/host/hwmon.c
+++ b/drivers/nvme/host/hwmon.c
@@ -59,12 +59,8 @@ static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
 
 static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data)
 {
-	int ret;
-
-	ret = nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
+	return nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
 			   NVME_CSI_NVM, &data->log, sizeof(data->log), 0);
-
-	return ret <= 0 ? ret : -EIO;
 }
 
 static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
@@ -225,7 +221,7 @@ static const struct hwmon_chip_info nvme_hwmon_chip_info = {
 	.info	= nvme_hwmon_info,
 };
 
-void nvme_hwmon_init(struct nvme_ctrl *ctrl)
+int nvme_hwmon_init(struct nvme_ctrl *ctrl)
 {
 	struct device *dev = ctrl->dev;
 	struct nvme_hwmon_data *data;
@@ -234,7 +230,7 @@ void nvme_hwmon_init(struct nvme_ctrl *ctrl)
 
 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
-		return;
+		return 0;
 
 	data->ctrl = ctrl;
 	mutex_init(&data->read_lock);
@@ -244,7 +240,7 @@ void nvme_hwmon_init(struct nvme_ctrl *ctrl)
 		dev_warn(ctrl->device,
 			 "Failed to read smart log (error %d)\n", err);
 		devm_kfree(dev, data);
-		return;
+		return err;
 	}
 
 	hwmon = devm_hwmon_device_register_with_info(dev, "nvme", data,
@@ -254,4 +250,6 @@ void nvme_hwmon_init(struct nvme_ctrl *ctrl)
 		dev_warn(dev, "Failed to instantiate hwmon device\n");
 		devm_kfree(dev, data);
 	}
+
+	return 0;
 }
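Taken together, the hwmon.c hunks make nvme_hwmon_init() report allocation and SMART-log failures to its caller while still treating a failed hwmon registration as non-fatal (warn, free, return 0). The standalone model below mirrors that hard-error/soft-failure split; alloc_data(), read_smart_log() and register_hwmon() are invented stand-ins for the devm and hwmon calls.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the allocation, SMART-log read and hwmon registration steps;
 * flip the return values to exercise the different paths. */
static void *alloc_data(void)        { return malloc(16); }
static int   read_smart_log(void *d) { (void)d; return 0; }
static int   register_hwmon(void *d) { (void)d; return -EIO; }

/* Mirrors the reworked init: allocation and log-read failures propagate,
 * registration failure only warns and still returns 0. */
static int hwmon_init(void)
{
	void *data = alloc_data();
	int err;

	if (!data)
		return 0;            /* the kernel version also returns 0 here */

	err = read_smart_log(data);
	if (err) {
		fprintf(stderr, "Failed to read smart log (error %d)\n", err);
		free(data);
		return err;          /* hard error: the caller aborts init */
	}

	if (register_hwmon(data) < 0) {
		fprintf(stderr, "Failed to instantiate hwmon device\n");
		free(data);          /* soft failure: warn and carry on */
	}

	return 0;
}

int main(void)
{
	printf("hwmon_init() = %d\n", hwmon_init());
	return 0;
}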
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -827,9 +827,12 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
 }
 
 #ifdef CONFIG_NVME_HWMON
-void nvme_hwmon_init(struct nvme_ctrl *ctrl);
+int nvme_hwmon_init(struct nvme_ctrl *ctrl);
 #else
-static inline void nvme_hwmon_init(struct nvme_ctrl *ctrl) { }
+static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
+{
+	return 0;
+}
 #endif
 
 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
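The nvme.h hunk keeps call sites identical whether or not CONFIG_NVME_HWMON is enabled: with the option off, an inline stub simply reports success. A tiny compile-time sketch of the same stub pattern, using a made-up FEATURE_FOO switch and foo_init() rather than the real config symbol:

#include <stdio.h>

struct ctrl { int dummy; };

/* With the feature compiled in, the real initializer is declared (and would
 * be defined elsewhere); with it compiled out, an inline stub returns 0 so
 * call sites need no #ifdefs of their own. */
#ifdef FEATURE_FOO
int foo_init(struct ctrl *c);
#else
static inline int foo_init(struct ctrl *c)
{
	(void)c;
	return 0;
}
#endif

int main(void)
{
	struct ctrl c = { 0 };

	/* The caller checks the return value either way. */
	printf("foo_init() = %d\n", foo_init(&c));
	return 0;
}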
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -940,13 +940,6 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 	struct nvme_completion *cqe = &nvmeq->cqes[idx];
 	struct request *req;
 
-	if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
-		dev_warn(nvmeq->dev->ctrl.device,
-			"invalid id %d completed on queue %d\n",
-			cqe->command_id, le16_to_cpu(cqe->sq_id));
-		return;
-	}
-
 	/*
 	 * AEN requests are special as they don't time out and can
 	 * survive any kind of queue freeze and often don't respond to
@@ -960,6 +953,13 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 	}
 
 	req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
+	if (unlikely(!req)) {
+		dev_warn(nvmeq->dev->ctrl.device,
+			"invalid id %d completed on queue %d\n",
+			cqe->command_id, le16_to_cpu(cqe->sq_id));
+		return;
+	}
+
 	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
 	if (!nvme_try_complete_req(req, cqe->status, cqe->result))
 		nvme_pci_complete_rq(req);
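Rather than bounds-checking command_id against the queue depth, the completion handler now asks blk_mq_tag_to_rq() for the request and bails out when the lookup returns NULL, which also rejects ids that are in range but not currently outstanding. Below is a standalone model of that lookup-then-validate step; the tags[] table and tag_to_rq() are toy stand-ins for the block layer's tag set.

#include <stdio.h>

#define QUEUE_DEPTH 4

struct request { int tag; };

/* Toy tag table: a slot is non-NULL only while a request is outstanding.
 * blk_mq_tag_to_rq() plays this role in the driver. */
static struct request *tags[QUEUE_DEPTH];

static struct request *tag_to_rq(unsigned int id)
{
	return id < QUEUE_DEPTH ? tags[id] : NULL;
}

/* Mirrors the reworked nvme_handle_cqe(): look the id up first, warn and
 * return when nothing is found, complete the request otherwise. */
static void handle_cqe(unsigned int command_id)
{
	struct request *req = tag_to_rq(command_id);

	if (!req) {
		fprintf(stderr, "invalid id %u completed\n", command_id);
		return;
	}
	printf("completing request with tag %d\n", req->tag);
}

int main(void)
{
	struct request outstanding = { .tag = 2 };

	tags[2] = &outstanding;
	handle_cqe(2);   /* valid: completes */
	handle_cqe(3);   /* in range but not allocated: rejected */
	handle_cqe(99);  /* out of range: rejected */
	return 0;
}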
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -497,13 +497,12 @@ static inline int op_stat_group(unsigned int op)
 
 typedef unsigned int blk_qc_t;
 #define BLK_QC_T_NONE		-1U
-#define BLK_QC_T_EAGAIN		-2U
 #define BLK_QC_T_SHIFT		16
 #define BLK_QC_T_INTERNAL	(1U << 31)
 
 static inline bool blk_qc_t_valid(blk_qc_t cookie)
 {
-	return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN;
+	return cookie != BLK_QC_T_NONE;
 }
 
 static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
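With BLK_QC_T_EAGAIN removed, BLK_QC_T_NONE is the only sentinel a polling cookie needs to be compared against. A short standalone illustration of the simplified blk_qc_t_valid() check; the constants are copied from the hunk, while the cookie construction in main() is just for demonstration.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int blk_qc_t;

#define BLK_QC_T_NONE     -1U
#define BLK_QC_T_SHIFT    16
#define BLK_QC_T_INTERNAL (1U << 31)

/* The simplified check: only the NONE sentinel marks an invalid cookie. */
static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

int main(void)
{
	blk_qc_t none = BLK_QC_T_NONE;
	blk_qc_t cookie = (3U << BLK_QC_T_SHIFT) | 7U; /* queue 3, tag 7 */

	printf("none valid?   %d\n", blk_qc_t_valid(none));
	printf("cookie valid? %d\n", blk_qc_t_valid(cookie));
	return 0;
}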