nvmet-rdma: avoid circular locking dependency on install_queue()

nvmet_rdma_install_queue() is driven from the ->io_work workqueue
function, but calls flush_workqueue(), which may trigger
->release_work(), which in turn calls flush_work() on ->io_work:
the flush ends up waiting on the very work item it is running from.
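
For illustration only, a minimal sketch of that cycle (hypothetical
names: wq, io_work and release_work stand in for nvmet_wq, the
queue's ->io_work and the controller release work; work-item
initialization is omitted):

  #include <linux/workqueue.h>

  static struct workqueue_struct *wq;     /* stands in for nvmet_wq */
  static struct work_struct io_work;
  static struct work_struct release_work;

  static void release_work_fn(struct work_struct *w)
  {
          /* controller teardown waits for the queue's io_work */
          flush_work(&io_work);
  }

  static void io_work_fn(struct work_struct *w)
  {
          /*
           * install_queue() runs in this context; flushing the
           * workqueue may wait on release_work_fn(), which in turn
           * waits on this very work item: a flush cycle.
           */
          flush_workqueue(wq);
  }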

To avoid that, check for pending queues in the disconnecting state
instead of flushing, and return 'controller busy' once a certain
threshold is reached.
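
In effect the patch trades a potentially deadlocking flush for a
bounded busy response: as the diff below shows, the connect path
counts, under nvmet_rdma_queue_mutex, queues of the same controller
still in NVMET_RDMA_Q_DISCONNECTING state and fails the Connect with
NVME_SC_CONNECT_CTRL_BUSY once more than NVMET_RDMA_BACKLOG of them
are pending, so the host can retry. The same constant also replaces
the bare 128 backlog passed to rdma_listen().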

Signed-off-by: Hannes Reinecke <hare@suse.de>
Tested-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Keith Busch <kbusch@kernel.org>

@@ -37,6 +37,8 @@
 #define NVMET_RDMA_MAX_MDTS 8
 #define NVMET_RDMA_MAX_METADATA_MDTS 5
 
+#define NVMET_RDMA_BACKLOG 128
+
 struct nvmet_rdma_srq;
 
 struct nvmet_rdma_cmd {
@@ -1583,8 +1585,19 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
 	}
 
 	if (queue->host_qid == 0) {
-		/* Let inflight controller teardown complete */
-		flush_workqueue(nvmet_wq);
+		struct nvmet_rdma_queue *q;
+		int pending = 0;
+
+		/* Check for pending controller teardown */
+		mutex_lock(&nvmet_rdma_queue_mutex);
+		list_for_each_entry(q, &nvmet_rdma_queue_list, queue_list) {
+			if (q->nvme_sq.ctrl == queue->nvme_sq.ctrl &&
+			    q->state == NVMET_RDMA_Q_DISCONNECTING)
+				pending++;
+		}
+		mutex_unlock(&nvmet_rdma_queue_mutex);
+		if (pending > NVMET_RDMA_BACKLOG)
+			return NVME_SC_CONNECT_CTRL_BUSY;
 	}
 
 	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
@@ -1880,7 +1893,7 @@ static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
 		goto out_destroy_id;
 	}
 
-	ret = rdma_listen(cm_id, 128);
+	ret = rdma_listen(cm_id, NVMET_RDMA_BACKLOG);
 	if (ret) {
 		pr_err("listening to %pISpcs failed (%d)\n", addr, ret);
 		goto out_destroy_id;