nvmet-tcp: avoid circular locking dependency on install_queue()

nvmet_tcp_install_queue() is driven from the ->io_work workqueue
function, but calls flush_workqueue(), which might trigger
->release_work(), which in turn calls flush_work() on ->io_work.

To avoid that, check for pending queues in the disconnecting state,
and return 'controller busy' once a certain threshold is reached.

Signed-off-by: Hannes Reinecke <hare@suse.de>
Tested-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Keith Busch <kbusch@kernel.org>
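
The cycle is easier to see in isolation. The following minimal sketch
(all names hypothetical: example_wq stands in for nvmet_wq, and the two
work items for ->io_work and ->release_work) shows the mutual wait that
lockdep reports:

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;	/* stands in for nvmet_wq */
static struct work_struct example_io_work;	/* stands in for ->io_work */
static struct work_struct example_release_work;	/* stands in for ->release_work */

static void example_release_fn(struct work_struct *w)
{
	/* release_work waits for io_work to finish... */
	flush_work(&example_io_work);
}

static void example_io_fn(struct work_struct *w)
{
	/*
	 * ...but io_work waits for everything queued on example_wq,
	 * including release_work: each side waits on the other, which
	 * is the circular dependency lockdep complains about.
	 */
	flush_workqueue(example_wq);
}

With example_release_work queued on example_wq, running example_io_fn
reproduces the A-waits-on-B, B-waits-on-A pattern; the patch below
breaks the cycle by never flushing from inside io_work.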
commit 07a29b134c (parent 06c59d4270)
Author: Hannes Reinecke, 2023-12-08 13:53:20 +01:00
Committer: Keith Busch


@@ -25,6 +25,7 @@
 #define NVMET_TCP_DEF_INLINE_DATA_SIZE	(4 * PAGE_SIZE)
 #define NVMET_TCP_MAXH2CDATA		0x400000 /* 16M arbitrary limit */
+#define NVMET_TCP_BACKLOG		128
 
 static int param_store_val(const char *str, int *val, int min, int max)
 {
@@ -2067,7 +2068,7 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport)
 		goto err_sock;
 	}
 
-	ret = kernel_listen(port->sock, 128);
+	ret = kernel_listen(port->sock, NVMET_TCP_BACKLOG);
 	if (ret) {
 		pr_err("failed to listen %d on port sock\n", ret);
 		goto err_sock;
@@ -2133,8 +2134,19 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
 		container_of(sq, struct nvmet_tcp_queue, nvme_sq);
 
 	if (sq->qid == 0) {
-		/* Let inflight controller teardown complete */
-		flush_workqueue(nvmet_wq);
+		struct nvmet_tcp_queue *q;
+		int pending = 0;
+
+		/* Check for pending controller teardown */
+		mutex_lock(&nvmet_tcp_queue_mutex);
+		list_for_each_entry(q, &nvmet_tcp_queue_list, queue_list) {
+			if (q->nvme_sq.ctrl == sq->ctrl &&
+			    q->state == NVMET_TCP_Q_DISCONNECTING)
+				pending++;
+		}
+		mutex_unlock(&nvmet_tcp_queue_mutex);
+		if (pending > NVMET_TCP_BACKLOG)
+			return NVME_SC_CONNECT_CTRL_BUSY;
 	}
 
 	queue->nr_cmds = sq->size * 2;
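
Note that the new threshold reuses NVMET_TCP_BACKLOG, the same constant
that now replaces the magic 128 passed to kernel_listen(), so a single
value governs both the socket listen backlog and the pending-teardown
limit. Presumably the reasoning is that more than a backlog's worth of
queues stuck in NVMET_TCP_Q_DISCONNECTING means controller teardown is
not making progress, and failing the connect with
NVME_SC_CONNECT_CTRL_BUSY lets the host retry later instead of blocking
inside io_work.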