
scsi: qedf: Wait for stag work during unload

If the stag work is already scheduled when unload is called, it can lead
to issues because unload cleans up the work element. Wait for the stag
work to complete before cleanup during unload.

Signed-off-by: Saurav Kashyap <skashyap@marvell.com>
Signed-off-by: Nilesh Javali <njavali@marvell.com>
Link: https://lore.kernel.org/r/20240515091101.18754-3-skashyap@marvell.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
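
In short, the fix is a flag handshake: qedf_stag_change_work() sets
QEDF_STAG_IN_PROGRESS when the soft reset starts, qedf_ctx_soft_reset()
clears it on every exit path, and __qedf_remove() polls the bit before
tearing down the work element. A minimal sketch of the unload-side wait
follows; the helper name qedf_wait_for_stag() is illustrative only, as the
patch open-codes this loop in __qedf_remove() (see the diff below):

/*
 * Illustrative sketch of the wait added in __qedf_remove(); the actual
 * patch open-codes this with a stag_in_prog label and a goto.
 */
static void qedf_wait_for_stag(struct qedf_ctx *qedf)
{
	int cnt = 0;

	/* Give an in-flight soft reset up to 5 * 500 ms to finish. */
	while (test_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags) && cnt < 5) {
		QEDF_ERR(&qedf->dbg_ctx, "Stag in progress, cnt=%d.\n", cnt);
		cnt++;
		msleep(500);
	}
}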

drivers/scsi/qedf/qedf.h

@@ -363,6 +363,7 @@ struct qedf_ctx {
 #define QEDF_IN_RECOVERY	5
 #define QEDF_DBG_STOP_IO	6
 #define QEDF_PROBING		8
+#define QEDF_STAG_IN_PROGRESS	9
 	unsigned long flags; /* Miscellaneous state flags */
 	int fipvlan_retries;
 	u8 num_queues;

drivers/scsi/qedf/qedf_main.c

@@ -318,11 +318,18 @@ static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
 	 */
 	if (resp == fc_lport_flogi_resp) {
 		qedf->flogi_cnt++;
+		qedf->flogi_pending++;
+
+		if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+			QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n");
+			qedf->flogi_pending = 0;
+		}
+
 		if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
 			schedule_delayed_work(&qedf->stag_work, 2);
 			return NULL;
 		}
-		qedf->flogi_pending++;
+
 		return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
 				     arg, timeout);
 	}
@@ -912,13 +919,14 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
 	struct qedf_ctx *qedf;
 	struct qed_link_output if_link;
 
+	qedf = lport_priv(lport);
+
 	if (lport->vport) {
+		clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
 		printk_ratelimited("Cannot issue host reset on NPIV port.\n");
 		return;
 	}
 
-	qedf = lport_priv(lport);
-
 	qedf->flogi_pending = 0;
 	/* For host reset, essentially do a soft link up/down */
 	atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
@@ -938,6 +946,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
 	if (!if_link.link_up) {
 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
 			  "Physical link is not up.\n");
+		clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
 		return;
 	}
 	/* Flush and wait to make sure link down is processed */
@@ -950,6 +959,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
 		  "Queue link up work.\n");
 	queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
 	    0);
+	clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
 }
 
 /* Reset the host by gracefully logging out and then logging back in */
@@ -3721,6 +3731,7 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
 {
 	struct qedf_ctx *qedf;
 	int rc;
+	int cnt = 0;
 
 	if (!pdev) {
 		QEDF_ERR(NULL, "pdev is NULL.\n");
@@ -3738,6 +3749,17 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
 		return;
 	}
 
+stag_in_prog:
+	if (test_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags)) {
+		QEDF_ERR(&qedf->dbg_ctx, "Stag in progress, cnt=%d.\n", cnt);
+		cnt++;
+
+		if (cnt < 5) {
+			msleep(500);
+			goto stag_in_prog;
+		}
+	}
+
 	if (mode != QEDF_MODE_RECOVERY)
 		set_bit(QEDF_UNLOADING, &qedf->flags);
@@ -4013,6 +4035,8 @@ void qedf_stag_change_work(struct work_struct *work)
 		return;
 	}
 
+	set_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
+
 	printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.",
 	    dev_name(&qedf->pdev->dev), __func__, __LINE__,
 	    qedf->dbg_ctx.host_no);