diff --git a/lib/nvme/nvme_ctrlr.c b/lib/nvme/nvme_ctrlr.c
index 41fc2f793c..b7e3822ddd 100644
--- a/lib/nvme/nvme_ctrlr.c
+++ b/lib/nvme/nvme_ctrlr.c
@@ -2013,6 +2013,8 @@ spdk_nvme_ctrlr_register_timeout_callback(struct spdk_nvme_ctrlr *ctrlr,
 		active_proc->timeout_cb_arg = cb_arg;
 	}
 
+	ctrlr->timeout_enabled = true;
+
 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
 }
 
diff --git a/lib/nvme/nvme_internal.h b/lib/nvme/nvme_internal.h
index 5282f9a93f..38a16dfe57 100644
--- a/lib/nvme/nvme_internal.h
+++ b/lib/nvme/nvme_internal.h
@@ -432,6 +432,8 @@ struct spdk_nvme_ctrlr {
 
 	bool				is_failed;
 
+	bool				timeout_enabled;
+
 	uint16_t			max_sges;
 
 	/** Controller support flags */
diff --git a/lib/nvme/nvme_pcie.c b/lib/nvme/nvme_pcie.c
index 87c17216a6..a294a970c3 100644
--- a/lib/nvme/nvme_pcie.c
+++ b/lib/nvme/nvme_pcie.c
@@ -1169,7 +1169,7 @@ nvme_pcie_qpair_submit_tracker(struct spdk_nvme_qpair *qpair, struct nvme_tracke
 	struct nvme_pcie_ctrlr	*pctrlr = nvme_pcie_ctrlr(qpair->ctrlr);
 
 	tr->timed_out = 0;
-	if (spdk_unlikely(qpair->active_proc && qpair->active_proc->timeout_cb_fn != NULL)) {
+	if (spdk_unlikely(pctrlr->ctrlr.timeout_enabled)) {
 		tr->submit_tick = spdk_get_ticks();
 	}
 
@@ -1980,6 +1980,23 @@ nvme_pcie_qpair_check_timeout(struct spdk_nvme_qpair *qpair)
 	struct nvme_tracker *tr, *tmp;
 	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
 	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
+	struct spdk_nvme_ctrlr_process *active_proc;
+
+	/* Don't check timeouts during controller initialization. */
+	if (ctrlr->state != NVME_CTRLR_STATE_READY) {
+		return;
+	}
+
+	if (nvme_qpair_is_admin_queue(qpair)) {
+		active_proc = spdk_nvme_ctrlr_get_current_process(ctrlr);
+	} else {
+		active_proc = qpair->active_proc;
+	}
+
+	/* Only check timeouts if the current process has a timeout callback. */
+	if (active_proc == NULL || active_proc->timeout_cb_fn == NULL) {
+		return;
+	}
 
 	t02 = spdk_get_ticks();
 	TAILQ_FOREACH_SAFE(tr, &pqpair->outstanding_tr, tq_list, tmp) {
@@ -1997,7 +2014,7 @@ nvme_pcie_qpair_check_timeout(struct spdk_nvme_qpair *qpair)
 			}
 		}
 
-		if (tr->submit_tick + qpair->active_proc->timeout_ticks > t02) {
+		if (tr->submit_tick + active_proc->timeout_ticks > t02) {
 			/* The trackers are in order, so as soon as one has not timed out,
 			 * stop iterating.
 			 */
@@ -2005,9 +2022,14 @@ nvme_pcie_qpair_check_timeout(struct spdk_nvme_qpair *qpair)
 		}
 
 		tr->timed_out = 1;
-		qpair->active_proc->timeout_cb_fn(qpair->active_proc->timeout_cb_arg, ctrlr,
-						  nvme_qpair_is_admin_queue(qpair) ? NULL : qpair,
-						  tr->cid);
+
+		/* We don't want to expose the admin queue to the user,
+		 * so when we're timing out admin commands set the
+		 * qpair to NULL.
+		 */
+		active_proc->timeout_cb_fn(active_proc->timeout_cb_arg, ctrlr,
+					   nvme_qpair_is_admin_queue(qpair) ? NULL : qpair,
+					   tr->cid);
 	}
 }
 
@@ -2090,12 +2112,7 @@ nvme_pcie_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_
 		}
 	}
 
-	/* We don't want to expose the admin queue to the user,
-	 * so when we're timing out admin commands set the
-	 * qpair to NULL.
-	 */
-	if (!nvme_qpair_is_admin_queue(qpair) && spdk_unlikely(qpair->active_proc->timeout_cb_fn != NULL) &&
-	    qpair->ctrlr->state == NVME_CTRLR_STATE_READY) {
+	if (spdk_unlikely(ctrlr->timeout_enabled)) {
 		/*
 		 * User registered for timeout callback
 		 */
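
For reference, a minimal sketch (not part of the patch) of an application-side timeout handler matching the callback invocation in nvme_pcie_qpair_check_timeout() above; the handler name and messages are illustrative, and such a handler would be registered with spdk_nvme_ctrlr_register_timeout_callback(), which this change now uses to set ctrlr->timeout_enabled:

	#include <stdio.h>
	#include "spdk/nvme.h"

	/* Illustrative handler; the signature matches the timeout_cb_fn call in
	 * nvme_pcie_qpair_check_timeout(): (cb_arg, ctrlr, qpair, cid), with
	 * qpair == NULL when an admin command timed out.
	 */
	static void
	example_timeout_cb(void *cb_arg, struct spdk_nvme_ctrlr *ctrlr,
			   struct spdk_nvme_qpair *qpair, uint16_t cid)
	{
		(void)cb_arg;
		(void)ctrlr;

		if (qpair == NULL) {
			fprintf(stderr, "admin command cid %u timed out\n", cid);
		} else {
			fprintf(stderr, "I/O command cid %u timed out\n", cid);
		}

		/* A real handler would typically abort the command or reset
		 * the controller here.
		 */
	}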