nvme: abort outstanding requests case by case

A DSM command may take a long time for the NVMe drive to complete.
If a small timeout value is configured for DSM commands, the bdev/nvme
module tries to reset the I/O queue pair when the timeout fires. In
`spdk_nvme_ctrlr_free_io_qpair` we first abort the outstanding I/O
requests; then, in `nvme_pcie_ctrlr_delete_io_qpair`, we poll the CQ
for requests that the NVMe controller has already completed. If such
completions are found in the CQ, we complete those requests a second
time, so double completions occur.
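
As a rough illustration of the hazard (a standalone toy model, not SPDK
code; every name in it is made up), one path completes an in-flight
request as aborted, and a second path drains the completion queue and
completes the same request again:

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for an in-flight I/O request. */
struct fake_req {
	int id;
	bool completed;	/* set the first time its callback runs */
};

static void
complete_req(struct fake_req *req, const char *path)
{
	if (req->completed) {
		/* This is the double completion the fix avoids. */
		printf("BUG: req %d completed twice (second time via %s)\n",
		       req->id, path);
		return;
	}
	req->completed = true;
	printf("req %d completed via %s\n", req->id, path);
}

int
main(void)
{
	struct fake_req outstanding[2] = { { .id = 0 }, { .id = 1 } };

	/* Abort path: complete every outstanding request as aborted. */
	for (int i = 0; i < 2; i++) {
		complete_req(&outstanding[i], "abort");
	}

	/*
	 * Teardown path: the controller had already posted a completion
	 * for req 1, and draining the CQ completes it a second time.
	 */
	complete_req(&outstanding[1], "CQ drain");

	return 0;
}
```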

Rename `nvme_qpair_abort_reqs` to `nvme_qpair_abort_all_queued_reqs`,
so that the common layer only aborts queued requests, and let each
transport abort its own outstanding requests case by case.
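
In outline (condensed from the hunks below; the function bodies are the
ones shown in the diff), the split now looks like this:

```c
/* Before: the common-layer helper also told the transport to abort
 * requests that had already been submitted to the controller. */
void
nvme_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
{
	nvme_qpair_complete_error_reqs(qpair);
	nvme_qpair_abort_queued_reqs(qpair, dnr);
	_nvme_qpair_complete_abort_queued_reqs(qpair);
	nvme_transport_qpair_abort_reqs(qpair, dnr);
}

/* After: the renamed helper only touches requests still queued in
 * software; call sites that also want in-flight requests aborted pair
 * it with an explicit transport call, e.g.
 *
 *	nvme_qpair_abort_all_queued_reqs(qpair, 0);
 *	nvme_transport_qpair_abort_reqs(qpair, 0);
 */
void
nvme_qpair_abort_all_queued_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
{
	nvme_qpair_complete_error_reqs(qpair);
	nvme_qpair_abort_queued_reqs(qpair, dnr);
	_nvme_qpair_complete_abort_queued_reqs(qpair);
}
```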

Fix #2233.

Change-Id: Icae6214239160c615418cb514fc51cfe77b59211
Signed-off-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/10233
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Author: Changpeng Liu, 2021-11-19 19:00:55 +08:00
Committed by: Tomasz Zawadzki
Parent: 740cf3ce2c
Commit: 0af4a7cd84

6 changed files with 11 additions and 9 deletions

@@ -620,7 +620,7 @@ spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
 	 * with that qpair, since the callbacks will also be foreign to this process.
 	 */
 	if (qpair->active_proc == nvme_ctrlr_get_current_process(ctrlr)) {
-		nvme_qpair_abort_reqs(qpair, 1);
+		nvme_qpair_abort_all_queued_reqs(qpair, 1);
 	}

 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

@@ -1188,7 +1188,7 @@ void nvme_qpair_deinit(struct spdk_nvme_qpair *qpair);
 void nvme_qpair_complete_error_reqs(struct spdk_nvme_qpair *qpair);
 int nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair,
			      struct nvme_request *req);
-void nvme_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr);
+void nvme_qpair_abort_all_queued_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr);
 uint32_t nvme_qpair_abort_queued_reqs_with_cbarg(struct spdk_nvme_qpair *qpair, void *cmd_cb_arg);
 void nvme_qpair_abort_queued_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr);
 void nvme_qpair_resubmit_requests(struct spdk_nvme_qpair *qpair, uint32_t num_requests);

@@ -630,7 +630,8 @@ nvme_qpair_check_enabled(struct spdk_nvme_qpair *qpair)
 	 */
 	if (qpair->ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE &&
 	    !qpair->is_new_qpair) {
-		nvme_qpair_abort_reqs(qpair, 0);
+		nvme_qpair_abort_all_queued_reqs(qpair, 0);
+		nvme_transport_qpair_abort_reqs(qpair, 0);
 	}

 	nvme_qpair_set_state(qpair, NVME_QPAIR_ENABLED);
@@ -720,7 +721,8 @@ spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_
 	if (spdk_unlikely(qpair->ctrlr->is_failed)) {
 		if (qpair->ctrlr->is_removed) {
 			nvme_qpair_set_state(qpair, NVME_QPAIR_DESTROYING);
-			nvme_qpair_abort_reqs(qpair, 1 /* Do not retry */);
+			nvme_qpair_abort_all_queued_reqs(qpair, 1 /* Do not retry */);
+			nvme_transport_qpair_abort_reqs(qpair, 1);
 		}
 		return -ENXIO;
 	}
@@ -1057,12 +1059,11 @@ nvme_qpair_resubmit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *
 }

 void
-nvme_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
+nvme_qpair_abort_all_queued_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
 {
 	nvme_qpair_complete_error_reqs(qpair);
 	nvme_qpair_abort_queued_reqs(qpair, dnr);
 	_nvme_qpair_complete_abort_queued_reqs(qpair);
-	nvme_transport_qpair_abort_reqs(qpair, dnr);
 }

 int

@@ -536,7 +536,8 @@ nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk
 	transport->ops.ctrlr_disconnect_qpair(ctrlr, qpair);

-	nvme_qpair_abort_reqs(qpair, 0);
+	nvme_qpair_abort_all_queued_reqs(qpair, 0);
+	nvme_transport_qpair_abort_reqs(qpair, 0);
 	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
 }

@@ -69,7 +69,7 @@ DEFINE_STUB(nvme_ctrlr_cmd_set_host_id, int,
 DEFINE_STUB_V(nvme_ns_set_identify_data, (struct spdk_nvme_ns *ns));
 DEFINE_STUB_V(nvme_ns_set_id_desc_list_data, (struct spdk_nvme_ns *ns));
 DEFINE_STUB_V(nvme_ns_free_iocs_specific_data, (struct spdk_nvme_ns *ns));
-DEFINE_STUB_V(nvme_qpair_abort_reqs, (struct spdk_nvme_qpair *qpair, uint32_t dnr));
+DEFINE_STUB_V(nvme_qpair_abort_all_queued_reqs, (struct spdk_nvme_qpair *qpair, uint32_t dnr));
 DEFINE_STUB(spdk_nvme_poll_group_remove, int, (struct spdk_nvme_poll_group *group,
	    struct spdk_nvme_qpair *qpair), 0);
 DEFINE_STUB_V(nvme_io_msg_ctrlr_update, (struct spdk_nvme_ctrlr *ctrlr));

@@ -40,7 +40,7 @@
 SPDK_LOG_REGISTER_COMPONENT(nvme)

 DEFINE_STUB(nvme_poll_group_connect_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
-DEFINE_STUB_V(nvme_qpair_abort_reqs, (struct spdk_nvme_qpair *qpair, uint32_t dnr));
+DEFINE_STUB_V(nvme_qpair_abort_all_queued_reqs, (struct spdk_nvme_qpair *qpair, uint32_t dnr));
 DEFINE_STUB(nvme_poll_group_disconnect_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
 DEFINE_STUB(spdk_nvme_ctrlr_free_io_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
 DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *,