nvme: Set dnr to zero for nvme_qpair_abort_reqs()

This is necessary to fail over to another path when multipath is configured.

Signed-off-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Change-Id: I0b6bcf63501e38f75efb4b0d6bec58abb4b67aef
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/10250
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Monica Kenguva <monica.kenguva@intel.com>
This commit is contained in:
Shuhei Matsumoto 2021-12-24 02:18:50 +09:00 committed by Tomasz Zawadzki
parent 80e81273e2
commit 1b3172f726
4 changed files with 9 additions and 8 deletions

View File

@@ -3,7 +3,7 @@
*
* Copyright (c) Intel Corporation. All rights reserved.
* Copyright (c) 2019-2021 Mellanox Technologies LTD. All rights reserved.
* Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -641,7 +641,7 @@ spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
* with that qpair, since the callbacks will also be foreign to this process.
*/
if (qpair->active_proc == nvme_ctrlr_get_current_process(ctrlr)) {
nvme_qpair_abort_all_queued_reqs(qpair, 1);
nvme_qpair_abort_all_queued_reqs(qpair, 0);
}
nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

View File

@@ -3,6 +3,7 @@
*
* Copyright (c) Intel Corporation.
* All rights reserved.
* Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -721,8 +722,8 @@ spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_
if (spdk_unlikely(qpair->ctrlr->is_failed)) {
if (qpair->ctrlr->is_removed) {
nvme_qpair_set_state(qpair, NVME_QPAIR_DESTROYING);
nvme_qpair_abort_all_queued_reqs(qpair, 1 /* Do not retry */);
nvme_transport_qpair_abort_reqs(qpair, 1);
nvme_qpair_abort_all_queued_reqs(qpair, 0);
nvme_transport_qpair_abort_reqs(qpair, 0);
}
return -ENXIO;
}

View File

@@ -3,7 +3,7 @@
*
* Copyright (c) Intel Corporation. All rights reserved.
* Copyright (c) 2019-2021 Mellanox Technologies LTD. All rights reserved.
* Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -1848,7 +1848,7 @@ nvme_rdma_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_
nvme_rdma_unregister_reqs(rqpair);
nvme_rdma_unregister_rsps(rqpair);
nvme_rdma_qpair_abort_reqs(qpair, 1);
nvme_rdma_qpair_abort_reqs(qpair, 0);
nvme_qpair_deinit(qpair);
nvme_rdma_put_memory_domain(rqpair->memory_domain);

View File

@@ -3,7 +3,7 @@
*
* Copyright (c) Intel Corporation. All rights reserved.
* Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
* Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -350,7 +350,7 @@ nvme_tcp_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_q
struct nvme_tcp_qpair *tqpair;
assert(qpair != NULL);
nvme_tcp_qpair_abort_reqs(qpair, 1);
nvme_tcp_qpair_abort_reqs(qpair, 0);
nvme_qpair_deinit(qpair);
tqpair = nvme_tcp_qpair(qpair);
nvme_tcp_free_reqs(tqpair);