nvme: Transports now set qpair state to NVME_QPAIR_CONNECTED inside .ctrlr_connect_qpair

Previously this was assumed to be a synchronous process, so the generic
transport layer code updated the state after .ctrlr_connect_qpair
returned. In preparation for supporting an asynchronous mode, shift that
responsibility down into the individual transports.
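
To make the new division of labor concrete, here is a minimal sketch (not code from this
commit) of the contract each transport now implements in its .ctrlr_connect_qpair callback.
The internal helpers nvme_qpair_set_state()/NVME_QPAIR_CONNECTED are assumed to come from
lib/nvme/nvme_internal.h, and my_transport_do_connect() is a hypothetical placeholder for
the transport-specific connect work:

/*
 * Sketch only: my_transport_do_connect() is hypothetical; nvme_qpair_set_state()
 * and NVME_QPAIR_CONNECTED are SPDK-internal and assumed from nvme_internal.h.
 */
#include "nvme_internal.h"

static int my_transport_do_connect(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);

static int
my_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	int rc;

	/* Transport-specific connection work (queue creation, fabric connect, ...). */
	rc = my_transport_do_connect(ctrlr, qpair);
	if (rc == 0) {
		/* The transport, not the generic layer, now marks the qpair connected. */
		nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
	}

	return rc;
}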

While none of the transports actually do this asynchronously yet, insert a
busy wait in nvme_transport_ctrlr_connect_qpair that spins until the qpair
exits the CONNECTING state. None of the upper layer code can correctly
handle a transport doing this asynchronously, so the busy wait covers that
case.
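
Condensed, the generic-layer side now looks roughly like the following (a paraphrase of the
nvme_transport.c hunk further down; error handling and the poll-group path are omitted):

/*
 * Condensed sketch of nvme_transport_ctrlr_connect_qpair after this change.
 */
nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTING);

rc = transport->ops.ctrlr_connect_qpair(ctrlr, qpair);
if (rc != 0) {
	goto err;
}

/* Busy wait until the qpair exits the connecting state */
while (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) { }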

Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Change-Id: I3c1a5c115264ffcb87e549765d891d796e0c81fe
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8909
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Monica Kenguva <monica.kenguva@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Ben Walker authored 2021-07-22 12:08:38 -07:00; committed by Tomasz Zawadzki
parent 50472c44cf
commit ea0aaf5e85
4 changed files with 19 additions and 5 deletions

@@ -542,11 +542,17 @@ _nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme
 int
 nvme_pcie_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
 {
-	if (nvme_qpair_is_admin_queue(qpair)) {
-		return 0;
-	} else {
-		return _nvme_pcie_ctrlr_create_io_qpair(ctrlr, qpair, qpair->id);
+	int rc = 0;
+
+	if (!nvme_qpair_is_admin_queue(qpair)) {
+		rc = _nvme_pcie_ctrlr_create_io_qpair(ctrlr, qpair, qpair->id);
 	}
+
+	if (rc == 0) {
+		nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
+	}
+
+	return rc;
 }
 
 void

@@ -1245,6 +1245,10 @@ nvme_rdma_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qp
 		} while (rc == -EAGAIN && retry_count < NVME_RDMA_STALE_CONN_RETRY_MAX);
 	}
 
+	if (rc == 0) {
+		nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
+	}
+
 	return rc;
 }
 

@@ -1941,6 +1941,8 @@ nvme_tcp_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpa
 		return rc;
 	}
 
+	nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
+
 	return 0;
 }
 

@@ -369,7 +369,9 @@ nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nv
 		goto err;
 	}
 
-	nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
+	/* Busy wait until the qpair exits the connecting state */
+	while (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) { }
+
 	if (qpair->poll_group) {
 		rc = nvme_poll_group_connect_qpair(qpair);
 		if (rc) {