nvme/tcp: connect qpairs asynchronously

Polling for qpair connection has been moved to `process_completions()`,
making `nvme_tcp_ctrlr_connect_qpair()` asynchronous: it now only kicks
off the connection instead of blocking until it completes.
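
A rough sketch of the resulting flow, mirroring the updated unit test
(error handling trimmed; all names are taken from this change):

    rc = nvme_tcp_ctrlr_connect_qpair(ctrlr, qpair);
    if (rc != 0) {
        return rc;
    }

    /* icreq/icresp and FABRIC CONNECT now make progress inside
     * process_completions() while the qpair stays in CONNECTING. */
    while (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) {
        rc = nvme_tcp_qpair_process_completions(qpair, 0);
        if (rc < 0) {
            break;
        }
    }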

Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Change-Id: I4dec6bc24ed6c3865e58aaf3e0778b2af7bdb4ed
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/9075
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Monica Kenguva <monica.kenguva@intel.com>
Konrad Sztyber, 2021-08-03 11:55:26 +02:00 (committed by Tomasz Zawadzki)
parent 8280d5f2fc
commit 7d32600e5c
2 changed files with 25 additions and 35 deletions

View File

@@ -1731,6 +1731,9 @@ nvme_tcp_qpair_check_timeout(struct spdk_nvme_qpair *qpair)
    }
}

static int nvme_tcp_ctrlr_connect_qpair_poll(struct spdk_nvme_ctrlr *ctrlr,
        struct spdk_nvme_qpair *qpair);

static int
nvme_tcp_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
@@ -1769,6 +1772,17 @@ nvme_tcp_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_c
        nvme_tcp_qpair_check_timeout(qpair);
    }

    if (spdk_unlikely(nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING)) {
        rc = nvme_tcp_ctrlr_connect_qpair_poll(qpair->ctrlr, qpair);
        if (rc != 0 && rc != -EAGAIN) {
            SPDK_ERRLOG("Failed to connect tqpair=%p\n", tqpair);
            goto fail;
        } else if (rc == 0) {
            /* Once the connection is completed, we can submit queued requests */
            nvme_qpair_resubmit_requests(qpair, tqpair->num_entries);
        }
    }

    return reaped;
fail:
@@ -1809,11 +1823,6 @@ nvme_tcp_qpair_sock_cb(void *ctx, struct spdk_sock_group *group, struct spdk_soc
    }
}

static void
dummy_disconnected_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_ctx)
{
}

static int
nvme_tcp_qpair_icreq_send(struct nvme_tcp_qpair *tqpair)
{
@@ -1839,23 +1848,6 @@ nvme_tcp_qpair_icreq_send(struct nvme_tcp_qpair *tqpair)
    return 0;
}

static int
nvme_tcp_qpair_icreq_poll(struct nvme_tcp_qpair *tqpair)
{
    int rc;

    if (spdk_get_ticks() > tqpair->icreq_timeout_tsc) {
        rc = -ETIMEDOUT;
    } else if (tqpair->qpair.poll_group) {
        rc = nvme_tcp_poll_group_process_completions(tqpair->qpair.poll_group, 0,
                dummy_disconnected_qpair_cb);
    } else {
        rc = nvme_tcp_qpair_process_completions(&tqpair->qpair, 0);
    }

    return rc == 0 ? -EAGAIN : rc;
}

static int
nvme_tcp_qpair_connect_sock(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
@@ -1945,10 +1937,12 @@ nvme_tcp_ctrlr_connect_qpair_poll(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvm
    switch (tqpair->state) {
    case NVME_TCP_QPAIR_STATE_INVALID:
    case NVME_TCP_QPAIR_STATE_INITIALIZING:
        rc = nvme_tcp_qpair_icreq_poll(tqpair);
        if (rc != 0 && rc != -EAGAIN) {
        if (spdk_get_ticks() > tqpair->icreq_timeout_tsc) {
            SPDK_ERRLOG("Failed to construct the tqpair=%p via correct icresp\n", tqpair);
            rc = -ETIMEDOUT;
            break;
        }
        rc = -EAGAIN;
        break;
    case NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND:
        rc = nvme_fabric_qpair_connect_async(&tqpair->qpair, tqpair->num_entries);
@@ -2016,14 +2010,6 @@ nvme_tcp_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpa
        return rc;
    }

    do {
        rc = nvme_tcp_ctrlr_connect_qpair_poll(ctrlr, qpair);
    } while (rc == -EAGAIN);

    if (rc != 0) {
        nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
    }

    return rc;
}

View File

@@ -64,6 +64,7 @@ DEFINE_STUB(spdk_nvme_poll_group_process_completions, int64_t, (struct spdk_nvme
        uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb), 0);
DEFINE_STUB(nvme_poll_group_connect_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
DEFINE_STUB_V(nvme_qpair_resubmit_requests, (struct spdk_nvme_qpair *qpair, uint32_t num_requests));

static void
test_nvme_tcp_pdu_set_data_buf(void)
@@ -1399,6 +1400,7 @@ test_nvme_tcp_ctrlr_connect_qpair(void)
    tqpair.sock = (struct spdk_sock *)0xDEADBEEF;
    tqpair.send_pdu = &pdu;
    tqpair.qpair.ctrlr = &ctrlr;
    tqpair.qpair.state = NVME_QPAIR_CONNECTING;
    ic_req = &pdu.hdr.ic_req;
    tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
@@ -1414,11 +1416,13 @@ test_nvme_tcp_ctrlr_connect_qpair(void)
    TAILQ_INIT(&tqpair.send_queue);

    rc = nvme_tcp_ctrlr_connect_qpair(&ctrlr, qpair);
    while (rc == -EAGAIN) {
        rc = nvme_tcp_ctrlr_connect_qpair_poll(&ctrlr, qpair);
    CU_ASSERT(rc == 0);

    while (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) {
        rc = nvme_tcp_qpair_process_completions(qpair, 0);
        CU_ASSERT(rc >= 0);
    }
    CU_ASSERT(rc == 0);

    CU_ASSERT(tqpair.maxr2t == NVME_TCP_MAX_R2T_DEFAULT);
    CU_ASSERT(tqpair.state == NVME_TCP_QPAIR_STATE_RUNNING);
    CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH);