nvme/tcp: use new async+poll APIs for fabrics connect
The fabric connect command is now sent without blocking for its completion. This will make it possible to make `nvme_tcp_ctrlr_connect_qpair()` non-blocking too, by moving the polling to process_completions (this will be done in subsequent patches).

Additionally, two extra states, `NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND` and `NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_POLL`, were added to keep track of the state of the connect command. These states are only used by the initiator code, as the target doesn't need them.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Change-Id: I25c16501e28bb3fbfde416b7c9214f42eb126358
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8605
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
parent 7e21385f8a
commit b49fa72b22
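Before the diff, here is a minimal, self-contained sketch of the send-then-poll flow this patch introduces. The `fake_*` types and helpers below are hypothetical stand-ins, not SPDK APIs; only the FABRIC_CONNECT_SEND/POLL state names and the 0 / -EAGAIN return contract mirror what the diff adds to `nvme_tcp_ctrlr_connect_qpair_poll()`.

```c
#include <assert.h>
#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins; the real code uses struct nvme_tcp_qpair,
 * nvme_fabric_qpair_connect_async() and nvme_fabric_qpair_connect_poll(). */
enum fake_qpair_state {
	FAKE_STATE_FABRIC_CONNECT_SEND,	/* CONNECT not sent yet */
	FAKE_STATE_FABRIC_CONNECT_POLL,	/* CONNECT sent, waiting for its completion */
	FAKE_STATE_RUNNING,		/* CONNECT completed, qpair is usable */
};

struct fake_qpair {
	enum fake_qpair_state state;
	int polls_until_done;		/* simulates the completion arriving later */
};

static int
fake_connect_async(struct fake_qpair *q)
{
	q->polls_until_done = 3;
	return 0;
}

static int
fake_connect_poll(struct fake_qpair *q)
{
	return q->polls_until_done-- > 0 ? -EAGAIN : 0;
}

/* Same shape as the new FABRIC_CONNECT_SEND/POLL cases in the diff below:
 * returns 0 once connected, -EAGAIN when the caller should poll again,
 * any other negative value on error. */
static int
fake_connect_qpair_poll(struct fake_qpair *q)
{
	int rc;

	switch (q->state) {
	case FAKE_STATE_FABRIC_CONNECT_SEND:
		rc = fake_connect_async(q);
		if (rc < 0) {
			break;
		}
		q->state = FAKE_STATE_FABRIC_CONNECT_POLL;
		rc = -EAGAIN;
		break;
	case FAKE_STATE_FABRIC_CONNECT_POLL:
		rc = fake_connect_poll(q);
		if (rc == 0) {
			q->state = FAKE_STATE_RUNNING;
		}
		break;
	case FAKE_STATE_RUNNING:
		rc = 0;
		break;
	default:
		assert(0);
		rc = -EINVAL;
		break;
	}

	return rc;
}

int
main(void)
{
	struct fake_qpair q = { .state = FAKE_STATE_FABRIC_CONNECT_SEND };
	int rc;

	/* Until the polling moves into process_completions (subsequent patches),
	 * the connect path can simply keep calling the poll function. */
	do {
		rc = fake_connect_qpair_poll(&q);
	} while (rc == -EAGAIN);

	printf("connect finished: rc=%d, running=%d\n", rc, q.state == FAKE_STATE_RUNNING);
	return rc == 0 ? 0 : 1;
}
```

The do/while loop in `main()` stands in for the layer that still drives the poll function synchronously after this patch; moving that loop into process_completions is what the follow-up patches mentioned above will enable.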
@@ -160,9 +160,11 @@ enum nvme_tcp_error_codes {
 enum nvme_tcp_qpair_state {
 	NVME_TCP_QPAIR_STATE_INVALID = 0,
 	NVME_TCP_QPAIR_STATE_INITIALIZING = 1,
-	NVME_TCP_QPAIR_STATE_RUNNING = 2,
-	NVME_TCP_QPAIR_STATE_EXITING = 3,
-	NVME_TCP_QPAIR_STATE_EXITED = 4,
+	NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND = 2,
+	NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_POLL = 3,
+	NVME_TCP_QPAIR_STATE_RUNNING = 4,
+	NVME_TCP_QPAIR_STATE_EXITING = 5,
+	NVME_TCP_QPAIR_STATE_EXITED = 6,
 };
 
 static const bool g_nvme_tcp_hdgst[] = {
@@ -851,6 +851,19 @@ nvme_tcp_qpair_send_h2c_term_req(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_
 	nvme_tcp_qpair_write_pdu(tqpair, rsp_pdu, nvme_tcp_qpair_send_h2c_term_req_complete, tqpair);
 }
 
+static bool
+nvme_tcp_qpair_recv_state_valid(struct nvme_tcp_qpair *tqpair)
+{
+	switch (tqpair->state) {
+	case NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND:
+	case NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_POLL:
+	case NVME_TCP_QPAIR_STATE_RUNNING:
+		return true;
+	default:
+		return false;
+	}
+}
+
 static void
 nvme_tcp_pdu_ch_handle(struct nvme_tcp_qpair *tqpair)
 {
@@ -874,7 +887,7 @@ nvme_tcp_pdu_ch_handle(struct nvme_tcp_qpair *tqpair)
 			plen_error = true;
 		}
 	} else {
-		if (tqpair->state != NVME_TCP_QPAIR_STATE_RUNNING) {
+		if (spdk_unlikely(!nvme_tcp_qpair_recv_state_valid(tqpair))) {
 			SPDK_ERRLOG("The TCP/IP tqpair connection is not negotitated\n");
 			fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
 			goto err;
@@ -1147,7 +1160,7 @@ nvme_tcp_send_icreq_complete(void *cb_arg)
 
 	if (tqpair->state == NVME_TCP_QPAIR_STATE_INITIALIZING) {
 		SPDK_DEBUGLOG(nvme, "tqpair %p %u, finilize icresp\n", tqpair, tqpair->qpair.id);
-		tqpair->state = NVME_TCP_QPAIR_STATE_RUNNING;
+		tqpair->state = NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND;
 	}
 }
 
@@ -1218,7 +1231,7 @@ nvme_tcp_icresp_handle(struct nvme_tcp_qpair *tqpair,
 		return;
 	}
 
-	tqpair->state = NVME_TCP_QPAIR_STATE_RUNNING;
+	tqpair->state = NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND;
 	return;
 end:
 	nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
@@ -1926,13 +1939,26 @@ nvme_tcp_ctrlr_connect_qpair_poll(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvm
 			SPDK_ERRLOG("Failed to construct the tqpair=%p via correct icresp\n", tqpair);
 		}
 		break;
-	case NVME_TCP_QPAIR_STATE_RUNNING:
-		rc = nvme_fabric_qpair_connect(&tqpair->qpair, tqpair->num_entries);
+	case NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND:
+		rc = nvme_fabric_qpair_connect_async(&tqpair->qpair, tqpair->num_entries);
 		if (rc < 0) {
 			SPDK_ERRLOG("Failed to send an NVMe-oF Fabric CONNECT command\n");
+			break;
 		}
+		tqpair->state = NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_POLL;
+		rc = -EAGAIN;
 		break;
+	case NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_POLL:
+		rc = nvme_fabric_qpair_connect_poll(&tqpair->qpair);
+		if (rc == 0) {
+			tqpair->state = NVME_TCP_QPAIR_STATE_RUNNING;
+			nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
+		} else if (rc != -EAGAIN) {
+			SPDK_ERRLOG("Failed to poll NVMe-oF Fabric CONNECT command\n");
+		}
+		break;
+	case NVME_TCP_QPAIR_STATE_RUNNING:
+		rc = 0;
+		break;
 	default:
 		assert(false);
@@ -105,6 +105,9 @@ DEFINE_STUB(nvme_fabric_ctrlr_scan, int, (struct spdk_nvme_probe_ctx *probe_ctx,
 		bool direct_connect), 0);
 DEFINE_STUB(nvme_fabric_qpair_connect, int, (struct spdk_nvme_qpair *qpair, uint32_t num_entries),
 	    0);
+DEFINE_STUB(nvme_fabric_qpair_connect_async, int, (struct spdk_nvme_qpair *qpair,
+	    uint32_t num_entries), 0);
+DEFINE_STUB(nvme_fabric_qpair_connect_poll, int, (struct spdk_nvme_qpair *qpair), 0);
 DEFINE_STUB_V(nvme_transport_ctrlr_disconnect_qpair, (struct spdk_nvme_ctrlr *ctrlr,
 		struct spdk_nvme_qpair *qpair));
 DEFINE_STUB(nvme_poll_group_disconnect_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
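A side note on the stubs above: SPDK's unit-test mock framework lets a test override a stub's return value. The fragment below is a hypothetical illustration only, assuming the `MOCK_SET()` helper from `spdk_internal/mock.h` together with the stubs declared in this hunk; the surrounding test setup is omitted.

```c
/* Hypothetical test fragment (not part of this patch): make the stubbed async
 * CONNECT send report a failure while exercising the connect path under test,
 * then put the default return value (0, as declared above) back. */
MOCK_SET(nvme_fabric_qpair_connect_async, -1);
/* ... drive the qpair-connect code under test here ... */
MOCK_SET(nvme_fabric_qpair_connect_async, 0);
```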
@@ -1279,7 +1279,7 @@ test_nvme_tcp_icresp_handle(void)
 	nvme_tcp_icresp_handle(&tqpair, &pdu);
 
 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
-	CU_ASSERT(tqpair.state == NVME_TCP_QPAIR_STATE_RUNNING);
+	CU_ASSERT(tqpair.state == NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND);
 	CU_ASSERT(tqpair.maxh2cdata == pdu.hdr.ic_resp.maxh2cdata);
 	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_resp.cpda);
 	CU_ASSERT(tqpair.flags.host_hdgst_enable == pdu.hdr.ic_resp.dgst.bits.hdgst_enable);