nvmf/tcp: Add a poller to check the timeout of each qpair

Move the timeout check for each qpair out of the poll loop and into a
dedicated poller that runs once per second. This makes the check more
efficient and scales better when a group contains many qpairs.
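
The idea, shown as a minimal standalone C sketch (illustration only, not
SPDK code; the struct and function names are hypothetical): rather than
testing every connection's timeout on every poll iteration, one periodic
sweep walks the group once per interval.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define TIMEOUT_SEC 2

struct conn {
	int id;
	time_t last_activity;
	bool exiting;
};

/* Periodic sweep: runs once per interval instead of on every poll iteration. */
static void
sweep_timeouts(struct conn *conns, int nconns, time_t now)
{
	for (int i = 0; i < nconns; i++) {
		if (conns[i].exiting) {
			continue;
		}
		if (now - conns[i].last_activity > TIMEOUT_SEC) {
			printf("conn %d timed out, disconnecting\n", conns[i].id);
			conns[i].exiting = true;
		}
	}
}

int
main(void)
{
	struct conn conns[2] = {
		{ .id = 0, .last_activity = time(NULL) },      /* fresh */
		{ .id = 1, .last_activity = time(NULL) - 10 }, /* stale */
	};

	/* In SPDK this sweep is driven by a poller registered with a one-second
	 * period; here it is simply invoked once to show the effect. */
	sweep_timeouts(conns, 2, time(NULL));
	return 0;
}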

Change-Id: I75c29a92107dc32377a2ef7edb5ac92868f1c5df
Signed-off-by: Ziye Yang <ziye.yang@intel.com>
Reviewed-on: https://review.gerrithub.io/435277
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Ziye Yang 2018-11-28 23:22:37 +08:00 committed by Changpeng Liu
parent 9d11abfd0e
commit 94cd652b18
2 changed files with 53 additions and 35 deletions


@@ -282,6 +282,7 @@ struct nvme_tcp_qpair {
struct spdk_nvmf_tcp_poll_group {
struct spdk_nvmf_transport_poll_group group;
struct spdk_sock_group *sock_group;
struct spdk_poller *timeout_poller;
TAILQ_HEAD(, nvme_tcp_qpair) qpairs;
};
@@ -1216,6 +1217,47 @@ spdk_nvmf_tcp_discover(struct spdk_nvmf_transport *transport,
entry->tsas.tcp.sectype = SPDK_NVME_TCP_SECURITY_NONE;
}
static void
spdk_nvmf_tcp_qpair_handle_timeout(struct nvme_tcp_qpair *tqpair, uint64_t tsc)
{
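/* Nothing to do if the qpair is already being torn down. */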
if ((tqpair->state == NVME_TCP_QPAIR_STATE_EXITING) ||
(tqpair->state == NVME_TCP_QPAIR_STATE_EXITED)) {
return;
}
/* Keep-alive is not supported yet; generic keep-alive support should be added later. */
if ((tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_ERROR) ||
(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY)) {
return;
}
/* Check for interval expiration */
if ((tsc - tqpair->last_pdu_time) > (tqpair->timeout * spdk_get_ticks_hz())) {
SPDK_ERRLOG("No pdu coming for tqpair=%p within %d seconds\n", tqpair, tqpair->timeout);
tqpair->state = NVME_TCP_QPAIR_STATE_EXITING;
}
}
static int
spdk_nvmf_tcp_poll_group_handle_timeout(void *ctx)
{
struct spdk_nvmf_tcp_poll_group *tgroup = ctx;
struct nvme_tcp_qpair *tqpair, *tmp;
uint64_t tsc = spdk_get_ticks();
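/* Sweep every qpair in the group and disconnect any that have timed out. */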
TAILQ_FOREACH_SAFE(tqpair, &tgroup->qpairs, link, tmp) {
spdk_nvmf_tcp_qpair_handle_timeout(tqpair, tsc);
if (tqpair->state == NVME_TCP_QPAIR_STATE_EXITING) {
/* Prevent the EXITING state from being set again */
tqpair->state = NVME_TCP_QPAIR_STATE_EXITED;
SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "will disconnect the tqpair=%p\n", tqpair);
spdk_nvmf_qpair_disconnect(&tqpair->qpair, NULL, NULL);
}
}
return -1;
}
static struct spdk_nvmf_transport_poll_group *
spdk_nvmf_tcp_poll_group_create(struct spdk_nvmf_transport *transport)
{
@@ -1232,6 +1274,9 @@ spdk_nvmf_tcp_poll_group_create(struct spdk_nvmf_transport *transport)
}
TAILQ_INIT(&tgroup->qpairs);
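/* Register a periodic poller (period in microseconds) so qpair timeouts are checked once per second. */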
tgroup->timeout_poller = spdk_poller_register(spdk_nvmf_tcp_poll_group_handle_timeout, tgroup,
1000000);
return &tgroup->group;
cleanup:
@@ -1246,6 +1291,7 @@ spdk_nvmf_tcp_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
spdk_sock_group_close(&tgroup->sock_group);
spdk_poller_unregister(&tgroup->timeout_poller);
free(tgroup);
}
@@ -2693,34 +2739,10 @@ spdk_nvmf_tcp_close_qpair(struct spdk_nvmf_qpair *qpair)
spdk_nvmf_tcp_qpair_destroy(SPDK_CONTAINEROF(qpair, struct nvme_tcp_qpair, qpair));
}
static void
spdk_nvmf_tcp_qpair_handle_timout(struct nvme_tcp_qpair *tqpair)
{
uint64_t tsc;
if ((tqpair->state == NVME_TCP_QPAIR_STATE_EXITING) ||
(tqpair->state == NVME_TCP_QPAIR_STATE_EXITED)) {
return;
}
/* Currently, we did not have keep alive support, so make sure that we should have the generic support later */
if (tqpair->recv_state != NVME_TCP_PDU_RECV_STATE_ERROR) {
return;
}
/* Check for interval expiration */
tsc = spdk_get_ticks();
if ((tsc - tqpair->last_pdu_time) > (tqpair->timeout * spdk_get_ticks_hz())) {
SPDK_ERRLOG("No pdu coming for tqpair=%p within %d seconds\n", tqpair, tqpair->timeout);
tqpair->state = NVME_TCP_QPAIR_STATE_EXITING;
}
}
static int
spdk_nvmf_tcp_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
{
struct spdk_nvmf_tcp_poll_group *tgroup;
struct nvme_tcp_qpair *tqpair, *tmp;
int rc;
tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
@@ -2735,17 +2757,6 @@ spdk_nvmf_tcp_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
return rc;
}
TAILQ_FOREACH_SAFE(tqpair, &tgroup->qpairs, link, tmp) {
spdk_nvmf_tcp_qpair_handle_timout(tqpair);
if (tqpair->state == NVME_TCP_QPAIR_STATE_EXITING) {
/* to prevent the state is set again */
tqpair->state = NVME_TCP_QPAIR_STATE_EXITED;
SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "will disconect the tqpair=%p\n", tqpair);
spdk_nvmf_qpair_disconnect(&tqpair->qpair, NULL, NULL);
}
}
return 0;
}


@@ -302,11 +302,18 @@ test_nvmf_tcp_poll_group_create(void)
{
struct spdk_nvmf_tcp_transport ttransport;
struct spdk_nvmf_transport_poll_group *group;
struct spdk_thread *thread;
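/* Poll group creation now registers a poller, which requires an SPDK thread context. */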
thread = spdk_allocate_thread(NULL, NULL, NULL, NULL, NULL);
SPDK_CU_ASSERT_FATAL(thread != NULL);
spdk_set_thread(thread);
memset(&ttransport, 0, sizeof(ttransport));
group = spdk_nvmf_tcp_poll_group_create(&ttransport.transport);
CU_ASSERT_PTR_NOT_NULL(group);
spdk_nvmf_tcp_poll_group_destroy(group);
spdk_free_thread();
}
static void