nvme/rdma: use timeout when destroying qpairs
Replaced the poll cycle count with a timeout when destroying a qpair that is part of a poll group. Tracking time instead of a poll count is more stable, as the number of poll cycles can vary based on the application's behavior when destroying a qpair.

Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Change-Id: I7445bc1b411f2905aab7bf3dc7b2d3344712e1eb
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/9200
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
This commit is contained in:
parent
b556e43e92
commit
98b483a35e
@@ -90,10 +90,10 @@
|
||||
#define NVME_RDMA_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT 31
|
||||
|
||||
/*
|
||||
* Number of poller cycles to keep a pointer to destroyed qpairs
|
||||
* Number of microseconds to keep a pointer to destroyed qpairs
|
||||
* in the poll group.
|
||||
*/
|
||||
#define NVME_RDMA_DESTROYED_QPAIR_EXPIRATION_CYCLES 50
|
||||
#define NVME_RDMA_DESTROYED_QPAIR_EXPIRATION_TIMEOUT_US 1000000ull
|
||||
|
||||
/*
|
||||
* The max length of keyed SGL data block (3 bytes)
|
||||
@@ -144,7 +144,7 @@ struct nvme_rdma_ctrlr {
|
||||
|
||||
struct nvme_rdma_destroyed_qpair {
|
||||
struct nvme_rdma_qpair *destroyed_qpair_tracker;
|
||||
uint32_t completed_cycles;
|
||||
uint64_t timeout_ticks;
|
||||
STAILQ_ENTRY(nvme_rdma_destroyed_qpair) link;
|
||||
};
|
||||
|
||||
@@ -2470,7 +2470,9 @@ nvme_rdma_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
|
||||
}
|
||||
|
||||
destroyed_qpair->destroyed_qpair_tracker = rqpair;
|
||||
destroyed_qpair->completed_cycles = 0;
|
||||
destroyed_qpair->timeout_ticks = spdk_get_ticks() +
|
||||
(NVME_RDMA_DESTROYED_QPAIR_EXPIRATION_TIMEOUT_US *
|
||||
spdk_get_ticks_hz()) / SPDK_SEC_TO_USEC;
|
||||
STAILQ_INSERT_TAIL(&group->destroyed_qpairs, destroyed_qpair, link);
|
||||
|
||||
rqpair->defer_deletion_to_pg = true;
|
||||
@@ -2595,10 +2597,9 @@ nvme_rdma_poll_group_process_completions(struct spdk_nvme_transport_poll_group *
|
||||
* but have a fallback for other cases where we don't get all of our completions back.
|
||||
*/
|
||||
STAILQ_FOREACH_SAFE(qpair_tracker, &group->destroyed_qpairs, link, tmp_qpair_tracker) {
|
||||
qpair_tracker->completed_cycles++;
|
||||
rqpair = qpair_tracker->destroyed_qpair_tracker;
|
||||
if ((rqpair->current_num_sends == 0 && rqpair->current_num_recvs == 0) ||
|
||||
qpair_tracker->completed_cycles > NVME_RDMA_DESTROYED_QPAIR_EXPIRATION_CYCLES) {
|
||||
spdk_get_ticks() > qpair_tracker->timeout_ticks) {
|
||||
nvme_rdma_poll_group_delete_qpair(group, qpair_tracker);
|
||||
}
|
||||
}
|
||||
|
@@ -991,6 +991,7 @@ test_nvme_rdma_poll_group_connect_disconnect_qpair(void)
|
||||
CU_ASSERT(rqpair->cq == (void *)0xFEEDBEEF);
|
||||
CU_ASSERT(rqpair->poller != NULL);
|
||||
|
||||
MOCK_SET(spdk_get_ticks, 10);
|
||||
rc = nvme_rdma_poll_group_disconnect_qpair(&rqpair->qpair);
|
||||
CU_ASSERT(rc == 0);
|
||||
CU_ASSERT(rqpair->defer_deletion_to_pg == true);
|
||||
@@ -1000,7 +1001,8 @@ test_nvme_rdma_poll_group_connect_disconnect_qpair(void)
|
||||
|
||||
qpair_tracker = STAILQ_FIRST(&group.destroyed_qpairs);
|
||||
CU_ASSERT(qpair_tracker->destroyed_qpair_tracker == rqpair);
|
||||
CU_ASSERT(qpair_tracker->completed_cycles == 0);
|
||||
CU_ASSERT(qpair_tracker->timeout_ticks == 10 + (NVME_RDMA_QPAIR_CM_EVENT_TIMEOUT_US *
|
||||
spdk_get_ticks_hz()) / SPDK_SEC_TO_USEC);
|
||||
|
||||
nvme_rdma_poll_group_delete_qpair(&group, qpair_tracker);
|
||||
CU_ASSERT(rc == 0);
|
||||
@@ -1008,6 +1010,7 @@ test_nvme_rdma_poll_group_connect_disconnect_qpair(void)
|
||||
|
||||
nvme_rdma_poll_group_free_pollers(&group);
|
||||
CU_ASSERT(STAILQ_EMPTY(&group.pollers));
|
||||
MOCK_CLEAR(spdk_get_ticks);
|
||||
|
||||
/* No available poller */
|
||||
rqpair = calloc(1, sizeof(struct nvme_rdma_qpair));
|
||||
|
Loading…
Reference in New Issue
Block a user