nvme/tcp: Add NVMe TCP poll group statistics

Enable dump of transport stats in the functional test.
Update unit tests to support the new statistics.

Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Change-Id: I815aeea7d07bd33a915f19537d60611ba7101361
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8885
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Alexey Marchuk 2021-07-19 11:57:48 +03:00 committed by Tomasz Zawadzki
parent 7d589976f2
commit ea86c035bb
6 changed files with 155 additions and 26 deletions
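For orientation before the diff: the new TCP counters are read back through the same poll group statistics API that the perf example below already uses for PCIe and RDMA. Below is a minimal caller sketch, assuming the spdk_nvme_poll_group_get_stats()/spdk_nvme_poll_group_free_stats() pair and the num_transports/transport_stat[] members of struct spdk_nvme_poll_group_stat; those names come from include/spdk/nvme.h of the same tree, not from the hunks shown here, so treat them as assumptions.

#include <stdio.h>
#include <inttypes.h>
#include "spdk/nvme.h"

/* Hedged sketch (not part of the patch): dump the new TCP counters for one poll group. */
static void
dump_tcp_poll_group_stats(struct spdk_nvme_poll_group *group)
{
	struct spdk_nvme_poll_group_stat *stat = NULL;
	uint32_t i;

	if (spdk_nvme_poll_group_get_stats(group, &stat) != 0) {
		return;
	}

	for (i = 0; i < stat->num_transports; i++) {
		if (stat->transport_stat[i]->trtype == SPDK_NVME_TRANSPORT_TCP) {
			printf("polls: %" PRIu64 " idle_polls: %" PRIu64
			       " nvme_completions: %" PRIu64 "\n",
			       stat->transport_stat[i]->tcp.polls,
			       stat->transport_stat[i]->tcp.idle_polls,
			       stat->transport_stat[i]->tcp.nvme_completions);
		}
	}

	spdk_nvme_poll_group_free_stats(group, stat);
}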


@@ -1064,6 +1064,22 @@ nvme_dump_pcie_statistics(struct spdk_nvme_transport_poll_group_stat *stat)
printf("\tqueued_requests: %"PRIu64"\n", pcie_stat->queued_requests);
}
static void
nvme_dump_tcp_statistics(struct spdk_nvme_transport_poll_group_stat *stat)
{
struct spdk_nvme_tcp_stat *tcp_stat;
tcp_stat = &stat->tcp;
printf("TCP transport:\n");
printf("\tpolls: %"PRIu64"\n", tcp_stat->polls);
printf("\tidle_polls: %"PRIu64"\n", tcp_stat->idle_polls);
printf("\tsock_completions: %"PRIu64"\n", tcp_stat->socket_completions);
printf("\tnvme_completions: %"PRIu64"\n", tcp_stat->nvme_completions);
printf("\tsubmitted_requests: %"PRIu64"\n", tcp_stat->submitted_requests);
printf("\tqueued_requests: %"PRIu64"\n", tcp_stat->queued_requests);
}
static void
nvme_dump_transport_stats(uint32_t lcore, struct ns_worker_ctx *ns_ctx)
{
@@ -1094,6 +1110,9 @@ nvme_dump_transport_stats(uint32_t lcore, struct ns_worker_ctx *ns_ctx)
case SPDK_NVME_TRANSPORT_PCIE:
nvme_dump_pcie_statistics(stat->transport_stat[i]);
break;
case SPDK_NVME_TRANSPORT_TCP:
nvme_dump_tcp_statistics(stat->transport_stat[i]);
break;
default:
fprintf(stderr, "Unknown transport statistics %d %s\n", stat->transport_stat[i]->trtype,
spdk_nvme_transport_id_trtype_str(stat->transport_stat[i]->trtype));


@@ -497,6 +497,15 @@ struct spdk_nvme_pcie_stat {
uint64_t sq_doobell_updates;
};
struct spdk_nvme_tcp_stat {
uint64_t polls;
uint64_t idle_polls;
uint64_t socket_completions;
uint64_t nvme_completions;
uint64_t submitted_requests;
uint64_t queued_requests;
};
struct spdk_nvme_transport_poll_group_stat {
spdk_nvme_transport_type_t trtype;
union {
@@ -505,6 +514,7 @@ struct spdk_nvme_transport_poll_group_stat {
struct spdk_nvme_rdma_device_stat *device_stats;
} rdma;
struct spdk_nvme_pcie_stat pcie;
struct spdk_nvme_tcp_stat tcp;
};
};


@@ -3,6 +3,7 @@
*
* Copyright (c) Intel Corporation. All rights reserved.
* Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
* Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -70,6 +71,7 @@ struct nvme_tcp_poll_group {
int64_t num_completions;
TAILQ_HEAD(, nvme_tcp_qpair) needs_poll;
struct spdk_nvme_tcp_stat stats;
};
/* NVMe TCP qpair extensions for spdk_nvme_qpair */
@@ -85,8 +87,8 @@ struct nvme_tcp_qpair {
struct nvme_tcp_pdu *send_pdu; /* only for error pdu and init pdu */
struct nvme_tcp_pdu *send_pdus; /* Used by tcp_reqs */
enum nvme_tcp_pdu_recv_state recv_state;
struct nvme_tcp_req *tcp_reqs;
struct spdk_nvme_tcp_stat *stats;
uint16_t num_entries;
uint16_t async_complete;
@@ -351,6 +353,7 @@ nvme_tcp_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_q
nvme_qpair_deinit(qpair);
tqpair = nvme_tcp_qpair(qpair);
nvme_tcp_free_reqs(tqpair);
free(tqpair->stats);
free(tqpair);
return 0;
@@ -422,6 +425,7 @@ _tcp_write_pdu(struct nvme_tcp_pdu *pdu)
pdu->sock_req.cb_fn = _pdu_write_done;
pdu->sock_req.cb_arg = pdu;
TAILQ_INSERT_TAIL(&tqpair->send_queue, pdu, tailq);
tqpair->stats->submitted_requests++;
spdk_sock_writev_async(tqpair->sock, &pdu->sock_req);
}
@@ -737,6 +741,7 @@ nvme_tcp_qpair_submit_request(struct spdk_nvme_qpair *qpair,
tcp_req = nvme_tcp_req_get(tqpair);
if (!tcp_req) {
tqpair->stats->queued_requests++;
/* Inform the upper layer to try again later. */
return -EAGAIN;
}
@@ -1821,6 +1826,7 @@ nvme_tcp_qpair_sock_cb(void *ctx, struct spdk_sock_group *group, struct spdk_soc
if (pgroup->num_completions >= 0 && num_completions >= 0) {
pgroup->num_completions += num_completions;
pgroup->stats.nvme_completions += num_completions;
} else {
pgroup->num_completions = -ENXIO;
}
@@ -1983,6 +1989,7 @@ nvme_tcp_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpa
{
int rc = 0;
struct nvme_tcp_qpair *tqpair;
struct nvme_tcp_poll_group *tgroup;
tqpair = nvme_tcp_qpair(qpair);
@@ -1999,6 +2006,14 @@ nvme_tcp_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpa
SPDK_ERRLOG("Unable to activate the tcp qpair.\n");
return rc;
}
tgroup = nvme_tcp_poll_group(qpair->poll_group);
tqpair->stats = &tgroup->stats;
} else {
tqpair->stats = calloc(1, sizeof(*tqpair->stats));
if (!tqpair->stats) {
SPDK_ERRLOG("tcp stats memory allocation failed\n");
return -ENOMEM;
}
}
tqpair->maxr2t = NVME_TCP_MAX_R2T_DEFAULT;
@@ -2257,11 +2272,19 @@ static int
nvme_tcp_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
struct spdk_nvme_qpair *qpair)
{
struct nvme_tcp_qpair *tqpair;
int rc = 0;
if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
return nvme_poll_group_disconnect_qpair(qpair);
rc = nvme_poll_group_disconnect_qpair(qpair);
}
return 0;
tqpair = nvme_tcp_qpair(qpair);
/* When qpair is deleted, stats are freed. free(NULL) is valid case, so just set
* stats pointer to NULL */
tqpair->stats = NULL;
return rc;
}
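Taken together, the connect, remove and delete hunks establish the ownership rule for tqpair->stats: a qpair that polls through a poll group borrows the group's counters, a standalone qpair allocates its own, and the pointer is cleared when the qpair leaves its group so that the unconditional free() in nvme_tcp_ctrlr_delete_io_qpair() only ever releases qpair-owned memory (free(NULL) being a no-op). A condensed, illustrative restatement of that rule, relying only on the internal structures shown in the hunks above:

#include <stdlib.h>
#include <errno.h>

/* Condensed illustration of the stats ownership rule; not the literal patch. */
static int
tcp_qpair_attach_stats(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_poll_group *tgroup)
{
	if (tgroup != NULL) {
		/* Borrowed: counters live in the poll group and are freed with it. */
		tqpair->stats = &tgroup->stats;
		return 0;
	}

	/* Owned: a standalone qpair gets private counters, released later in
	 * nvme_tcp_ctrlr_delete_io_qpair(). */
	tqpair->stats = calloc(1, sizeof(*tqpair->stats));
	return tqpair->stats != NULL ? 0 : -ENOMEM;
}

static void
tcp_qpair_detach_stats(struct nvme_tcp_qpair *tqpair)
{
	/* On removal from the group, drop the borrowed pointer so the later
	 * free(tqpair->stats) degenerates to free(NULL). */
	tqpair->stats = NULL;
}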
static int64_t
@@ -2275,6 +2298,7 @@ nvme_tcp_poll_group_process_completions(struct spdk_nvme_transport_poll_group *t
group->completions_per_qpair = completions_per_qpair;
group->num_completions = 0;
group->stats.polls++;
num_events = spdk_sock_group_poll(group->sock_group);
@@ -2292,6 +2316,9 @@ nvme_tcp_poll_group_process_completions(struct spdk_nvme_transport_poll_group *t
return num_events;
}
group->stats.idle_polls += !num_events;
group->stats.socket_completions += num_events;
return group->num_completions;
}
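Since polls is incremented on every call into nvme_tcp_poll_group_process_completions(), idle_polls on every call that saw no socket events, and socket_completions accumulates the events themselves, simple efficiency figures fall out of the counters directly. A small illustrative helper (not part of the patch, assuming the field names from struct spdk_nvme_tcp_stat above):

/* Illustrative only: average socket events per non-idle poll, derived from
 * the counters maintained in nvme_tcp_poll_group_process_completions(). */
static double
tcp_events_per_busy_poll(const struct spdk_nvme_tcp_stat *st)
{
	uint64_t busy_polls = st->polls - st->idle_polls;

	return busy_polls ? (double)st->socket_completions / (double)busy_polls : 0.0;
}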
@@ -2316,6 +2343,40 @@ nvme_tcp_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
return 0;
}
static int
nvme_tcp_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
struct spdk_nvme_transport_poll_group_stat **_stats)
{
struct nvme_tcp_poll_group *group;
struct spdk_nvme_transport_poll_group_stat *stats;
if (tgroup == NULL || _stats == NULL) {
SPDK_ERRLOG("Invalid stats or group pointer\n");
return -EINVAL;
}
group = nvme_tcp_poll_group(tgroup);
stats = calloc(1, sizeof(*stats));
if (!stats) {
SPDK_ERRLOG("Can't allocate memory for TCP stats\n");
return -ENOMEM;
}
stats->trtype = SPDK_NVME_TRANSPORT_TCP;
memcpy(&stats->tcp, &group->stats, sizeof(group->stats));
*_stats = stats;
return 0;
}
static void
nvme_tcp_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
struct spdk_nvme_transport_poll_group_stat *stats)
{
free(stats);
}
const struct spdk_nvme_transport_ops tcp_ops = {
.name = "TCP",
.type = SPDK_NVME_TRANSPORT_TCP,
@@ -2356,6 +2417,8 @@ const struct spdk_nvme_transport_ops tcp_ops = {
.poll_group_remove = nvme_tcp_poll_group_remove,
.poll_group_process_completions = nvme_tcp_poll_group_process_completions,
.poll_group_destroy = nvme_tcp_poll_group_destroy,
.poll_group_get_stats = nvme_tcp_poll_group_get_stats,
.poll_group_free_stats = nvme_tcp_poll_group_free_stats,
};
SPDK_NVME_TRANSPORT_REGISTER(tcp, &tcp_ops);


@@ -1014,6 +1014,18 @@ rpc_bdev_nvme_pcie_stats(struct spdk_json_write_ctx *w,
spdk_json_write_named_uint64(w, "sq_doobell_updates", stat->pcie.sq_doobell_updates);
}
static void
rpc_bdev_nvme_tcp_stats(struct spdk_json_write_ctx *w,
struct spdk_nvme_transport_poll_group_stat *stat)
{
spdk_json_write_named_uint64(w, "polls", stat->tcp.polls);
spdk_json_write_named_uint64(w, "idle_polls", stat->tcp.idle_polls);
spdk_json_write_named_uint64(w, "socket_completions", stat->tcp.socket_completions);
spdk_json_write_named_uint64(w, "nvme_completions", stat->tcp.nvme_completions);
spdk_json_write_named_uint64(w, "queued_requests", stat->tcp.queued_requests);
spdk_json_write_named_uint64(w, "submitted_requests", stat->tcp.submitted_requests);
}
static void
rpc_bdev_nvme_stats_per_channel(struct spdk_io_channel_iter *i)
{
@@ -1051,6 +1063,9 @@ rpc_bdev_nvme_stats_per_channel(struct spdk_io_channel_iter *i)
case SPDK_NVME_TRANSPORT_PCIE:
rpc_bdev_nvme_pcie_stats(ctx->w, tr_stat);
break;
case SPDK_NVME_TRANSPORT_TCP:
rpc_bdev_nvme_tcp_stats(ctx->w, tr_stat);
break;
default:
SPDK_WARNLOG("Can't handle trtype %d %s\n", tr_stat->trtype,
spdk_nvme_transport_id_trtype_str(tr_stat->trtype));


@@ -44,7 +44,7 @@ fi
$SPDK_EXAMPLE_DIR/perf -q 1 -o 4096 -w randrw -M 50 -t 1 -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT"
$SPDK_EXAMPLE_DIR/perf -q 32 -o 4096 -w randrw -M 50 -t 1 -HI -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT"
$SPDK_EXAMPLE_DIR/perf -q 128 -o 262144 -w randrw -M 50 -t 2 -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT"
$SPDK_EXAMPLE_DIR/perf -q 128 -o 262144 -w randrw -M 50 -t 2 -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT" --transport-stat
sync
$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1


@@ -3,6 +3,7 @@
*
* Copyright (c) Intel Corporation.
* All rights reserved.
* Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -613,6 +614,7 @@ static void
test_nvme_tcp_qpair_capsule_cmd_send(void)
{
struct nvme_tcp_qpair tqpair = {};
struct spdk_nvme_tcp_stat stats = {};
struct nvme_tcp_req tcp_req = {};
struct nvme_tcp_pdu pdu = {};
struct nvme_request req = {};
@@ -626,6 +628,7 @@ test_nvme_tcp_qpair_capsule_cmd_send(void)
tcp_req.req = &req;
tcp_req.pdu = &pdu;
TAILQ_INIT(&tqpair.send_queue);
tqpair.stats = &stats;
tcp_req.iov[0].iov_base = (void *)iov_base0;
tcp_req.iov[0].iov_len = 4096;
@@ -716,6 +719,7 @@ static void
test_nvme_tcp_qpair_write_pdu(void)
{
struct nvme_tcp_qpair tqpair = {};
struct spdk_nvme_tcp_stat stats = {};
struct nvme_tcp_pdu pdu = {};
void *cb_arg = (void *)0xDEADBEEF;
char iov_base0[4096];
@@ -743,6 +747,7 @@ test_nvme_tcp_qpair_write_pdu(void)
pdu.hdr.common.plen += pdu.data_len;
tqpair.flags.host_hdgst_enable = 1;
tqpair.flags.host_ddgst_enable = 1;
tqpair.stats = &stats;
nvme_tcp_qpair_write_pdu(&tqpair,
&pdu,
@@ -898,12 +903,14 @@ static void
test_nvme_tcp_qpair_send_h2c_term_req(void)
{
struct nvme_tcp_qpair tqpair = {};
struct spdk_nvme_tcp_stat stats = {};
struct nvme_tcp_pdu pdu = {}, recv_pdu = {}, send_pdu = {};
enum spdk_nvme_tcp_term_req_fes fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
uint32_t error_offset = 1;
tqpair.send_pdu = &send_pdu;
tqpair.recv_pdu = &recv_pdu;
tqpair.stats = &stats;
TAILQ_INIT(&tqpair.send_queue);
/* case1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == hlen */
pdu.hdr.common.hlen = 64;
@@ -928,10 +935,12 @@ static void
test_nvme_tcp_pdu_ch_handle(void)
{
struct nvme_tcp_qpair tqpair = {};
struct spdk_nvme_tcp_stat stats = {};
struct nvme_tcp_pdu send_pdu = {}, recv_pdu = {};
tqpair.send_pdu = &send_pdu;
tqpair.recv_pdu = &recv_pdu;
tqpair.stats = &stats;
TAILQ_INIT(&tqpair.send_queue);
/* case 1: Already received IC_RESP PDU. Expect: fail */
tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
@@ -1111,6 +1120,7 @@ static void
test_nvme_tcp_qpair_icreq_send(void)
{
struct nvme_tcp_qpair tqpair = {};
struct spdk_nvme_tcp_stat stats = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct nvme_tcp_pdu pdu = {};
struct nvme_tcp_poll_group poll_group = {};
@@ -1120,6 +1130,7 @@ test_nvme_tcp_qpair_icreq_send(void)
tqpair.send_pdu = &pdu;
tqpair.qpair.ctrlr = &ctrlr;
tqpair.qpair.poll_group = &poll_group.group;
tqpair.stats = &stats;
ic_req = &pdu.hdr.ic_req;
tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
@@ -1143,6 +1154,7 @@ static void
test_nvme_tcp_c2h_payload_handle(void)
{
struct nvme_tcp_qpair tqpair = {};
struct spdk_nvme_tcp_stat stats = {};
struct nvme_tcp_pdu pdu = {};
struct nvme_tcp_req tcp_req = {};
struct nvme_request req = {};
@@ -1154,6 +1166,7 @@ test_nvme_tcp_c2h_payload_handle(void)
tcp_req.req->cb_fn = ut_nvme_complete_request;
tcp_req.tqpair = &tqpair;
tcp_req.cid = 1;
tqpair.stats = &stats;
TAILQ_INIT(&tcp_req.tqpair->outstanding_reqs);
@@ -1227,12 +1240,14 @@ static void
test_nvme_tcp_icresp_handle(void)
{
struct nvme_tcp_qpair tqpair = {};
struct spdk_nvme_tcp_stat stats = {};
struct nvme_tcp_pdu pdu = {};
struct nvme_tcp_pdu send_pdu = {};
struct nvme_tcp_pdu recv_pdu = {};
tqpair.send_pdu = &send_pdu;
tqpair.recv_pdu = &recv_pdu;
tqpair.stats = &stats;
TAILQ_INIT(&tqpair.send_queue);
/* case 1: Expected ICResp PFV and got are different. */
@@ -1291,6 +1306,7 @@ static void
test_nvme_tcp_pdu_payload_handle(void)
{
struct nvme_tcp_qpair tqpair = {};
struct spdk_nvme_tcp_stat stats = {};
struct nvme_tcp_pdu recv_pdu = {};
struct nvme_tcp_req tcp_req = {};
struct nvme_request req = {};
@@ -1300,6 +1316,7 @@ test_nvme_tcp_pdu_payload_handle(void)
tcp_req.tqpair = &tqpair;
tcp_req.req = &req;
tcp_req.req->qpair = &tqpair.qpair;
tqpair.stats = &stats;
tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD;
tqpair.qpair.id = 1;
@@ -1342,6 +1359,7 @@ static void
test_nvme_tcp_capsule_resp_hdr_handle(void)
{
struct nvme_tcp_qpair tqpair = {};
struct spdk_nvme_tcp_stat stats = {};
struct nvme_request req = {};
struct spdk_nvme_cpl rccqe_tgt = {};
struct nvme_tcp_req *tcp_req = NULL;
@@ -1350,6 +1368,7 @@ test_nvme_tcp_capsule_resp_hdr_handle(void)
/* Initialize requests and pdus */
tqpair.num_entries = 1;
tqpair.stats = &stats;
req.qpair = &tqpair.qpair;
rc = nvme_tcp_alloc_reqs(&tqpair);
@@ -1388,32 +1407,33 @@ test_nvme_tcp_ctrlr_connect_qpair(void)
{
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_qpair *qpair;
struct nvme_tcp_qpair tqpair = {};
struct nvme_tcp_qpair *tqpair;
struct nvme_tcp_pdu pdu = {};
struct nvme_tcp_pdu recv_pdu = {};
struct spdk_nvme_tcp_ic_req *ic_req = NULL;
int rc;
tqpair.qpair.trtype = SPDK_NVME_TRANSPORT_TCP;
tqpair.recv_pdu = &recv_pdu;
qpair = &tqpair.qpair;
tqpair.sock = (struct spdk_sock *)0xDEADBEEF;
tqpair.send_pdu = &pdu;
tqpair.qpair.ctrlr = &ctrlr;
tqpair.qpair.state = NVME_QPAIR_CONNECTING;
tqpair = calloc(1, sizeof(*tqpair));
tqpair->qpair.trtype = SPDK_NVME_TRANSPORT_TCP;
tqpair->recv_pdu = &recv_pdu;
qpair = &tqpair->qpair;
tqpair->sock = (struct spdk_sock *)0xDEADBEEF;
tqpair->send_pdu = &pdu;
tqpair->qpair.ctrlr = &ctrlr;
tqpair->qpair.state = NVME_QPAIR_CONNECTING;
ic_req = &pdu.hdr.ic_req;
tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
tqpair.recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
tqpair.recv_pdu->ch_valid_bytes = 8;
tqpair.recv_pdu->psh_valid_bytes = tqpair.recv_pdu->hdr.common.hlen;
tqpair.recv_pdu->hdr.ic_resp.maxh2cdata = 4096;
tqpair.recv_pdu->hdr.ic_resp.cpda = 1;
tqpair.flags.icreq_send_ack = 1;
tqpair.qpair.ctrlr->opts.header_digest = true;
tqpair.qpair.ctrlr->opts.data_digest = true;
TAILQ_INIT(&tqpair.send_queue);
tqpair->recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
tqpair->recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
tqpair->recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
tqpair->recv_pdu->ch_valid_bytes = 8;
tqpair->recv_pdu->psh_valid_bytes = tqpair->recv_pdu->hdr.common.hlen;
tqpair->recv_pdu->hdr.ic_resp.maxh2cdata = 4096;
tqpair->recv_pdu->hdr.ic_resp.cpda = 1;
tqpair->flags.icreq_send_ack = 1;
tqpair->qpair.ctrlr->opts.header_digest = true;
tqpair->qpair.ctrlr->opts.data_digest = true;
TAILQ_INIT(&tqpair->send_queue);
rc = nvme_tcp_ctrlr_connect_qpair(&ctrlr, qpair);
CU_ASSERT(rc == 0);
@@ -1423,9 +1443,9 @@ test_nvme_tcp_ctrlr_connect_qpair(void)
CU_ASSERT(rc >= 0);
}
CU_ASSERT(tqpair.maxr2t == NVME_TCP_MAX_R2T_DEFAULT);
CU_ASSERT(tqpair.state == NVME_TCP_QPAIR_STATE_RUNNING);
CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH);
CU_ASSERT(tqpair->maxr2t == NVME_TCP_MAX_R2T_DEFAULT);
CU_ASSERT(tqpair->state == NVME_TCP_QPAIR_STATE_RUNNING);
CU_ASSERT(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH);
CU_ASSERT(ic_req->common.hlen == sizeof(*ic_req));
CU_ASSERT(ic_req->common.plen == sizeof(*ic_req));
CU_ASSERT(ic_req->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ);
@@ -1434,6 +1454,8 @@ test_nvme_tcp_ctrlr_connect_qpair(void)
CU_ASSERT(ic_req->hpda == NVME_TCP_HPDA_DEFAULT);
CU_ASSERT(ic_req->dgst.bits.hdgst_enable == true);
CU_ASSERT(ic_req->dgst.bits.ddgst_enable == true);
nvme_tcp_ctrlr_delete_io_qpair(&ctrlr, qpair);
}
static void