nvme/tcp: Change the type of recv_pdu to pointer.

This prepares for using the hardware offloading
engine in the accel framework, as some fields in nvme_tcp_pdu
need to be DMA addressable.

Change-Id: I75325e2cd7ff25fe938bea0ac9489a5027e3e0e9
Signed-off-by: Ziye Yang <ziye.yang@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/7770
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
This commit is contained in:
Ziye Yang 2021-05-06 22:13:11 +08:00 committed by Tomasz Zawadzki
parent f8ac678ec3
commit 82e4bfd346
2 changed files with 57 additions and 52 deletions

View File

@ -81,7 +81,7 @@ struct nvme_tcp_qpair {
TAILQ_HEAD(, nvme_tcp_req) outstanding_reqs;
TAILQ_HEAD(, nvme_tcp_pdu) send_queue;
struct nvme_tcp_pdu recv_pdu;
struct nvme_tcp_pdu *recv_pdu;
struct nvme_tcp_pdu *send_pdu; /* only for error pdu and init pdu */
struct nvme_tcp_pdu *send_pdus; /* Used by tcp_reqs */
enum nvme_tcp_pdu_recv_state recv_state;
@ -271,8 +271,8 @@ nvme_tcp_alloc_reqs(struct nvme_tcp_qpair *tqpair)
goto fail;
}
/* Add additional one member for the send_pdu owned by the tqpair */
tqpair->send_pdus = spdk_zmalloc((tqpair->num_entries + 1) * sizeof(struct nvme_tcp_pdu),
/* Add 2 additional members for the send_pdu and recv_pdu owned by the tqpair */
tqpair->send_pdus = spdk_zmalloc((tqpair->num_entries + 2) * sizeof(struct nvme_tcp_pdu),
0x1000, NULL,
SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
@ -293,6 +293,7 @@ nvme_tcp_alloc_reqs(struct nvme_tcp_qpair *tqpair)
}
tqpair->send_pdu = &tqpair->send_pdus[i];
tqpair->recv_pdu = &tqpair->send_pdus[i + 1];
return 0;
fail:
@ -819,7 +820,7 @@ nvme_tcp_qpair_set_recv_state(struct nvme_tcp_qpair *tqpair,
switch (state) {
case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY:
case NVME_TCP_PDU_RECV_STATE_ERROR:
memset(&tqpair->recv_pdu, 0, sizeof(struct nvme_tcp_pdu));
memset(tqpair->recv_pdu, 0, sizeof(struct nvme_tcp_pdu));
break;
case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH:
case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH:
@ -882,7 +883,7 @@ nvme_tcp_pdu_ch_handle(struct nvme_tcp_qpair *tqpair)
uint32_t expected_hlen, hd_len = 0;
bool plen_error = false;
pdu = &tqpair->recv_pdu;
pdu = tqpair->recv_pdu;
SPDK_DEBUGLOG(nvme, "pdu type = %d\n", pdu->hdr.common.pdu_type);
if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP) {
@ -938,7 +939,7 @@ nvme_tcp_pdu_ch_handle(struct nvme_tcp_qpair *tqpair)
break;
default:
SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->recv_pdu.hdr.common.pdu_type);
SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->recv_pdu->hdr.common.pdu_type);
fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, pdu_type);
goto err;
@ -958,7 +959,7 @@ nvme_tcp_pdu_ch_handle(struct nvme_tcp_qpair *tqpair)
goto err;
} else {
nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
nvme_tcp_pdu_calc_psh_len(&tqpair->recv_pdu, tqpair->flags.host_hdgst_enable);
nvme_tcp_pdu_calc_psh_len(tqpair->recv_pdu, tqpair->flags.host_hdgst_enable);
return;
}
err:
@ -1050,7 +1051,7 @@ nvme_tcp_pdu_payload_handle(struct nvme_tcp_qpair *tqpair,
enum spdk_nvme_tcp_term_req_fes fes;
assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
pdu = &tqpair->recv_pdu;
pdu = tqpair->recv_pdu;
SPDK_DEBUGLOG(nvme, "enter\n");
@ -1467,7 +1468,7 @@ nvme_tcp_pdu_psh_handle(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
enum spdk_nvme_tcp_term_req_fes fes;
assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
pdu = &tqpair->recv_pdu;
pdu = tqpair->recv_pdu;
SPDK_DEBUGLOG(nvme, "enter: pdu type =%u\n", pdu->hdr.common.pdu_type);
/* check header digest if needed */
@ -1502,7 +1503,7 @@ nvme_tcp_pdu_psh_handle(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
break;
default:
SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->recv_pdu.hdr.common.pdu_type);
SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->recv_pdu->hdr.common.pdu_type);
fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
error_offset = 1;
nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
@ -1529,7 +1530,7 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
break;
/* common header */
case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH:
pdu = &tqpair->recv_pdu;
pdu = tqpair->recv_pdu;
if (pdu->ch_valid_bytes < sizeof(struct spdk_nvme_tcp_common_pdu_hdr)) {
rc = nvme_tcp_read_data(tqpair->sock,
sizeof(struct spdk_nvme_tcp_common_pdu_hdr) - pdu->ch_valid_bytes,
@ -1550,7 +1551,7 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
break;
/* Wait for the pdu specific header */
case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH:
pdu = &tqpair->recv_pdu;
pdu = tqpair->recv_pdu;
rc = nvme_tcp_read_data(tqpair->sock,
pdu->psh_len - pdu->psh_valid_bytes,
(uint8_t *)&pdu->hdr.raw + sizeof(struct spdk_nvme_tcp_common_pdu_hdr) + pdu->psh_valid_bytes);
@ -1569,7 +1570,7 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
nvme_tcp_pdu_psh_handle(tqpair, reaped);
break;
case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD:
pdu = &tqpair->recv_pdu;
pdu = tqpair->recv_pdu;
/* check whether the data is valid, if not we just return */
if (!pdu->data_len) {
return NVME_TCP_PDU_IN_PROGRESS;

View File

@ -800,6 +800,9 @@ test_nvme_tcp_qpair_set_recv_state(void)
{
struct nvme_tcp_qpair tqpair = {};
enum nvme_tcp_pdu_recv_state state;
struct nvme_tcp_pdu recv_pdu = {};
tqpair.recv_pdu = &recv_pdu;
/* case1: The recv state of tqpair is same with the state to be set */
tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
@ -809,17 +812,17 @@ test_nvme_tcp_qpair_set_recv_state(void)
/* case2: The recv state of tqpair is different with the state to be set */
/* state is NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY or NVME_TCP_PDU_RECV_STATE_ERROR, tqpair->recv_pdu will be cleared */
tqpair.recv_pdu.cb_arg = (void *)0xDEADBEEF;
tqpair.recv_pdu->cb_arg = (void *)0xDEADBEEF;
state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY;
nvme_tcp_qpair_set_recv_state(&tqpair, state);
CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
CU_ASSERT(tqpair.recv_pdu.cb_arg == (void *)0x0);
CU_ASSERT(tqpair.recv_pdu->cb_arg == (void *)0x0);
tqpair.recv_pdu.cb_arg = (void *)0xDEADBEEF;
tqpair.recv_pdu->cb_arg = (void *)0xDEADBEEF;
state = NVME_TCP_PDU_RECV_STATE_ERROR;
nvme_tcp_qpair_set_recv_state(&tqpair, state);
CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
CU_ASSERT(tqpair.recv_pdu.cb_arg == (void *)0x0);
CU_ASSERT(tqpair.recv_pdu->cb_arg == (void *)0x0);
/* state is NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH or NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH or NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD or default */
state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
@ -894,12 +897,12 @@ static void
test_nvme_tcp_qpair_send_h2c_term_req(void)
{
struct nvme_tcp_qpair tqpair = {};
struct nvme_tcp_pdu pdu = {};
struct nvme_tcp_pdu send_pdu = {};
struct nvme_tcp_pdu pdu = {}, recv_pdu = {}, send_pdu = {};
enum spdk_nvme_tcp_term_req_fes fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
uint32_t error_offset = 1;
tqpair.send_pdu = &send_pdu;
tqpair.recv_pdu = &recv_pdu;
TAILQ_INIT(&tqpair.send_queue);
/* case1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Except: copy_len == hlen */
pdu.hdr.common.hlen = 64;
@ -924,12 +927,13 @@ static void
test_nvme_tcp_pdu_ch_handle(void)
{
struct nvme_tcp_qpair tqpair = {};
struct nvme_tcp_pdu send_pdu = {};
struct nvme_tcp_pdu send_pdu = {}, recv_pdu = {};
tqpair.send_pdu = &send_pdu;
tqpair.recv_pdu = &recv_pdu;
TAILQ_INIT(&tqpair.send_queue);
/* case 1: Already received IC_RESP PDU. Expect: fail */
tqpair.recv_pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
tqpair.state = NVME_TCP_QPAIR_STATE_INITIALIZING;
nvme_tcp_pdu_ch_handle(&tqpair);
CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
@ -938,10 +942,10 @@ test_nvme_tcp_pdu_ch_handle(void)
CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen);
/* case 2: Expected PDU header length and received are different. Expect: fail */
tqpair.recv_pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
tqpair.recv_pdu.hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
tqpair.recv_pdu.hdr.common.hlen = 0;
tqpair.recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
tqpair.recv_pdu->hdr.common.hlen = 0;
nvme_tcp_pdu_ch_handle(&tqpair);
CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
@ -950,10 +954,10 @@ test_nvme_tcp_pdu_ch_handle(void)
CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 2);
/* case 3: The TCP/IP tqpair connection is not negotiated. Expect: fail */
tqpair.recv_pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
tqpair.recv_pdu.hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
tqpair.recv_pdu.hdr.common.hlen = 0;
tqpair.recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
tqpair.recv_pdu->hdr.common.hlen = 0;
nvme_tcp_pdu_ch_handle(&tqpair);
CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
@ -961,10 +965,10 @@ test_nvme_tcp_pdu_ch_handle(void)
CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen);
/* case 4: Unexpected PDU type. Expect: fail */
tqpair.recv_pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
tqpair.recv_pdu.hdr.common.plen = 0;
tqpair.recv_pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
tqpair.recv_pdu->hdr.common.plen = 0;
tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
nvme_tcp_pdu_ch_handle(&tqpair);
CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
@ -973,10 +977,10 @@ test_nvme_tcp_pdu_ch_handle(void)
(unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
/* case 5: plen error. Expect: fail */
tqpair.recv_pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
tqpair.recv_pdu.hdr.common.plen = 0;
tqpair.recv_pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
tqpair.recv_pdu->hdr.common.plen = 0;
tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
nvme_tcp_pdu_ch_handle(&tqpair);
CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
@ -985,11 +989,11 @@ test_nvme_tcp_pdu_ch_handle(void)
(unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);
tqpair.recv_pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
tqpair.recv_pdu.hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
tqpair.recv_pdu.hdr.common.plen = 0;
tqpair.recv_pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_rsp);
tqpair.recv_pdu->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
tqpair.recv_pdu->hdr.common.plen = 0;
tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_rsp);
nvme_tcp_pdu_ch_handle(&tqpair);
CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
@ -998,11 +1002,11 @@ test_nvme_tcp_pdu_ch_handle(void)
(unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);
tqpair.recv_pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_DATA;
tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_DATA;
tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
tqpair.recv_pdu.hdr.common.plen = 0;
tqpair.recv_pdu.hdr.common.pdo = 64;
tqpair.recv_pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_c2h_data_hdr);
tqpair.recv_pdu->hdr.common.plen = 0;
tqpair.recv_pdu->hdr.common.pdo = 64;
tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_c2h_data_hdr);
nvme_tcp_pdu_ch_handle(&tqpair);
CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
@ -1011,10 +1015,10 @@ test_nvme_tcp_pdu_ch_handle(void)
(unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);
tqpair.recv_pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ;
tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ;
tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
tqpair.recv_pdu.hdr.common.plen = 0;
tqpair.recv_pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
tqpair.recv_pdu->hdr.common.plen = 0;
tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
nvme_tcp_pdu_ch_handle(&tqpair);
CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
@ -1023,11 +1027,11 @@ test_nvme_tcp_pdu_ch_handle(void)
(unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);
tqpair.recv_pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_R2T;
tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_R2T;
tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
tqpair.recv_pdu.hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
tqpair.recv_pdu.hdr.common.plen = 0;
tqpair.recv_pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_r2t_hdr);
tqpair.recv_pdu->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
tqpair.recv_pdu->hdr.common.plen = 0;
tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_r2t_hdr);
nvme_tcp_pdu_ch_handle(&tqpair);
CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
@ -1037,13 +1041,13 @@ test_nvme_tcp_pdu_ch_handle(void)
CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);
/* case 6: Expect: PASS */
tqpair.recv_pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
tqpair.recv_pdu.hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
tqpair.recv_pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
tqpair.recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
nvme_tcp_pdu_ch_handle(&tqpair);
CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
CU_ASSERT(tqpair.recv_pdu.psh_len == tqpair.recv_pdu.hdr.common.hlen - sizeof(
CU_ASSERT(tqpair.recv_pdu->psh_len == tqpair.recv_pdu->hdr.common.hlen - sizeof(
struct spdk_nvme_tcp_common_pdu_hdr));
}