nvmf/tcp: add nvme_tcp_pdu_calc_psh_len function.

Purpose:

1. Do not calculate the psh_len every time.
2. Small fix: for ch_valid_bytes and psh_valid_bytes,
we do not need to use uint32_t.

Signed-off-by: Ziye Yang <ziye.yang@intel.com>
Change-Id: I9b643da4b0ebabdfe50f30e9e0a738fe95beb159
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/464253
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
This commit is contained in:
Ziye Yang 2019-08-06 17:34:45 +08:00 committed by Changpeng Liu
parent 4b4ea84848
commit 73d9cef8c5
3 changed files with 52 additions and 72 deletions

View File

@ -102,8 +102,9 @@ struct nvme_tcp_pdu {
uint8_t data_digest[SPDK_NVME_TCP_DIGEST_LEN];
int32_t padding_valid_bytes;
uint32_t ch_valid_bytes;
uint32_t psh_valid_bytes;
uint8_t ch_valid_bytes;
uint8_t psh_valid_bytes;
uint8_t psh_len;
int ref;
nvme_tcp_qpair_xfer_complete_cb cb_fn;
@ -586,4 +587,31 @@ nvme_tcp_pdu_set_data_buf(struct nvme_tcp_pdu *pdu,
}
}
/*
 * Compute and cache the number of bytes that must still be read after the
 * common PDU header (psh + header digest + padding, if any) and store the
 * result in pdu->psh_len, so it is not recomputed on every receive pass.
 * Also sets pdu->has_hdgst when a header digest is expected.
 */
static void
nvme_tcp_pdu_calc_psh_len(struct nvme_tcp_pdu *pdu, bool hdgst_enable)
{
	uint8_t psh_len, pdo;

	psh_len = pdu->hdr.common.hlen;

	/* Only the following five PDU types carry a header digest */
	if (((pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD) ||
	     (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_DATA) ||
	     (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP) ||
	     (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_DATA) ||
	     (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_R2T)) && hdgst_enable) {
		pdu->has_hdgst = true;
		psh_len += SPDK_NVME_TCP_DIGEST_LEN;
		if (pdu->hdr.common.plen > psh_len) {
			pdo = pdu->hdr.common.pdo;
			/* Compare directly rather than computing
			 * "pdo - psh_len" into an unsigned variable:
			 * for a malformed PDU with pdo < psh_len the
			 * subtraction would wrap to a large positive
			 * value and psh_len would be shrunk incorrectly.
			 * Padding only ever extends the header region.
			 */
			if (pdo > psh_len) {
				psh_len = pdo;
			}
		}
	}

	/* The common header has already been consumed in the CH state;
	 * psh_len holds only what remains to be read for this PDU's header.
	 */
	psh_len -= sizeof(struct spdk_nvme_tcp_common_pdu_hdr);
	pdu->psh_len = psh_len;
}
#endif /* SPDK_INTERNAL_NVME_TCP_H */

View File

@ -900,6 +900,7 @@ nvme_tcp_pdu_ch_handle(struct nvme_tcp_qpair *tqpair)
goto err;
} else {
nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
nvme_tcp_pdu_calc_psh_len(&tqpair->recv_pdu, tqpair->host_hdgst_enable);
return;
}
err:
@ -1408,8 +1409,6 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
int rc = 0;
struct nvme_tcp_pdu *pdu;
uint32_t data_len;
uint8_t psh_len, pdo;
int8_t padding_len;
enum nvme_tcp_pdu_recv_state prev_state;
/* The loop here is to allow for several back-to-back state changes. */
@ -1443,41 +1442,17 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
/* Wait for the pdu specific header */
case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH:
pdu = &tqpair->recv_pdu;
psh_len = pdu->hdr.common.hlen;
/* The following pdus can have digest */
if (((pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP) ||
(pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_DATA) ||
(pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_R2T)) &&
tqpair->host_hdgst_enable) {
pdu->has_hdgst = true;
psh_len += SPDK_NVME_TCP_DIGEST_LEN;
if (pdu->hdr.common.plen > psh_len) {
pdo = pdu->hdr.common.pdo;
padding_len = pdo - psh_len;
SPDK_DEBUGLOG(SPDK_LOG_NVME, "padding length is =%d for pdu=%p on tqpair=%p\n", padding_len,
pdu, tqpair);
if (padding_len > 0) {
psh_len = pdo;
}
}
rc = nvme_tcp_read_data(tqpair->sock,
pdu->psh_len - pdu->psh_valid_bytes,
(uint8_t *)&pdu->hdr.raw + sizeof(struct spdk_nvme_tcp_common_pdu_hdr) + pdu->psh_valid_bytes);
if (rc < 0) {
nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
break;
}
psh_len -= sizeof(struct spdk_nvme_tcp_common_pdu_hdr);
/* The following will read psh + hdgest (if possbile) + padding (if posssible) */
if (pdu->psh_valid_bytes < psh_len) {
rc = nvme_tcp_read_data(tqpair->sock,
psh_len - pdu->psh_valid_bytes,
(uint8_t *)&pdu->hdr.raw + sizeof(struct spdk_nvme_tcp_common_pdu_hdr) + pdu->psh_valid_bytes);
if (rc < 0) {
nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
break;
}
pdu->psh_valid_bytes += rc;
if (pdu->psh_valid_bytes < psh_len) {
return NVME_TCP_PDU_IN_PROGRESS;
}
pdu->psh_valid_bytes += rc;
if (pdu->psh_valid_bytes < pdu->psh_len) {
return NVME_TCP_PDU_IN_PROGRESS;
}
/* All header(ch, psh, head digist) of this PDU has now been read from the socket. */

View File

@ -1877,6 +1877,7 @@ spdk_nvmf_tcp_pdu_ch_handle(struct spdk_nvmf_tcp_qpair *tqpair)
goto err;
} else {
spdk_nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
nvme_tcp_pdu_calc_psh_len(&tqpair->pdu_in_progress, tqpair->host_hdgst_enable);
return;
}
err:
@ -1907,8 +1908,6 @@ spdk_nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
struct nvme_tcp_pdu *pdu;
enum nvme_tcp_pdu_recv_state prev_state;
uint32_t data_len, current_pdu_num = 0;
uint8_t psh_len, pdo, hlen;
int8_t padding_len;
/* The loop here is to allow for several back-to-back state changes. */
do {
@ -1946,40 +1945,18 @@ spdk_nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
/* Wait for the pdu specific header */
case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH:
pdu = &tqpair->pdu_in_progress;
psh_len = hlen = pdu->hdr.common.hlen;
/* Only capsule_cmd and h2c_data has header digest */
if (((pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD) ||
(pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_DATA)) &&
tqpair->host_hdgst_enable) {
pdu->has_hdgst = true;
psh_len += SPDK_NVME_TCP_DIGEST_LEN;
if (pdu->hdr.common.plen > psh_len) {
pdo = pdu->hdr.common.pdo;
padding_len = pdo - psh_len;
SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "padding length is =%d for pdu=%p on tqpair=%p\n", padding_len,
pdu, tqpair);
if (padding_len > 0) {
psh_len = pdo;
}
}
rc = nvme_tcp_read_data(tqpair->sock,
pdu->psh_len - pdu->psh_valid_bytes,
(void *)&pdu->hdr.raw + sizeof(struct spdk_nvme_tcp_common_pdu_hdr) + pdu->psh_valid_bytes);
if (rc < 0) {
return NVME_TCP_PDU_FATAL;
} else if (rc > 0) {
spdk_trace_record(TRACE_TCP_READ_FROM_SOCKET_DONE,
0, rc, 0, 0);
pdu->psh_valid_bytes += rc;
}
psh_len -= sizeof(struct spdk_nvme_tcp_common_pdu_hdr);
/* The following will read psh + hdgest (if possbile) + padding (if posssible) */
if (pdu->psh_valid_bytes < psh_len) {
rc = nvme_tcp_read_data(tqpair->sock,
psh_len - pdu->psh_valid_bytes,
(void *)&pdu->hdr.raw + sizeof(struct spdk_nvme_tcp_common_pdu_hdr) + pdu->psh_valid_bytes);
if (rc < 0) {
return NVME_TCP_PDU_FATAL;
} else if (rc > 0) {
spdk_trace_record(TRACE_TCP_READ_FROM_SOCKET_DONE,
0, rc, 0, 0);
pdu->psh_valid_bytes += rc;
}
if (pdu->psh_valid_bytes < psh_len) {
return NVME_TCP_PDU_IN_PROGRESS;
}
if (pdu->psh_valid_bytes < pdu->psh_len) {
return NVME_TCP_PDU_IN_PROGRESS;
}
/* All header(ch, psh, head digist) of this PDU has now been read from the socket. */