lib/nvmf: Change the pdu_in_progress in tqpair to a pointer.
This prepares for using the accel framework to calculate the crc32 digest: some fields in this structure need to be allocated in DMA-addressable memory.

Signed-off-by: Ziye Yang <ziye.yang@intel.com>
Change-Id: Ib8def5596e60f4702709da647145c4e2b6d6848f
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/7767
Community-CI: Broadcom CI
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
This commit is contained in:
parent d16c5b49dd
commit f8ac678ec3
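In short: once the crc32 digest calculation is offloaded through the accel framework, an offload engine may DMA directly out of the PDU being received, so pdu_in_progress has to live in DMA-addressable memory alongside the other PDUs. A minimal sketch of the allocation pattern the diff adopts (the helper name and pdu_size parameter are illustrative, not SPDK's actual API):

#include "spdk/env.h"

struct nvme_tcp_pdu;	/* defined in SPDK's nvme_tcp.h; opaque here */

/* Illustrative helper (not SPDK's actual function): one spdk_dma_malloc()
 * call backs resource_count request PDUs plus two qpair-owned PDUs
 * (mgmt_pdu and pdu_in_progress), so every PDU is DMA-addressable.
 * 0x1000 requests 4 KiB alignment; the NULL third argument means the
 * physical address is not needed back. */
static void *
alloc_pdu_pool(uint32_t resource_count, size_t pdu_size)
{
	return spdk_dma_malloc((resource_count + 2) * pdu_size, 0x1000, NULL);
}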
lib/nvmf/tcp.c

@@ -223,7 +223,7 @@ struct spdk_nvmf_tcp_qpair {
 	enum nvme_tcp_qpair_state	state;
 
 	/* PDU being actively received */
-	struct nvme_tcp_pdu		pdu_in_progress;
+	struct nvme_tcp_pdu		*pdu_in_progress;
 
 	/* Queues to track the requests in all states */
 	TAILQ_HEAD(, spdk_nvmf_tcp_req)	tcp_req_working_queue;
@@ -909,7 +909,7 @@ nvmf_tcp_qpair_write_pdu(struct spdk_nvmf_tcp_qpair *tqpair,
 {
 	int hlen;
 
-	assert(&tqpair->pdu_in_progress != pdu);
+	assert(tqpair->pdu_in_progress != pdu);
 
 	hlen = pdu->hdr.common.hlen;
 	pdu->cb_fn = cb_fn;
@@ -960,8 +960,8 @@ nvmf_tcp_qpair_init_mem_resource(struct spdk_nvmf_tcp_qpair *tqpair)
 		}
 	}
 
-	/* Add addtional one member, which will be used for mgmt_pdu owned by the tqpair */
-	tqpair->pdus = spdk_dma_malloc((tqpair->resource_count + 1) * sizeof(*tqpair->pdus), 0x1000, NULL);
+	/* Add addtional 2 members, which will be used for mgmt_pdu and pdu_in_progress owned by the tqpair */
+	tqpair->pdus = spdk_dma_malloc((tqpair->resource_count + 2) * sizeof(*tqpair->pdus), 0x1000, NULL);
 	if (!tqpair->pdus) {
 		SPDK_ERRLOG("Unable to allocate pdu pool on tqpair =%p.\n", tqpair);
 		return -1;
@@ -993,6 +993,7 @@ nvmf_tcp_qpair_init_mem_resource(struct spdk_nvmf_tcp_qpair *tqpair)
 
 	tqpair->mgmt_pdu = &tqpair->pdus[i];
 	tqpair->mgmt_pdu->qpair = tqpair;
+	tqpair->pdu_in_progress = &tqpair->pdus[i + 1];
 
 	tqpair->recv_buf_size = (in_capsule_data_size + sizeof(struct spdk_nvme_tcp_cmd) + 2 *
 				 SPDK_NVME_TCP_DIGEST_LEN) * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR;
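After the per-request loop in nvmf_tcp_qpair_init_mem_resource() finishes, i has reached resource_count, so the two extra slots appended to the pool are handed to the qpair itself: pdus[i] backs mgmt_pdu and pdus[i + 1] backs the new pdu_in_progress pointer. Both therefore come from the same DMA-addressable allocation as the request PDUs.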
@@ -1271,7 +1272,7 @@ nvmf_tcp_qpair_set_recv_state(struct spdk_nvmf_tcp_qpair *tqpair,
 		break;
 	case NVME_TCP_PDU_RECV_STATE_ERROR:
 	case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY:
-		memset(&tqpair->pdu_in_progress, 0, sizeof(tqpair->pdu_in_progress));
+		memset(tqpair->pdu_in_progress, 0, sizeof(*(tqpair->pdu_in_progress)));
 		break;
 	default:
 		SPDK_ERRLOG("The state(%d) is invalid\n", state);
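Note that the sizeof operand changes along with the first argument: with a pointer member, sizeof(tqpair->pdu_in_progress) would now measure the pointer itself (typically 8 bytes) instead of the PDU, silently leaving most of the structure uncleared. A self-contained illustration of the pitfall, using hypothetical names:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for struct nvme_tcp_pdu. */
struct pdu { unsigned char buf[128]; };

int main(void)
{
	struct pdu storage;
	struct pdu *in_progress = &storage;

	memset(&storage, 0xff, sizeof(storage));

	/* Wrong once the member is a pointer: clears only
	 * sizeof(in_progress) bytes, e.g. 8 of the 128. */
	/* memset(in_progress, 0, sizeof(in_progress)); */

	/* Right: dereference so sizeof measures the pointed-to type. */
	memset(in_progress, 0, sizeof(*in_progress));

	printf("struct: %zu bytes, pointer: %zu bytes\n",
	       sizeof(*in_progress), sizeof(in_progress));
	return 0;
}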
@@ -1671,7 +1672,7 @@ nvmf_tcp_pdu_payload_handle(struct spdk_nvmf_tcp_qpair *tqpair,
 	enum spdk_nvme_tcp_term_req_fes fes;
 
 	assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
-	pdu = &tqpair->pdu_in_progress;
+	pdu = tqpair->pdu_in_progress;
 
 	SPDK_DEBUGLOG(nvmf_tcp, "enter\n");
 	/* check data digest if need */
@@ -1790,7 +1791,7 @@ nvmf_tcp_pdu_psh_handle(struct spdk_nvmf_tcp_qpair *tqpair,
 	enum spdk_nvme_tcp_term_req_fes fes;
 
 	assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
-	pdu = &tqpair->pdu_in_progress;
+	pdu = tqpair->pdu_in_progress;
 
 	SPDK_DEBUGLOG(nvmf_tcp, "pdu type of tqpair(%p) is %d\n", tqpair,
 		      pdu->hdr.common.pdu_type);
@@ -1824,7 +1825,7 @@ nvmf_tcp_pdu_psh_handle(struct spdk_nvmf_tcp_qpair *tqpair,
 		break;
 
 	default:
-		SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->pdu_in_progress.hdr.common.pdu_type);
+		SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->pdu_in_progress->hdr.common.pdu_type);
 		fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
 		error_offset = 1;
 		nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset);
@@ -1842,7 +1843,7 @@ nvmf_tcp_pdu_ch_handle(struct spdk_nvmf_tcp_qpair *tqpair)
 	bool plen_error = false, pdo_error = false;
 
 	assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH);
-	pdu = &tqpair->pdu_in_progress;
+	pdu = tqpair->pdu_in_progress;
 
 	if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ) {
 		if (tqpair->state != NVME_TCP_QPAIR_STATE_INVALID) {
@@ -1918,7 +1919,7 @@ nvmf_tcp_pdu_ch_handle(struct spdk_nvmf_tcp_qpair *tqpair)
 		goto err;
 	} else {
 		nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
-		nvme_tcp_pdu_calc_psh_len(&tqpair->pdu_in_progress, tqpair->host_hdgst_enable);
+		nvme_tcp_pdu_calc_psh_len(tqpair->pdu_in_progress, tqpair->host_hdgst_enable);
 		return;
 	}
 err:
@@ -1955,7 +1956,7 @@ nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
 		prev_state = tqpair->recv_state;
 		SPDK_DEBUGLOG(nvmf_tcp, "tqpair(%p) recv pdu entering state %d\n", tqpair, prev_state);
 
-		pdu = &tqpair->pdu_in_progress;
+		pdu = tqpair->pdu_in_progress;
 		switch (tqpair->recv_state) {
 		/* Wait for the common header */
 		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY:
@@ -2391,7 +2392,7 @@ nvmf_tcp_set_incapsule_data(struct spdk_nvmf_tcp_qpair *tqpair,
 	struct nvme_tcp_pdu *pdu;
 	uint32_t plen = 0;
 
-	pdu = &tqpair->pdu_in_progress;
+	pdu = tqpair->pdu_in_progress;
 	plen = pdu->hdr.common.hlen;
 
 	if (tqpair->host_hdgst_enable) {
@@ -2443,11 +2444,11 @@ nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
 			spdk_trace_record(TRACE_TCP_REQUEST_STATE_NEW, 0, 0, (uintptr_t)tcp_req, 0);
 
 			/* copy the cmd from the receive pdu */
-			tcp_req->cmd = tqpair->pdu_in_progress.hdr.capsule_cmd.ccsqe;
+			tcp_req->cmd = tqpair->pdu_in_progress->hdr.capsule_cmd.ccsqe;
 
 			if (spdk_unlikely(spdk_nvmf_request_get_dif_ctx(&tcp_req->req, &tcp_req->req.dif.dif_ctx))) {
 				tcp_req->req.dif.dif_insert_or_strip = true;
-				tqpair->pdu_in_progress.dif_ctx = &tcp_req->req.dif.dif_ctx;
+				tqpair->pdu_in_progress->dif_ctx = &tcp_req->req.dif.dif_ctx;
 			}
 
 			/* The next state transition depends on the data transfer needs of this request. */
@@ -2520,7 +2521,7 @@ nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
 
 			nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
 
-			pdu = &tqpair->pdu_in_progress;
+			pdu = tqpair->pdu_in_progress;
 			SPDK_DEBUGLOG(nvmf_tcp, "Not need to send r2t for tcp_req(%p) on tqpair=%p\n", tcp_req,
 				      tqpair);
 			/* No need to send r2t, contained in the capsuled data */
test/unit/lib/nvmf/tcp.c/tcp_ut.c

@@ -682,7 +682,7 @@ test_nvmf_tcp_incapsule_data_handle(void)
 {
 	struct spdk_nvmf_tcp_transport ttransport = {};
 	struct spdk_nvmf_tcp_qpair tqpair = {};
-	struct nvme_tcp_pdu *pdu;
+	struct nvme_tcp_pdu *pdu, pdu_in_progress = {};
 	union nvmf_c2h_msg rsp0 = {};
 	union nvmf_c2h_msg rsp = {};
@@ -698,6 +698,7 @@ test_nvmf_tcp_incapsule_data_handle(void)
 	struct spdk_nvmf_tcp_poll_group tcp_group = {};
 	struct spdk_sock_group grp = {};
 
+	tqpair.pdu_in_progress = &pdu_in_progress;
 	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
 	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;
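Because pdu_in_progress is now a pointer rather than embedded storage, the unit test has to supply a backing object itself: the stack-allocated pdu_in_progress variable stands in for the DMA-allocated slot that nvmf_tcp_qpair_init_mem_resource() would normally provide.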
@@ -733,7 +734,7 @@ test_nvmf_tcp_incapsule_data_handle(void)
 	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;
 
 	/* init pdu, make pdu need sgl buff */
-	pdu = &tqpair.pdu_in_progress;
+	pdu = tqpair.pdu_in_progress;
 	capsule_data = &pdu->hdr.capsule_cmd;
 	nvmf_capsule_data = (struct spdk_nvmf_capsule_cmd *)&pdu->hdr.capsule_cmd.ccsqe;
 	sgl = &capsule_data->ccsqe.dptr.sgl1;
@@ -756,7 +757,7 @@ test_nvmf_tcp_incapsule_data_handle(void)
 	sgl->unkeyed.length = UT_IO_UNIT_SIZE - 1;
 
 	/* process tqpair capsule req. but we still remain req in pending_buff. */
-	nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, &tqpair.pdu_in_progress);
+	nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, tqpair.pdu_in_progress);
 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
 	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);
 	STAILQ_FOREACH(req_temp, &group->pending_buf_queue, buf_link) {
@@ -765,7 +766,7 @@ test_nvmf_tcp_incapsule_data_handle(void)
 		}
 	}
 	CU_ASSERT(req_temp == NULL);
-	CU_ASSERT(tqpair.pdu_in_progress.req == (void *)&tcp_req2);
+	CU_ASSERT(tqpair.pdu_in_progress->req == (void *)&tcp_req2);
 }
 
 int main(int argc, char **argv)