From c43cb6a706054eb6e1c370a451e480acb1360160 Mon Sep 17 00:00:00 2001
From: Ziye Yang
Date: Thu, 17 Jan 2019 22:42:49 +0800
Subject: [PATCH] nvmf/tcp: fix qpair resource recycling to avoid memory leaks

This patch fixes the following two cases:

1 Free the pdu resources, and keep the qpair's c2h_data_pdu_cnt accounting
  correct while doing so.
2 Do not recycle the reqs via the pdus in the send_queue; instead, directly
  recycle the reqs in the TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST
  state.

Change-Id: I5856c3421019ec49d576d3dae4c62fefbb3925ca
Signed-off-by: Ziye Yang
Reviewed-on: https://review.gerrithub.io/c/440847
Reviewed-by: Shuhei Matsumoto
Reviewed-by: Ben Walker
Reviewed-by: Changpeng Liu
Tested-by: SPDK CI Jenkins
Chandler-Test-Pool: SPDK Automated Test System
---
 lib/nvmf/tcp.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/lib/nvmf/tcp.c b/lib/nvmf/tcp.c
index faece743c7..7088e8a4b4 100644
--- a/lib/nvmf/tcp.c
+++ b/lib/nvmf/tcp.c
@@ -453,16 +453,22 @@ spdk_nvmf_tcp_cleanup_all_states(struct nvme_tcp_qpair *tqpair)
 	struct nvme_tcp_req *tcp_req, *req_tmp;
 	struct nvme_tcp_pdu *pdu, *tmp_pdu;
 
-	/* For the requests in TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST,
-	 * they can be refreed via the pdu in the send_queue
-	 */
+	/* Free the pdus in the send_queue */
 	TAILQ_FOREACH_SAFE(pdu, &tqpair->send_queue, tailq, tmp_pdu) {
 		TAILQ_REMOVE(&tqpair->send_queue, pdu, tailq);
-		assert(pdu->cb_fn != NULL);
-		pdu->cb_fn(pdu->cb_arg);
+		/* Also check the pdu type, we need to calculate the c2h_data_pdu_cnt later */
+		if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_DATA) {
+			assert(tqpair->c2h_data_pdu_cnt > 0);
+			tqpair->c2h_data_pdu_cnt--;
+		}
 		spdk_nvmf_tcp_pdu_put(pdu);
 	}
 
+	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->queued_c2h_data_tcp_req, link, req_tmp) {
+		TAILQ_REMOVE(&tqpair->queued_c2h_data_tcp_req, tcp_req, link);
+	}
+	spdk_nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
+
 	spdk_nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_NEW);
 	spdk_nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_DATA_PENDING_FOR_R2T);
 
@@ -475,11 +481,6 @@ spdk_nvmf_tcp_cleanup_all_states(struct nvme_tcp_qpair *tqpair)
 
 	spdk_nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_NEED_BUFFER);
 	spdk_nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_EXECUTING);
-
-	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->queued_c2h_data_tcp_req, link, req_tmp) {
-		TAILQ_REMOVE(&tqpair->queued_c2h_data_tcp_req, tcp_req, link);
-	}
-
 	spdk_nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
 }
 
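
Note on the pattern above: the cleanup path now walks the send_queue, frees each pdu,
and decrements c2h_data_pdu_cnt for every C2H DATA pdu it frees; requests in
TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST are then recycled from their state
queue rather than through the pdu callbacks. The fragment below is a minimal,
self-contained sketch of that drain-and-account pattern, not SPDK code: the pdu and
qpair structs are simplified stand-ins, and it uses a while/TAILQ_FIRST loop instead
of TAILQ_FOREACH_SAFE so it builds with a plain glibc <sys/queue.h>.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

/* Simplified stand-ins for the SPDK pdu/qpair types touched by the patch. */
enum pdu_type { PDU_TYPE_C2H_DATA, PDU_TYPE_OTHER };

struct pdu {
	enum pdu_type type;
	TAILQ_ENTRY(pdu) tailq;
};

struct qpair {
	TAILQ_HEAD(, pdu) send_queue;
	int c2h_data_pdu_cnt;	/* outstanding C2H DATA pdus */
};

/* Drain every pdu left in the send_queue; for each C2H DATA pdu freed, drop
 * the counter so the qpair's accounting ends at zero before teardown. */
static void
cleanup_send_queue(struct qpair *q)
{
	struct pdu *pdu;

	while ((pdu = TAILQ_FIRST(&q->send_queue)) != NULL) {
		TAILQ_REMOVE(&q->send_queue, pdu, tailq);
		if (pdu->type == PDU_TYPE_C2H_DATA) {
			assert(q->c2h_data_pdu_cnt > 0);
			q->c2h_data_pdu_cnt--;
		}
		free(pdu);
	}
}

int
main(void)
{
	struct qpair q = { .c2h_data_pdu_cnt = 0 };
	struct pdu *p;

	TAILQ_INIT(&q.send_queue);

	/* Queue one C2H DATA pdu and one control pdu, then clean up. */
	p = calloc(1, sizeof(*p));
	p->type = PDU_TYPE_C2H_DATA;
	TAILQ_INSERT_TAIL(&q.send_queue, p, tailq);
	q.c2h_data_pdu_cnt++;

	p = calloc(1, sizeof(*p));
	p->type = PDU_TYPE_OTHER;
	TAILQ_INSERT_TAIL(&q.send_queue, p, tailq);

	cleanup_send_queue(&q);
	printf("outstanding C2H DATA pdus after cleanup: %d\n", q.c2h_data_pdu_cnt);
	return 0;
}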