nvmf/tcp: fix qpair resource recycling issues to avoid a memory leak.

This patch addresses the following two cases:

1. Free the pdu resources. Add the check of c2h_data_pdu_cnt of the qpair.
2. Do not recycle the req according to the pdu in the send_queue, but directly recycle the reqs in TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST state.

Change-Id: I5856c3421019ec49d576d3dae4c62fefbb3925ca
Signed-off-by: Ziye Yang <optimistyzy@gmail.com>
Reviewed-on: https://review.gerrithub.io/c/440847
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
parent a3c9ab66c8
commit c43cb6a706
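As a rough illustration of the two cleanup steps the message describes, here is a minimal standalone sketch in C. It is not the SPDK implementation: the stub types, the field names, and the drain_send_queue/drain_state_queue/cleanup_qpair helpers are hypothetical placeholders, and TAILQ_FOREACH_SAFE is assumed to come from a BSD-style sys/queue.h (SPDK ships an equivalent), with a fallback definition supplied for libcs that lack it.

    /*
     * Sketch only, not SPDK code: (1) drain the send_queue, decrementing the
     * outstanding C2H data PDU counter as C2H_DATA PDUs are freed, and
     * (2) recycle requests directly from their per-state list instead of
     * locating them through queued PDUs.
     */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdlib.h>
    #include <sys/queue.h>

    #ifndef TAILQ_FOREACH_SAFE            /* glibc's queue.h lacks the _SAFE variant */
    #define TAILQ_FOREACH_SAFE(var, head, field, tvar)              \
            for ((var) = TAILQ_FIRST((head));                       \
                 (var) && ((tvar) = TAILQ_NEXT((var), field), 1);   \
                 (var) = (tvar))
    #endif

    enum req_state { REQ_STATE_NEW, REQ_STATE_TRANSFERRING_C2H, REQ_STATE_MAX };

    struct pdu_stub {
            bool is_c2h_data;             /* stands in for the C2H_DATA pdu type check */
            TAILQ_ENTRY(pdu_stub) tailq;
    };

    struct req_stub {
            TAILQ_ENTRY(req_stub) state_link;
    };

    struct qpair_stub {
            TAILQ_HEAD(, pdu_stub) send_queue;
            unsigned int c2h_data_pdu_cnt;            /* queued-but-unsent C2H data PDUs */
            TAILQ_HEAD(, req_stub) state_queue[REQ_STATE_MAX];
    };

    /* (1) Free every queued PDU; keep the C2H data PDU count consistent. */
    static void
    drain_send_queue(struct qpair_stub *qp)
    {
            struct pdu_stub *pdu, *tmp;

            TAILQ_FOREACH_SAFE(pdu, &qp->send_queue, tailq, tmp) {
                    TAILQ_REMOVE(&qp->send_queue, pdu, tailq);
                    if (pdu->is_c2h_data) {
                            assert(qp->c2h_data_pdu_cnt > 0);
                            qp->c2h_data_pdu_cnt--;
                    }
                    free(pdu);            /* placeholder for returning the PDU to its pool */
            }
    }

    /* (2) Recycle every request parked in the given state, directly. */
    static void
    drain_state_queue(struct qpair_stub *qp, enum req_state state)
    {
            struct req_stub *req, *tmp;

            TAILQ_FOREACH_SAFE(req, &qp->state_queue[state], state_link, tmp) {
                    TAILQ_REMOVE(&qp->state_queue[state], req, state_link);
                    free(req);            /* placeholder for returning the request to its pool */
            }
    }

    /* Cleanup order mirroring the fix: PDUs first, then the C2H-transferring requests. */
    static void
    cleanup_qpair(struct qpair_stub *qp)
    {
            drain_send_queue(qp);
            drain_state_queue(qp, REQ_STATE_TRANSFERRING_C2H);
    }

Draining by request state, rather than walking the PDUs left in the send_queue, is what the commit message means by recycling the reqs directly; the actual change in the diff below applies this inside spdk_nvmf_tcp_cleanup_all_states().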
@@ -453,16 +453,22 @@ spdk_nvmf_tcp_cleanup_all_states(struct nvme_tcp_qpair *tqpair)
        struct nvme_tcp_req *tcp_req, *req_tmp;
        struct nvme_tcp_pdu *pdu, *tmp_pdu;

        /* For the requests in TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST,
         * they can be refreed via the pdu in the send_queue
         */
        /* Free the pdus in the send_queue */
        TAILQ_FOREACH_SAFE(pdu, &tqpair->send_queue, tailq, tmp_pdu) {
                TAILQ_REMOVE(&tqpair->send_queue, pdu, tailq);
                assert(pdu->cb_fn != NULL);
                pdu->cb_fn(pdu->cb_arg);
                /* Also check the pdu type, we need to calculte the c2h_data_pdu_cnt later */
                if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_DATA) {
                        assert(tqpair->c2h_data_pdu_cnt > 0);
                        tqpair->c2h_data_pdu_cnt--;
                }
                spdk_nvmf_tcp_pdu_put(pdu);
        }

        TAILQ_FOREACH_SAFE(tcp_req, &tqpair->queued_c2h_data_tcp_req, link, req_tmp) {
                TAILQ_REMOVE(&tqpair->queued_c2h_data_tcp_req, tcp_req, link);
        }
        spdk_nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);

        spdk_nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_NEW);

        spdk_nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_DATA_PENDING_FOR_R2T);

@@ -475,11 +481,6 @@ spdk_nvmf_tcp_cleanup_all_states(struct nvme_tcp_qpair *tqpair)

        spdk_nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_NEED_BUFFER);
        spdk_nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_EXECUTING);

        TAILQ_FOREACH_SAFE(tcp_req, &tqpair->queued_c2h_data_tcp_req, link, req_tmp) {
                TAILQ_REMOVE(&tqpair->queued_c2h_data_tcp_req, tcp_req, link);
        }

        spdk_nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
}