nvmf/rdma: Fix data WR release

One of the stop conditions in the data WR release function was wrong.
This could cause the release of data WRs that had not yet completed.
Releasing WRs that are still outstanding leads to various side
effects, up to and including data corruption.

The issue was introduced with the send WR batching feature in commit
9d63933b7f.

This patch fixes the stop condition and refactors the WR release
function to make it simpler.
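To make the failure mode concrete, here is a minimal sketch under
simplified assumptions: the struct fake_send_wr type and the
count_request_wrs() helper are invented for illustration and mirror
only the wr_id/next fields of ibv_send_wr; this is not SPDK or
rdma-core code.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for ibv_send_wr: only the fields that matter
 * here. All WRs belonging to one request carry the same wr_id. */
struct fake_send_wr {
        uint64_t wr_id;
        struct fake_send_wr *next;
};

/* Count the WRs owned by the request whose chain starts at 'first',
 * using the fixed stop condition: remember the first wr_id and stop
 * as soon as it changes. */
static int
count_request_wrs(const struct fake_send_wr *first)
{
        uint64_t req_wrid = first->wr_id;
        const struct fake_send_wr *wr;
        int n = 0;

        for (wr = first; wr != NULL && wr->wr_id == req_wrid; wr = wr->next) {
                n++;
        }
        return n;
}

int
main(void)
{
        /* Send WR batching can chain two requests together:
         * A (two data WRs) followed by B (one data WR). */
        struct fake_send_wr b1 = { .wr_id = 0xB, .next = NULL };
        struct fake_send_wr a2 = { .wr_id = 0xA, .next = &b1 };
        struct fake_send_wr a1 = { .wr_id = 0xA, .next = &a2 };

        /* Prints 2: the walk stops at b1 because its wr_id differs,
         * even though b1 is reachable through next. The buggy code
         * followed the first next pointer without comparing wr_id, so
         * it could step into (and release) the WRs of a batched,
         * still-outstanding request like B. */
        printf("request A owns %d data WR(s)\n", count_request_wrs(&a1));
        return 0;
}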

Signed-off-by: Evgeniy Kochetov <evgeniik@mellanox.com>
Signed-off-by: Sasha Kotchubievsky <sashakot@mellanox.com>
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/466029 (master)

(cherry picked from commit 01887d3c96)
Change-Id: Ie79f64da345e38038f16a0210bef240f63af325b
Signed-off-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/467140
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Author: Evgeniy Kochetov, 2019-08-19 12:29:11 +00:00 (committed by Ben Walker)
parent 6787994d30
commit d079cf5a70


@@ -644,40 +644,22 @@ static void
 nvmf_rdma_request_free_data(struct spdk_nvmf_rdma_request *rdma_req,
                             struct spdk_nvmf_rdma_transport *rtransport)
 {
-        struct spdk_nvmf_rdma_request_data *current_data_wr = NULL, *next_data_wr = NULL;
-        struct ibv_send_wr *send_wr;
-        int i;
+        struct spdk_nvmf_rdma_request_data *data_wr;
+        struct ibv_send_wr *next_send_wr;
+        uint64_t req_wrid;
 
         rdma_req->num_outstanding_data_wr = 0;
-        current_data_wr = &rdma_req->data;
-        for (i = 0; i < current_data_wr->wr.num_sge; i++) {
-                current_data_wr->wr.sg_list[i].addr = 0;
-                current_data_wr->wr.sg_list[i].length = 0;
-                current_data_wr->wr.sg_list[i].lkey = 0;
-        }
-        current_data_wr->wr.num_sge = 0;
-
-        send_wr = current_data_wr->wr.next;
-        if (send_wr != NULL && send_wr != &rdma_req->rsp.wr) {
-                next_data_wr = SPDK_CONTAINEROF(send_wr, struct spdk_nvmf_rdma_request_data, wr);
-        }
-        while (next_data_wr) {
-                current_data_wr = next_data_wr;
-                send_wr = current_data_wr->wr.next;
-                if (send_wr != NULL && send_wr != &rdma_req->rsp.wr &&
-                    send_wr->wr_id == current_data_wr->wr.wr_id) {
-                        next_data_wr = SPDK_CONTAINEROF(send_wr, struct spdk_nvmf_rdma_request_data, wr);
-                } else {
-                        next_data_wr = NULL;
+        data_wr = &rdma_req->data;
+        req_wrid = data_wr->wr.wr_id;
+        while (data_wr && data_wr->wr.wr_id == req_wrid) {
+                memset(data_wr->sgl, 0, sizeof(data_wr->wr.sg_list[0]) * data_wr->wr.num_sge);
+                data_wr->wr.num_sge = 0;
+                next_send_wr = data_wr->wr.next;
+                if (data_wr != &rdma_req->data) {
+                        spdk_mempool_put(rtransport->data_wr_pool, data_wr);
                 }
-                for (i = 0; i < current_data_wr->wr.num_sge; i++) {
-                        current_data_wr->wr.sg_list[i].addr = 0;
-                        current_data_wr->wr.sg_list[i].length = 0;
-                        current_data_wr->wr.sg_list[i].lkey = 0;
-                }
-                current_data_wr->wr.num_sge = 0;
-                spdk_mempool_put(rtransport->data_wr_pool, current_data_wr);
+                data_wr = (!next_send_wr || next_send_wr == &rdma_req->rsp.wr) ? NULL :
+                          SPDK_CONTAINEROF(next_send_wr, struct spdk_nvmf_rdma_request_data, wr);
         }
 }
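In the fixed loop, req_wrid is captured from the request's first data
WR, so every hop uses the same stop condition: the walk ends when
wr.next is NULL, points at the request's own response WR, or carries a
wr_id belonging to another batched request. The data_wr !=
&rdma_req->data guard keeps the first data WR, which is embedded in
the request object rather than drawn from the pool, from being
returned to data_wr_pool.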