nvme: move queued request resubmit to generic layer

We were already passing up from each transport the number of completions
reaped during the transport-specific call, so just use that return value
and batch all of the queued-request submissions together at one time in
the generic code.

This change and subsequent moves of code from the transport layer to the
generic layer are aimed at making reset handling at the generic NVMe
layer simpler.

Change-Id: I028aea86d76352363ffffe661deec2215bc9c450
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/469757
Reviewed-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
4 changed files with 34 additions and 33 deletions
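
Before the per-file hunks, a condensed sketch of the flow this commit converges on may help. This is illustrative only, not verbatim commit code; it assumes SPDK's internal lib/nvme definitions (struct spdk_nvme_qpair with its queued_req STAILQ, nvme_qpair_resubmit_request() as added below, and the existing nvme_transport_qpair_process_completions() dispatch).

/* Condensed sketch (not verbatim commit code): the transport reports how
 * many completions it reaped, and the generic layer uses that count to
 * bound how many queued requests it resubmits in one batch. */
static int32_t
process_completions_sketch(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	int32_t ret, i;
	struct nvme_request *req;

	/* Transport-specific poll; returns the number of completions reaped. */
	ret = nvme_transport_qpair_process_completions(qpair, max_completions);
	if (ret <= 0) {
		return ret;
	}

	/* Each completion frees a transport slot, so resubmit at most 'ret'
	 * queued requests; skip entirely while a controller reset is in
	 * progress, since the reset path handles queued requests itself. */
	for (i = 0; i < ret && !STAILQ_EMPTY(&qpair->queued_req) &&
	     !qpair->ctrlr->is_resetting; i++) {
		req = STAILQ_FIRST(&qpair->queued_req);
		STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
		if (nvme_qpair_resubmit_request(qpair, req) != 0) {
			break;
		}
	}

	return ret;
}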

lib/nvme/nvme_pcie.c

@@ -1346,18 +1346,6 @@ nvme_pcie_qpair_complete_tracker(struct spdk_nvme_qpair *qpair, struct nvme_trac
 		TAILQ_REMOVE(&pqpair->outstanding_tr, tr, tq_list);
 		TAILQ_INSERT_HEAD(&pqpair->free_tr, tr, tq_list);
-
-		/*
-		 * If the controller is in the middle of resetting, don't
-		 * try to submit queued requests here - let the reset logic
-		 * handle that instead.
-		 */
-		if (!STAILQ_EMPTY(&qpair->queued_req) &&
-		    !qpair->ctrlr->is_resetting) {
-			req = STAILQ_FIRST(&qpair->queued_req);
-			STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
-			nvme_qpair_submit_request(qpair, req);
-		}
 	}
 }

lib/nvme/nvme_qpair.c

@@ -35,6 +35,7 @@
 #include "spdk/nvme_ocssd.h"
 
 static void nvme_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr);
+static int nvme_qpair_resubmit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req);
 
 struct nvme_string {
 	uint16_t value;

@@ -420,6 +421,8 @@ int32_t
 spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
 {
 	int32_t ret;
+	int32_t resubmit_rc;
+	int32_t i;
 	struct nvme_request *req, *tmp;
 
 	if (qpair->ctrlr->is_failed) {

@@ -460,7 +463,25 @@ spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_
 		 * routine - so it is safe to delete it now.
 		 */
 		spdk_nvme_ctrlr_free_io_qpair(qpair);
+		return ret;
 	}
 
+	/*
+	 * At this point, ret must represent the number of completions we reaped.
+	 * submit as many queued requests as we completed.
+	 */
+	i = 0;
+	while (i < ret && !STAILQ_EMPTY(&qpair->queued_req) && !qpair->ctrlr->is_resetting) {
+		req = STAILQ_FIRST(&qpair->queued_req);
+		STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
+		resubmit_rc = nvme_qpair_resubmit_request(qpair, req);
+		if (spdk_unlikely(resubmit_rc != 0)) {
+			SPDK_ERRLOG("Unable to resubmit as many requests as we completed.\n");
+			break;
+		}
+		i++;
+	}
+
 	return ret;
 }

@@ -659,6 +680,19 @@ nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *re
 	return rc;
 }
 
+static int
+nvme_qpair_resubmit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
+{
+	int rc;
+
+	rc = _nvme_qpair_submit_request(qpair, req);
+	if (spdk_unlikely(rc == -EAGAIN)) {
+		STAILQ_INSERT_HEAD(&qpair->queued_req, req, stailq);
+	}
+
+	return rc;
+}
+
 void
 nvme_qpair_enable(struct spdk_nvme_qpair *qpair)
 {
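
One detail worth calling out in nvme_qpair_resubmit_request() above: on -EAGAIN the request is re-inserted at the head of queued_req, not the tail, so the oldest request stays first and FIFO submission order is preserved across retries. A self-contained demo of that STAILQ property (hypothetical req type, nothing SPDK-specific):

#include <stdio.h>
#include <sys/queue.h>

struct req {
	int id;
	STAILQ_ENTRY(req) link;
};

int main(void)
{
	STAILQ_HEAD(, req) q = STAILQ_HEAD_INITIALIZER(q);
	struct req a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 }, *r;

	/* Three requests queued in submission order. */
	STAILQ_INSERT_TAIL(&q, &a, link);
	STAILQ_INSERT_TAIL(&q, &b, link);
	STAILQ_INSERT_TAIL(&q, &c, link);

	/* Pop the oldest request, as the resubmit loop does. */
	r = STAILQ_FIRST(&q);
	STAILQ_REMOVE_HEAD(&q, link);

	/* Submission "failed" with -EAGAIN: put it back at the HEAD so it
	 * is retried before anything that was queued after it. */
	STAILQ_INSERT_HEAD(&q, r, link);

	STAILQ_FOREACH(r, &q, link) {
		printf("req %d\n", r->id);	/* prints 1, 2, 3: order kept */
	}
	return 0;
}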

lib/nvme/nvme_rdma.c

@@ -655,7 +655,6 @@ fail:
 static int
 nvme_rdma_recv(struct nvme_rdma_qpair *rqpair, uint64_t rsp_idx)
 {
-	struct spdk_nvme_qpair *qpair = &rqpair->qpair;
 	struct spdk_nvme_rdma_req *rdma_req;
 	struct spdk_nvme_cpl *rsp;
 	struct nvme_request *req;

@@ -678,12 +677,6 @@ nvme_rdma_recv(struct nvme_rdma_qpair *rqpair, uint64_t rsp_idx)
 		return -1;
 	}
 
-	if (!STAILQ_EMPTY(&qpair->queued_req) && !qpair->ctrlr->is_resetting) {
-		req = STAILQ_FIRST(&qpair->queued_req);
-		STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
-		nvme_qpair_submit_request(qpair, req);
-	}
-
 	return 0;
 }

lib/nvme/nvme_tcp.c

@@ -921,18 +921,6 @@ get_nvme_active_req_by_cid(struct nvme_tcp_qpair *tqpair, uint32_t cid)
 	return &tqpair->tcp_reqs[cid];
 }
 
-static void
-nvme_tcp_free_and_handle_queued_req(struct spdk_nvme_qpair *qpair)
-{
-	struct nvme_request *req;
-
-	if (!STAILQ_EMPTY(&qpair->queued_req) && !qpair->ctrlr->is_resetting) {
-		req = STAILQ_FIRST(&qpair->queued_req);
-		STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
-		nvme_qpair_submit_request(qpair, req);
-	}
-}
-
 static void
 nvme_tcp_c2h_data_payload_handle(struct nvme_tcp_qpair *tqpair,
 				 struct nvme_tcp_pdu *pdu, uint32_t *reaped)

@@ -963,7 +951,6 @@ nvme_tcp_c2h_data_payload_handle(struct nvme_tcp_qpair *tqpair,
 		nvme_tcp_req_complete(tcp_req->req, &cpl);
 		nvme_tcp_req_put(tqpair, tcp_req);
 		(*reaped)++;
-		nvme_tcp_free_and_handle_queued_req(&tqpair->qpair);
 	}
 }

@@ -1123,7 +1110,6 @@ nvme_tcp_capsule_resp_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_
 	nvme_tcp_req_complete(tcp_req->req, &cpl);
 	nvme_tcp_req_put(tqpair, tcp_req);
 	(*reaped)++;
-	nvme_tcp_free_and_handle_queued_req(&tqpair->qpair);
 
 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "complete tcp_req(%p) on tqpair=%p\n", tcp_req, tqpair);
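
For API consumers nothing changes: spdk_nvme_qpair_process_completions() still returns the number of completions reaped, and that value now also bounds the internal resubmit batch. For reference, a typical application poll loop (hypothetical names, error handling elided):

#include <stdbool.h>
#include "spdk/nvme.h"

/* Hypothetical application poller: drive completions on one I/O qpair
 * until the caller signals that its outstanding I/O has finished. */
static void
app_poll(struct spdk_nvme_qpair *qpair, volatile bool *done)
{
	int32_t rc;

	while (!*done) {
		/* 0 means no per-call completion limit; a negative return
		 * means the qpair has failed. */
		rc = spdk_nvme_qpair_process_completions(qpair, 0);
		if (rc < 0) {
			break;
		}
	}
}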