nvme: abort aers at common layer

We submit AERs to all controllers - both PCIe and
fabrics. But currently we only manually abort those
AERs when disabling the admin qpair for PCIe. Make
this common instead by creating a new transport
function for aborting AERs.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I1e926b61b8035488cdc6e8cb4336b373732f985e

Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/453482
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
commit f366e261a6 (parent a5c54a1f6a)
Author: Jim Harris, 2019-05-06 15:29:24 -07:00 (committed by Changpeng Liu)
7 changed files with 65 additions and 3 deletions
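All the per-transport implementations below share one shape, distilled in the sketch that follows. It is an illustrative composite, not additional SPDK source: the xport_* names and the outstanding_reqs list stand in for each transport's own request tracking.

/* Illustrative composite of the per-transport implementations below:
 * synthesize an ABORTED - SQ DELETION completion and hand it to every
 * queued Asynchronous Event Request so none is left outstanding. */
static void
xport_admin_qpair_abort_aers(struct xport_qpair *qpair)
{
	struct xport_req *xreq, *tmp;
	struct spdk_nvme_cpl cpl = {};

	/* Synthesize the status used to signal an aborted AER. */
	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;

	/* Complete and release only the queued Asynchronous Event Requests;
	 * other outstanding admin commands are left alone. */
	TAILQ_FOREACH_SAFE(xreq, &qpair->outstanding_reqs, link, tmp) {
		if (xreq->req->cmd.opc != SPDK_NVME_OPC_ASYNC_EVENT_REQUEST) {
			continue;
		}
		xport_req_complete(xreq->req, &cpl);	/* fires the AER callback */
		xport_req_put(qpair, xreq);
	}
}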


@@ -842,6 +842,8 @@ spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
 		ctrlr->outstanding_aborts--;
 	}
 
+	nvme_transport_admin_qpair_abort_aers(ctrlr->adminq);
+
 	/* Disable all queues before disabling the controller hardware. */
 	nvme_qpair_disable(ctrlr->adminq);
 	TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
@@ -2283,6 +2285,9 @@ nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
 	struct spdk_nvme_qpair *qpair, *tmp;
 
 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "Prepare to destruct SSD: %s\n", ctrlr->trid.traddr);
 
+	nvme_transport_admin_qpair_abort_aers(ctrlr->adminq);
+
 	TAILQ_FOREACH_SAFE(qpair, &ctrlr->active_io_qpairs, tailq, tmp) {
 		spdk_nvme_ctrlr_free_io_qpair(qpair);
 	}
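Completing an AER with ABORTED - SQ DELETION is safe at both call sites because the driver's internal AER completion handler treats that status as "shutting down, do not resubmit". Roughly, paraphrasing nvme_ctrlr_async_event_cb in lib/nvme/nvme_ctrlr.c rather than quoting it:

static void
nvme_ctrlr_async_event_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
	if (cpl->status.sct == SPDK_NVME_SCT_GENERIC &&
	    cpl->status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION) {
		/* Simulated abort during reset/destruct; let the request
		 * be freed instead of re-submitting the AER. */
		return;
	}

	/* ...otherwise report the event to the application and
	 * re-submit the AER... */
}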


@@ -993,7 +993,8 @@ struct spdk_nvme_ctrlr *spdk_nvme_get_ctrlr_by_trid_unsafe(
 	int nvme_ ## name ## _qpair_reset(struct spdk_nvme_qpair *qpair); \
 	int nvme_ ## name ## _qpair_fail(struct spdk_nvme_qpair *qpair); \
 	int nvme_ ## name ## _qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req); \
-	int32_t nvme_ ## name ## _qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions);
+	int32_t nvme_ ## name ## _qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions); \
+	void nvme_ ## name ## _admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair);
 
 DECLARE_TRANSPORT(transport) /* generic transport dispatch functions */
 DECLARE_TRANSPORT(pcie)
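Because DECLARE_TRANSPORT is invoked once per transport name, the single new macro line fans out into one prototype per transport. For example, the token pasting in DECLARE_TRANSPORT(pcie) now also emits:

void nvme_pcie_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair);

and likewise for the other transports' invocations that follow in the header.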


@@ -1360,7 +1360,7 @@ nvme_pcie_qpair_abort_trackers(struct spdk_nvme_qpair *qpair, uint32_t dnr)
 	}
 }
 
-static void
+void
 nvme_pcie_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
 {
 	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
@@ -1448,7 +1448,6 @@ nvme_pcie_qpair_enable(struct spdk_nvme_qpair *qpair)
 static void
 nvme_pcie_admin_qpair_disable(struct spdk_nvme_qpair *qpair)
 {
-	nvme_pcie_admin_qpair_abort_aers(qpair);
 }
 
 static void


@@ -1790,6 +1790,29 @@ nvme_rdma_ctrlr_free_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, void *buf, size_t size)
 	return 0;
 }
 
+void
+nvme_rdma_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
+{
+	struct spdk_nvme_rdma_req *rdma_req, *tmp;
+	struct nvme_request *req;
+	struct spdk_nvme_cpl cpl;
+	struct nvme_rdma_qpair *rqpair = nvme_rdma_qpair(qpair);
+
+	cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
+	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
+
+	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->outstanding_reqs, link, tmp) {
+		if (rdma_req->req->cmd.opc != SPDK_NVME_OPC_ASYNC_EVENT_REQUEST) {
+			continue;
+		}
+		assert(rdma_req->req != NULL);
+		req = rdma_req->req;
+
+		nvme_rdma_req_complete(req, &cpl);
+		nvme_rdma_req_put(rqpair, rdma_req);
+	}
+}
+
 void
 spdk_nvme_rdma_init_hooks(struct spdk_nvme_rdma_hooks *hooks)
 {


@@ -1878,3 +1878,26 @@ nvme_tcp_ctrlr_free_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, void *buf, size_t size)
 {
 	return 0;
 }
+
+void
+nvme_tcp_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
+{
+	struct nvme_tcp_req *tcp_req, *tmp;
+	struct nvme_request *req;
+	struct spdk_nvme_cpl cpl;
+	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
+
+	cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
+	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
+
+	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->outstanding_reqs, link, tmp) {
+		if (tcp_req->req->cmd.opc != SPDK_NVME_OPC_ASYNC_EVENT_REQUEST) {
+			continue;
+		}
+		assert(tcp_req->req != NULL);
+		req = tcp_req->req;
+
+		nvme_tcp_req_complete(req, &cpl);
+		nvme_tcp_req_put(tqpair, tcp_req);
+	}
+}
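Worth noting in both fabrics implementations: only status.sc and status.sct of the stack-allocated completion are assigned, so the remaining fields (cdw0, cid, sqid, and so on) hold indeterminate stack contents. If any completion consumer ever reads those fields, zero-initializing the structure would be cheap hardening; a possible variant, not part of this commit:

	struct spdk_nvme_cpl cpl = {};	/* also clears cdw0, cid, sqid, ... */

	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;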


@@ -220,3 +220,9 @@ nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
 {
 	NVME_TRANSPORT_CALL(qpair->trtype, qpair_process_completions, (qpair, max_completions));
 }
+
+void
+nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
+{
+	NVME_TRANSPORT_CALL(qpair->trtype, admin_qpair_abort_aers, (qpair));
+}
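NVME_TRANSPORT_CALL dispatches on the qpair's transport type to the matching per-transport symbol declared through DECLARE_TRANSPORT. Conceptually, the new wrapper behaves like the hand-written switch below; this is a simplification of the actual macro machinery, not its literal expansion:

void
nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
{
	switch (qpair->trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		nvme_pcie_admin_qpair_abort_aers(qpair);
		break;
	case SPDK_NVME_TRANSPORT_RDMA:
		nvme_rdma_admin_qpair_abort_aers(qpair);
		break;
	case SPDK_NVME_TRANSPORT_TCP:
		nvme_tcp_admin_qpair_abort_aers(qpair);
		break;
	default:
		assert(false);	/* unknown transport type */
	}
}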


@@ -180,6 +180,11 @@ nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
 	return 0;
 }
 
+void
+nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
+{
+}
+
 int
 nvme_driver_init(void)
 {
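This last hunk is a unit-test stub: test binaries link code that now calls nvme_transport_admin_qpair_abort_aers, but the tests never exercise AER aborts, so an empty definition satisfies the linker. In a test file that already pulls in SPDK's mock helpers, the same stub could likely be expressed more compactly as:

DEFINE_STUB_V(nvme_transport_admin_qpair_abort_aers,
	      (struct spdk_nvme_qpair *qpair));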