nvme: enable multi-process Async Event Reporting
Modified the async_list to be per-process instead of on the controller object. This allows an NVMe multi-process setup to have Asynchronous Events Reported to each process that may be interested in them. In the previous case, where the async event list was on the controller object, AER (Async Event Requests) would not be reported to all the processes. Fixes: #1874 Signed-off-by: Curt Bruns <curt.e.bruns@gmail.com> Change-Id: I3e885c0cf5a0fd471d243bc7d96a8b7ffe65d14b Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8744 Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com> Community-CI: Mellanox Build Bot Tested-by: SPDK CI Jenkins <sys_sgci@intel.com> Reviewed-by: Jim Harris <james.r.harris@intel.com> Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
This commit is contained in:
parent
3c701ad959
commit
4ac203b2de
@ -2867,27 +2867,36 @@ nvme_ctrlr_queue_async_event(struct spdk_nvme_ctrlr *ctrlr,
|
||||
const struct spdk_nvme_cpl *cpl)
|
||||
{
|
||||
struct spdk_nvme_ctrlr_aer_completion_list *nvme_event;
|
||||
struct spdk_nvme_ctrlr_process *proc;
|
||||
|
||||
nvme_event = calloc(1, sizeof(*nvme_event));
|
||||
if (!nvme_event) {
|
||||
NVME_CTRLR_ERRLOG(ctrlr, "Alloc nvme event failed, ignore the event\n");
|
||||
return;
|
||||
/* Add async event to each process objects event list */
|
||||
TAILQ_FOREACH(proc, &ctrlr->active_procs, tailq) {
|
||||
/* Must be shared memory so other processes can access */
|
||||
nvme_event = spdk_zmalloc(sizeof(*nvme_event), 0, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
|
||||
if (!nvme_event) {
|
||||
NVME_CTRLR_ERRLOG(ctrlr, "Alloc nvme event failed, ignore the event\n");
|
||||
return;
|
||||
}
|
||||
nvme_event->cpl = *cpl;
|
||||
|
||||
STAILQ_INSERT_TAIL(&proc->async_events, nvme_event, link);
|
||||
}
|
||||
|
||||
nvme_event->cpl = *cpl;
|
||||
STAILQ_INSERT_TAIL(&ctrlr->async_events, nvme_event, link);
|
||||
}
|
||||
|
||||
void
|
||||
nvme_ctrlr_complete_queued_async_events(struct spdk_nvme_ctrlr *ctrlr)
|
||||
{
|
||||
struct spdk_nvme_ctrlr_aer_completion_list *nvme_event, *nvme_event_tmp;
|
||||
struct spdk_nvme_ctrlr_process *active_proc;
|
||||
|
||||
STAILQ_FOREACH_SAFE(nvme_event, &ctrlr->async_events, link, nvme_event_tmp) {
|
||||
STAILQ_REMOVE(&ctrlr->async_events, nvme_event,
|
||||
active_proc = nvme_ctrlr_get_current_process(ctrlr);
|
||||
|
||||
STAILQ_FOREACH_SAFE(nvme_event, &active_proc->async_events, link, nvme_event_tmp) {
|
||||
STAILQ_REMOVE(&active_proc->async_events, nvme_event,
|
||||
spdk_nvme_ctrlr_aer_completion_list, link);
|
||||
nvme_ctrlr_process_async_event(ctrlr, &nvme_event->cpl);
|
||||
free(nvme_event);
|
||||
spdk_free(nvme_event);
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@ -3083,6 +3092,7 @@ nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
|
||||
ctrlr_proc->devhandle = devhandle;
|
||||
ctrlr_proc->ref = 0;
|
||||
TAILQ_INIT(&ctrlr_proc->allocated_io_qpairs);
|
||||
STAILQ_INIT(&ctrlr_proc->async_events);
|
||||
|
||||
TAILQ_INSERT_TAIL(&ctrlr->active_procs, ctrlr_proc, tailq);
|
||||
|
||||
@ -3125,6 +3135,7 @@ nvme_ctrlr_cleanup_process(struct spdk_nvme_ctrlr_process *proc)
|
||||
{
|
||||
struct nvme_request *req, *tmp_req;
|
||||
struct spdk_nvme_qpair *qpair, *tmp_qpair;
|
||||
struct spdk_nvme_ctrlr_aer_completion_list *event;
|
||||
|
||||
STAILQ_FOREACH_SAFE(req, &proc->active_reqs, stailq, tmp_req) {
|
||||
STAILQ_REMOVE(&proc->active_reqs, req, nvme_request, stailq);
|
||||
@ -3134,6 +3145,13 @@ nvme_ctrlr_cleanup_process(struct spdk_nvme_ctrlr_process *proc)
|
||||
nvme_free_request(req);
|
||||
}
|
||||
|
||||
/* Remove async event from each process objects event list */
|
||||
while (!STAILQ_EMPTY(&proc->async_events)) {
|
||||
event = STAILQ_FIRST(&proc->async_events);
|
||||
STAILQ_REMOVE_HEAD(&proc->async_events, link);
|
||||
spdk_free(event);
|
||||
}
|
||||
|
||||
TAILQ_FOREACH_SAFE(qpair, &proc->allocated_io_qpairs, per_process_tailq, tmp_qpair) {
|
||||
TAILQ_REMOVE(&proc->allocated_io_qpairs, qpair, per_process_tailq);
|
||||
|
||||
@ -3630,7 +3648,6 @@ nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr)
|
||||
|
||||
TAILQ_INIT(&ctrlr->active_io_qpairs);
|
||||
STAILQ_INIT(&ctrlr->queued_aborts);
|
||||
STAILQ_INIT(&ctrlr->async_events);
|
||||
ctrlr->outstanding_aborts = 0;
|
||||
|
||||
ctrlr->ana_log_page = NULL;
|
||||
@ -3687,7 +3704,6 @@ nvme_ctrlr_destruct_async(struct spdk_nvme_ctrlr *ctrlr,
|
||||
struct nvme_ctrlr_detach_ctx *ctx)
|
||||
{
|
||||
struct spdk_nvme_qpair *qpair, *tmp;
|
||||
struct spdk_nvme_ctrlr_aer_completion_list *event;
|
||||
|
||||
NVME_CTRLR_DEBUGLOG(ctrlr, "Prepare to destruct SSD\n");
|
||||
|
||||
@ -3698,12 +3714,6 @@ nvme_ctrlr_destruct_async(struct spdk_nvme_ctrlr *ctrlr,
|
||||
nvme_ctrlr_abort_queued_aborts(ctrlr);
|
||||
nvme_transport_admin_qpair_abort_aers(ctrlr->adminq);
|
||||
|
||||
while (!STAILQ_EMPTY(&ctrlr->async_events)) {
|
||||
event = STAILQ_FIRST(&ctrlr->async_events);
|
||||
STAILQ_REMOVE_HEAD(&ctrlr->async_events, link);
|
||||
free(event);
|
||||
}
|
||||
|
||||
TAILQ_FOREACH_SAFE(qpair, &ctrlr->active_io_qpairs, tailq, tmp) {
|
||||
spdk_nvme_ctrlr_free_io_qpair(qpair);
|
||||
}
|
||||
@ -3826,6 +3836,7 @@ spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
|
||||
{
|
||||
int32_t num_completions;
|
||||
int32_t rc;
|
||||
struct spdk_nvme_ctrlr_process *active_proc;
|
||||
|
||||
nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
|
||||
|
||||
@ -3846,7 +3857,11 @@ spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
|
||||
|
||||
rc = spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
|
||||
|
||||
nvme_ctrlr_complete_queued_async_events(ctrlr);
|
||||
/* Each process has an async list, complete the ones for this process object */
|
||||
active_proc = nvme_ctrlr_get_current_process(ctrlr);
|
||||
if (active_proc) {
|
||||
nvme_ctrlr_complete_queued_async_events(ctrlr);
|
||||
}
|
||||
|
||||
nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
|
||||
|
||||
|
@ -727,6 +727,11 @@ enum nvme_ctrlr_state {
|
||||
|
||||
#define NVME_TIMEOUT_INFINITE 0
|
||||
|
||||
struct spdk_nvme_ctrlr_aer_completion_list {
|
||||
struct spdk_nvme_cpl cpl;
|
||||
STAILQ_ENTRY(spdk_nvme_ctrlr_aer_completion_list) link;
|
||||
};
|
||||
|
||||
/*
|
||||
* Used to track properties for all processes accessing the controller.
|
||||
*/
|
||||
@ -762,12 +767,11 @@ struct spdk_nvme_ctrlr_process {
|
||||
/** separate timeout values for io vs. admin reqs */
|
||||
uint64_t timeout_io_ticks;
|
||||
uint64_t timeout_admin_ticks;
|
||||
|
||||
/** List to publish AENs to all procs in multiprocess setup */
|
||||
STAILQ_HEAD(, spdk_nvme_ctrlr_aer_completion_list) async_events;
|
||||
};
|
||||
|
||||
struct spdk_nvme_ctrlr_aer_completion_list {
|
||||
struct spdk_nvme_cpl cpl;
|
||||
STAILQ_ENTRY(spdk_nvme_ctrlr_aer_completion_list) link;
|
||||
};
|
||||
|
||||
/*
|
||||
* One of these per allocated PCI device.
|
||||
@ -899,8 +903,6 @@ struct spdk_nvme_ctrlr {
|
||||
/* maximum zone append size in bytes */
|
||||
uint32_t max_zone_append_size;
|
||||
|
||||
STAILQ_HEAD(, spdk_nvme_ctrlr_aer_completion_list) async_events;
|
||||
|
||||
/* PMR size in bytes */
|
||||
uint64_t pmr_size;
|
||||
};
|
||||
|
@ -543,7 +543,6 @@ test_nvme_pcie_ctrlr_construct_admin_qpair(void)
|
||||
|
||||
TAILQ_INIT(&pctrlr.ctrlr.active_io_qpairs);
|
||||
STAILQ_INIT(&pctrlr.ctrlr.queued_aborts);
|
||||
STAILQ_INIT(&pctrlr.ctrlr.async_events);
|
||||
TAILQ_INIT(&pctrlr.ctrlr.active_procs);
|
||||
|
||||
rc = nvme_pcie_ctrlr_construct_admin_qpair(&pctrlr.ctrlr, 32);
|
||||
|
Loading…
Reference in New Issue
Block a user