nvme: add helper to wait for internal commands
Factor out the common pattern of waiting for an internally-submitted command to complete. This will give us a convenient central place to add error checking.

Change-Id: I65334d654d294cfb208fc86d16fa387ac5432254
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-on: https://review.gerrithub.io/412545
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
parent 7f51e8e5e2
commit c4bb0ea600
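The helper centralizes the submit-then-poll idiom used at every internal command call site. Below is a minimal sketch of the resulting caller pattern, drawn from the nvme_ctrlr_identify hunk further down (the surrounding function, the adminq member, and the error codes come from that call site, not from any new API):

	struct nvme_completion_poll_status status;
	int rc;

	/* Submit the admin command with nvme_completion_poll_cb and &status as
	 * the callback pair; the helper clears status.cpl and status.done
	 * itself before polling, so the old "status.done = false;" at each
	 * call site is no longer needed. */
	rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_CTRLR, 0, 0,
				     &ctrlr->cdata, sizeof(ctrlr->cdata),
				     nvme_completion_poll_cb, &status);
	if (rc != 0) {
		return rc;
	}

	/* Polls ctrlr->adminq until the command completes; returns 0 on
	 * success and -EIO if the completion carried an error status. */
	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
		SPDK_ERRLOG("nvme_identify_controller failed!\n");
		return -ENXIO;
	}

Call sites that must poll under the controller lock use spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock) instead, as in the attach/detach/format hunks below.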
@@ -99,6 +99,49 @@ nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
 	status->done = true;
 }
 
+/**
+ * Poll qpair for completions until a command completes.
+ *
+ * \param qpair queue to poll
+ * \param status completion status
+ * \param robust_mutex optional robust mutex to lock while polling qpair
+ *
+ * \return 0 if command completed without error, negative errno on failure
+ *
+ * The command to wait upon must be submitted with nvme_completion_poll_cb as the callback
+ * and status as the callback argument.
+ */
+int
+spdk_nvme_wait_for_completion_robust_lock(
+	struct spdk_nvme_qpair *qpair,
+	struct nvme_completion_poll_status *status,
+	pthread_mutex_t *robust_mutex)
+{
+	memset(&status->cpl, 0, sizeof(status->cpl));
+	status->done = false;
+
+	while (status->done == false) {
+		if (robust_mutex) {
+			nvme_robust_mutex_lock(robust_mutex);
+		}
+
+		spdk_nvme_qpair_process_completions(qpair, 0);
+
+		if (robust_mutex) {
+			nvme_robust_mutex_unlock(robust_mutex);
+		}
+	}
+
+	return spdk_nvme_cpl_is_error(&status->cpl) ? -EIO : 0;
+}
+
+int
+spdk_nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
+			      struct nvme_completion_poll_status *status)
+{
+	return spdk_nvme_wait_for_completion_robust_lock(qpair, status, NULL);
+}
+
 struct nvme_request *
 nvme_allocate_request(struct spdk_nvme_qpair *qpair,
 		      const struct nvme_payload *payload, uint32_t payload_size,
@@ -380,15 +380,11 @@ static int nvme_ctrlr_set_intel_support_log_pages(struct spdk_nvme_ctrlr *ctrlr)
 		return -ENXIO;
 	}
 
-	status.done = false;
 	spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY, SPDK_NVME_GLOBAL_NS_TAG,
 					 log_page_directory, sizeof(struct spdk_nvme_intel_log_page_directory), 0,
 					 nvme_completion_poll_cb,
 					 &status);
-	while (status.done == false) {
-		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
-	}
-	if (spdk_nvme_cpl_is_error(&status.cpl)) {
+	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
 		spdk_dma_free(log_page_directory);
 		SPDK_ERRLOG("nvme_ctrlr_cmd_get_log_page failed!\n");
 		return -ENXIO;
@@ -673,17 +669,13 @@ nvme_ctrlr_set_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr)
 		goto error;
 	}
 
-	status.done = false;
 	rc = nvme_ctrlr_cmd_doorbell_buffer_config(ctrlr, prp1, prp2,
 			nvme_completion_poll_cb, &status);
 	if (rc != 0) {
 		goto error;
 	}
 
-	while (status.done == false) {
-		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
-	}
-	if (spdk_nvme_cpl_is_error(&status.cpl)) {
+	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
 		goto error;
 	}
 
@@ -771,7 +763,6 @@ nvme_ctrlr_identify(struct spdk_nvme_ctrlr *ctrlr)
 	struct nvme_completion_poll_status status;
 	int rc;
 
-	status.done = false;
 	rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_CTRLR, 0, 0,
 				     &ctrlr->cdata, sizeof(ctrlr->cdata),
 				     nvme_completion_poll_cb, &status);
@@ -779,10 +770,7 @@ nvme_ctrlr_identify(struct spdk_nvme_ctrlr *ctrlr)
 		return rc;
 	}
 
-	while (status.done == false) {
-		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
-	}
-	if (spdk_nvme_cpl_is_error(&status.cpl)) {
+	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
 		SPDK_ERRLOG("nvme_identify_controller failed!\n");
 		return -ENXIO;
 	}
@@ -824,24 +812,20 @@ nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr)
 		SPDK_ERRLOG("Failed to allocate active_ns_list!\n");
 		return -ENOMEM;
 	}
-	status.done = false;
+
 	if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 1, 0) && !(ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
 		/*
 		 * Iterate through the pages and fetch each chunk of 1024 namespaces until
 		 * there are no more active namespaces
 		 */
 		for (i = 0; i < num_pages; i++) {
-			status.done = false;
 			rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST, 0, next_nsid,
 						     &new_ns_list[1024 * i], sizeof(struct spdk_nvme_ns_list),
 						     nvme_completion_poll_cb, &status);
 			if (rc != 0) {
 				goto fail;
 			}
-			while (status.done == false) {
-				spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
-			}
-			if (spdk_nvme_cpl_is_error(&status.cpl)) {
+			if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
 				SPDK_ERRLOG("nvme_ctrlr_cmd_identify_active_ns_list failed!\n");
 				rc = -ENXIO;
 				goto fail;
@@ -885,8 +869,6 @@ nvme_ctrlr_set_num_qpairs(struct spdk_nvme_ctrlr *ctrlr)
 	uint32_t cq_allocated, sq_allocated, min_allocated, i;
 	int rc;
 
-	status.done = false;
-
 	if (ctrlr->opts.num_io_queues > SPDK_NVME_MAX_IO_QUEUES) {
 		SPDK_NOTICELOG("Limiting requested num_io_queues %u to max %d\n",
 			       ctrlr->opts.num_io_queues, SPDK_NVME_MAX_IO_QUEUES);
@@ -902,24 +884,17 @@ nvme_ctrlr_set_num_qpairs(struct spdk_nvme_ctrlr *ctrlr)
 		return rc;
 	}
 
-	while (status.done == false) {
-		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
-	}
-	if (spdk_nvme_cpl_is_error(&status.cpl)) {
+	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
 		SPDK_ERRLOG("Set Features - Number of Queues failed!\n");
 	}
 
 	/* Obtain the number of queues allocated using Get Features. */
-	status.done = false;
 	rc = nvme_ctrlr_cmd_get_num_queues(ctrlr, nvme_completion_poll_cb, &status);
 	if (rc != 0) {
 		return rc;
 	}
 
-	while (status.done == false) {
-		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
-	}
-	if (spdk_nvme_cpl_is_error(&status.cpl)) {
+	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
 		SPDK_ERRLOG("Get Features - Number of Queues failed!\n");
 		ctrlr->opts.num_io_queues = 0;
 	} else {
@@ -973,7 +948,6 @@ nvme_ctrlr_set_keep_alive_timeout(struct spdk_nvme_ctrlr *ctrlr)
 	}
 
 	/* Retrieve actual keep alive timeout, since the controller may have adjusted it. */
-	status.done = false;
 	rc = spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_KEEP_ALIVE_TIMER, 0, NULL, 0,
 					     nvme_completion_poll_cb, &status);
 	if (rc != 0) {
@@ -982,10 +956,7 @@ nvme_ctrlr_set_keep_alive_timeout(struct spdk_nvme_ctrlr *ctrlr)
 		return rc;
 	}
 
-	while (status.done == false) {
-		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
-	}
-	if (spdk_nvme_cpl_is_error(&status.cpl)) {
+	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
 		SPDK_ERRLOG("Keep alive timeout Get Feature failed: SC %x SCT %x\n",
 			    status.cpl.status.sc, status.cpl.status.sct);
 		ctrlr->opts.keep_alive_timeout_ms = 0;
@@ -1049,17 +1020,13 @@ nvme_ctrlr_set_host_id(struct spdk_nvme_ctrlr *ctrlr)
 
 	SPDK_TRACEDUMP(SPDK_LOG_NVME, "host_id", host_id, host_id_size);
 
-	status.done = false;
 	rc = nvme_ctrlr_cmd_set_host_id(ctrlr, host_id, host_id_size, nvme_completion_poll_cb, &status);
 	if (rc != 0) {
 		SPDK_ERRLOG("Set Features - Host ID failed: %d\n", rc);
 		return rc;
 	}
 
-	while (status.done == false) {
-		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
-	}
-	if (spdk_nvme_cpl_is_error(&status.cpl)) {
+	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
 		SPDK_WARNLOG("Set Features - Host ID failed: SC 0x%x SCT 0x%x\n",
 			     status.cpl.status.sc, status.cpl.status.sct);
 		/*
@@ -1227,16 +1194,12 @@ _nvme_ctrlr_configure_aer(struct spdk_nvme_ctrlr *ctrlr)
 		config.bits.telemetry_log_notice = 1;
 	}
 
-	status.done = false;
 	rc = nvme_ctrlr_cmd_set_async_event_config(ctrlr, config, nvme_completion_poll_cb, &status);
 	if (rc != 0) {
 		return rc;
 	}
 
-	while (status.done == false) {
-		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
-	}
-	if (spdk_nvme_cpl_is_error(&status.cpl)) {
+	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
 		return -ENXIO;
 	}
 
@@ -2050,18 +2013,12 @@ spdk_nvme_ctrlr_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
 	struct nvme_completion_poll_status status;
 	int res;
 
-	status.done = false;
 	res = nvme_ctrlr_cmd_attach_ns(ctrlr, nsid, payload,
 				       nvme_completion_poll_cb, &status);
 	if (res) {
 		return res;
 	}
-	while (status.done == false) {
-		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
-		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
-		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
-	}
-	if (spdk_nvme_cpl_is_error(&status.cpl)) {
+	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
 		SPDK_ERRLOG("spdk_nvme_ctrlr_attach_ns failed!\n");
 		return -ENXIO;
 	}
@@ -2076,18 +2033,12 @@ spdk_nvme_ctrlr_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
 	struct nvme_completion_poll_status status;
 	int res;
 
-	status.done = false;
 	res = nvme_ctrlr_cmd_detach_ns(ctrlr, nsid, payload,
 				       nvme_completion_poll_cb, &status);
 	if (res) {
 		return res;
 	}
-	while (status.done == false) {
-		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
-		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
-		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
-	}
-	if (spdk_nvme_cpl_is_error(&status.cpl)) {
+	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
 		SPDK_ERRLOG("spdk_nvme_ctrlr_detach_ns failed!\n");
 		return -ENXIO;
 	}
@@ -2101,17 +2052,11 @@ spdk_nvme_ctrlr_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_dat
 	struct nvme_completion_poll_status status;
 	int res;
 
-	status.done = false;
 	res = nvme_ctrlr_cmd_create_ns(ctrlr, payload, nvme_completion_poll_cb, &status);
 	if (res) {
 		return 0;
 	}
-	while (status.done == false) {
-		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
-		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
-		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
-	}
-	if (spdk_nvme_cpl_is_error(&status.cpl)) {
+	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
 		SPDK_ERRLOG("spdk_nvme_ctrlr_create_ns failed!\n");
 		return 0;
 	}
@@ -2131,17 +2076,11 @@ spdk_nvme_ctrlr_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
 	struct nvme_completion_poll_status status;
 	int res;
 
-	status.done = false;
 	res = nvme_ctrlr_cmd_delete_ns(ctrlr, nsid, nvme_completion_poll_cb, &status);
 	if (res) {
 		return res;
 	}
-	while (status.done == false) {
-		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
-		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
-		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
-	}
-	if (spdk_nvme_cpl_is_error(&status.cpl)) {
+	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
 		SPDK_ERRLOG("spdk_nvme_ctrlr_delete_ns failed!\n");
 		return -ENXIO;
 	}
@@ -2156,18 +2095,12 @@ spdk_nvme_ctrlr_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
 	struct nvme_completion_poll_status status;
 	int res;
 
-	status.done = false;
 	res = nvme_ctrlr_cmd_format(ctrlr, nsid, format, nvme_completion_poll_cb,
 				    &status);
 	if (res) {
 		return res;
 	}
-	while (status.done == false) {
-		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
-		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
-		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
-	}
-	if (spdk_nvme_cpl_is_error(&status.cpl)) {
+	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
 		SPDK_ERRLOG("spdk_nvme_ctrlr_format failed!\n");
 		return -ENXIO;
 	}
@@ -2212,7 +2145,6 @@ spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, ui
 
 	while (size_remaining > 0) {
 		transfer = spdk_min(size_remaining, ctrlr->min_page_size);
-		status.done = false;
 
 		res = nvme_ctrlr_cmd_fw_image_download(ctrlr, transfer, offset, p,
 						       nvme_completion_poll_cb,
@@ -2221,12 +2153,7 @@ spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, ui
 			return res;
 		}
 
-		while (status.done == false) {
-			nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
-			spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
-			nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
-		}
-		if (spdk_nvme_cpl_is_error(&status.cpl)) {
+		if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
 			SPDK_ERRLOG("spdk_nvme_ctrlr_fw_image_download failed!\n");
 			return -ENXIO;
 		}
@@ -2240,21 +2167,17 @@ spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, ui
 	fw_commit.fs = slot;
 	fw_commit.ca = commit_action;
 
-	status.done = false;
-
 	res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit, nvme_completion_poll_cb,
 				       &status);
 	if (res) {
 		return res;
 	}
 
-	while (status.done == false) {
-		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
-		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
-		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
-	}
+	res = spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock);
+
 	memcpy(completion_status, &status.cpl.status, sizeof(struct spdk_nvme_status));
-	if (spdk_nvme_cpl_is_error(&status.cpl)) {
+
+	if (res) {
 		if (status.cpl.status.sct != SPDK_NVME_SCT_COMMAND_SPECIFIC ||
 		    status.cpl.status.sc != SPDK_NVME_SC_FIRMWARE_REQ_NVM_RESET) {
 			if (status.cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC &&
@@ -576,6 +576,11 @@ int nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
 				     uint32_t size, uint32_t offset, void *payload,
 				     spdk_nvme_cmd_cb cb_fn, void *cb_arg);
 void nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl);
+int spdk_nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
+				  struct nvme_completion_poll_status *status);
+int spdk_nvme_wait_for_completion_robust_lock(struct spdk_nvme_qpair *qpair,
+		struct nvme_completion_poll_status *status,
+		pthread_mutex_t *robust_mutex);
 
 struct spdk_nvme_ctrlr_process *spdk_nvme_ctrlr_get_process(struct spdk_nvme_ctrlr *ctrlr,
 		pid_t pid);
@@ -47,7 +47,6 @@ int nvme_ns_identify_update(struct spdk_nvme_ns *ns)
 	int rc;
 
 	nsdata = _nvme_ns_get_data(ns);
-	status.done = false;
 	rc = nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS, 0, ns->id,
 				     nsdata, sizeof(*nsdata),
 				     nvme_completion_poll_cb, &status);
@@ -55,12 +54,8 @@ int nvme_ns_identify_update(struct spdk_nvme_ns *ns)
 		return rc;
 	}
 
-	while (status.done == false) {
-		nvme_robust_mutex_lock(&ns->ctrlr->ctrlr_lock);
-		spdk_nvme_qpair_process_completions(ns->ctrlr->adminq, 0);
-		nvme_robust_mutex_unlock(&ns->ctrlr->ctrlr_lock);
-	}
-	if (spdk_nvme_cpl_is_error(&status.cpl)) {
+	if (spdk_nvme_wait_for_completion_robust_lock(ns->ctrlr->adminq, &status,
+			&ns->ctrlr->ctrlr_lock)) {
 		/* This can occur if the namespace is not active. Simply zero the
 		 * namespace data and continue. */
 		memset(nsdata, 0, sizeof(*nsdata));
@@ -127,16 +122,11 @@ int nvme_ns_identify_update(struct spdk_nvme_ns *ns)
 	if (ns->ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 3, 0) &&
 	    !(ns->ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Attempting to retrieve NS ID Descriptor List\n");
-		status.done = false;
 		rc = nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST, 0, ns->id,
 					     ns->id_desc_list, sizeof(ns->id_desc_list),
 					     nvme_completion_poll_cb, &status);
 		if (rc == 0) {
-			while (status.done == false) {
-				nvme_robust_mutex_lock(&ns->ctrlr->ctrlr_lock);
-				spdk_nvme_qpair_process_completions(ns->ctrlr->adminq, 0);
-				nvme_robust_mutex_unlock(&ns->ctrlr->ctrlr_lock);
-			}
+			rc = spdk_nvme_wait_for_completion_robust_lock(ns->ctrlr->adminq, &status, &ns->ctrlr->ctrlr_lock);
 		}
 
 		if (rc != 0 || spdk_nvme_cpl_is_error(&status.cpl)) {
@@ -1516,40 +1516,29 @@ _nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme
 	struct nvme_completion_poll_status status;
 	int rc;
 
-	status.done = false;
 	rc = nvme_pcie_ctrlr_cmd_create_io_cq(ctrlr, qpair, nvme_completion_poll_cb, &status);
 	if (rc != 0) {
 		return rc;
 	}
 
-	while (status.done == false) {
-		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
-	}
-	if (spdk_nvme_cpl_is_error(&status.cpl)) {
+	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
 		SPDK_ERRLOG("nvme_create_io_cq failed!\n");
 		return -1;
 	}
 
-	status.done = false;
 	rc = nvme_pcie_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair, nvme_completion_poll_cb, &status);
 	if (rc != 0) {
 		return rc;
 	}
 
-	while (status.done == false) {
-		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
-	}
-	if (spdk_nvme_cpl_is_error(&status.cpl)) {
+	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
 		SPDK_ERRLOG("nvme_create_io_sq failed!\n");
 		/* Attempt to delete the completion queue */
-		status.done = false;
 		rc = nvme_pcie_ctrlr_cmd_delete_io_cq(qpair->ctrlr, qpair, nvme_completion_poll_cb, &status);
 		if (rc != 0) {
 			return -1;
 		}
-		while (status.done == false) {
-			spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
-		}
+		spdk_nvme_wait_for_completion(ctrlr->adminq, &status);
 		return -1;
 	}
 
@@ -1625,15 +1614,11 @@ nvme_pcie_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_
 	}
 
 	/* Delete the I/O submission queue */
-	status.done = false;
 	rc = nvme_pcie_ctrlr_cmd_delete_io_sq(ctrlr, qpair, nvme_completion_poll_cb, &status);
 	if (rc != 0) {
 		return rc;
 	}
-	while (status.done == false) {
-		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
-	}
-	if (spdk_nvme_cpl_is_error(&status.cpl)) {
+	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
 		return -1;
 	}
 
@@ -1646,15 +1631,11 @@ nvme_pcie_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_
 	}
 
 	/* Delete the completion queue */
-	status.done = false;
 	rc = nvme_pcie_ctrlr_cmd_delete_io_cq(ctrlr, qpair, nvme_completion_poll_cb, &status);
 	if (rc != 0) {
 		return rc;
 	}
-	while (status.done == false) {
-		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
-	}
-	if (spdk_nvme_cpl_is_error(&status.cpl)) {
+	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
 		return -1;
 	}
 
@@ -621,7 +621,6 @@ nvme_rdma_qpair_fabric_connect(struct nvme_rdma_qpair *rqpair)
 	}
 
 	memset(&cmd, 0, sizeof(cmd));
-	memset(&status, 0, sizeof(struct nvme_completion_poll_status));
 
 	cmd.opcode = SPDK_NVME_OPC_FABRIC;
 	cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
@@ -651,11 +650,7 @@ nvme_rdma_qpair_fabric_connect(struct nvme_rdma_qpair *rqpair)
 		goto ret;
 	}
 
-	while (status.done == false) {
-		spdk_nvme_qpair_process_completions(&rqpair->qpair, 0);
-	}
-
-	if (spdk_nvme_cpl_is_error(&status.cpl)) {
+	if (spdk_nvme_wait_for_completion(&rqpair->qpair, &status)) {
 		SPDK_ERRLOG("Connect command failed\n");
 		return -1;
 	}
@@ -1006,7 +1001,7 @@ nvme_rdma_fabric_prop_set_cmd(struct spdk_nvme_ctrlr *ctrlr,
 			      uint32_t offset, uint8_t size, uint64_t value)
 {
 	struct spdk_nvmf_fabric_prop_set_cmd cmd = {};
-	struct nvme_completion_poll_status status = {};
+	struct nvme_completion_poll_status status;
 	int rc;
 
 	cmd.opcode = SPDK_NVME_OPC_FABRIC;
@@ -1024,11 +1019,7 @@ nvme_rdma_fabric_prop_set_cmd(struct spdk_nvme_ctrlr *ctrlr,
 		return -1;
 	}
 
-	while (status.done == false) {
-		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
-	}
-
-	if (spdk_nvme_cpl_is_error(&status.cpl)) {
+	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
 		SPDK_ERRLOG("nvme_rdma_fabric_prop_get_cmd failed\n");
 		return -1;
 	}
@@ -1041,7 +1032,7 @@ nvme_rdma_fabric_prop_get_cmd(struct spdk_nvme_ctrlr *ctrlr,
 			      uint32_t offset, uint8_t size, uint64_t *value)
 {
 	struct spdk_nvmf_fabric_prop_set_cmd cmd = {};
-	struct nvme_completion_poll_status status = {};
+	struct nvme_completion_poll_status status;
 	struct spdk_nvmf_fabric_prop_get_rsp *response;
 	int rc;
 
@@ -1059,11 +1050,7 @@ nvme_rdma_fabric_prop_get_cmd(struct spdk_nvme_ctrlr *ctrlr,
 		return -1;
 	}
 
-	while (status.done == false) {
-		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
-	}
-
-	if (spdk_nvme_cpl_is_error(&status.cpl)) {
+	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
 		SPDK_ERRLOG("nvme_rdma_fabric_prop_get_cmd failed\n");
 		return -1;
 	}
@@ -1170,18 +1157,13 @@ nvme_fabrics_get_log_discovery_page(struct spdk_nvme_ctrlr *ctrlr,
 	struct nvme_completion_poll_status status;
 	int rc;
 
-	status.done = false;
 	rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_DISCOVERY, 0, log_page, size, offset,
 					      nvme_completion_poll_cb, &status);
 	if (rc < 0) {
 		return -1;
 	}
 
-	while (status.done == false) {
-		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
-	}
-
-	if (spdk_nvme_cpl_is_error(&status.cpl)) {
+	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
 		return -1;
 	}
 
@@ -1296,7 +1278,6 @@ nvme_rdma_ctrlr_scan(const struct spdk_nvme_transport_id *discovery_trid,
 	}
 
 	/* get the cdata info */
-	status.done = false;
 	rc = nvme_ctrlr_cmd_identify(discovery_ctrlr, SPDK_NVME_IDENTIFY_CTRLR, 0, 0,
 				     &discovery_ctrlr->cdata, sizeof(discovery_ctrlr->cdata),
 				     nvme_completion_poll_cb, &status);
@@ -1305,10 +1286,7 @@ nvme_rdma_ctrlr_scan(const struct spdk_nvme_transport_id *discovery_trid,
 		return rc;
 	}
 
-	while (status.done == false) {
-		spdk_nvme_qpair_process_completions(discovery_ctrlr->adminq, 0);
-	}
-	if (spdk_nvme_cpl_is_error(&status.cpl)) {
+	if (spdk_nvme_wait_for_completion(discovery_ctrlr->adminq, &status)) {
 		SPDK_ERRLOG("nvme_identify_controller failed!\n");
 		return -ENXIO;
 	}
@@ -83,6 +83,10 @@ DEFINE_STUB_P(nvme_transport_ctrlr_construct, struct spdk_nvme_ctrlr,
 	      const struct spdk_nvme_ctrlr_opts *opts,
 	      void *devhandle), {0})
 
+DEFINE_STUB(spdk_nvme_qpair_process_completions, int32_t,
+	    (struct spdk_nvme_qpair *qpair,
+	     uint32_t max_completions), 0);
+
 static bool ut_destruct_called = false;
 void
 nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
@@ -264,6 +264,29 @@ nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
 	status->done = true;
 }
 
+int
+spdk_nvme_wait_for_completion_robust_lock(
+	struct spdk_nvme_qpair *qpair,
+	struct nvme_completion_poll_status *status,
+	pthread_mutex_t *robust_mutex)
+{
+	status->done = true;
+	memset(&status->cpl, 0, sizeof(status->cpl));
+	status->cpl.status.sc = 0;
+	if (set_status_cpl == 1) {
+		status->cpl.status.sc = 1;
+	}
+	return spdk_nvme_cpl_is_error(&status->cpl) ? -EIO : 0;
+}
+
+int
+spdk_nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
+			      struct nvme_completion_poll_status *status)
+{
+	return spdk_nvme_wait_for_completion_robust_lock(qpair, status, NULL);
+}
+
+
 int
 nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
 		union spdk_nvme_feat_async_event_configuration config, spdk_nvme_cmd_cb cb_fn,
@@ -354,22 +377,13 @@ int
 nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_fw_commit *fw_commit,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
 {
-	struct nvme_completion_poll_status *status;
-	struct spdk_nvme_cpl status_cpl = {};
-	struct spdk_nvme_status cpl_status = {};
-
-	status = cb_arg;
 	CU_ASSERT(fw_commit->ca == SPDK_NVME_FW_COMMIT_REPLACE_IMG);
-	CU_ASSERT(status->done == false);
 	if (fw_commit->fs == 0) {
 		return -1;
 	}
-	status->done = true;
-	status->cpl = status_cpl;
-	status->cpl.status = cpl_status;
-	status->cpl.status.sc = 1;
+	set_status_cpl = 1;
 	if (ctrlr->is_resetting == true) {
-		status->cpl.status.sc = 0;
+		set_status_cpl = 0;
 	}
 	return 0;
 }
@@ -379,25 +393,10 @@ nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
 				 uint32_t size, uint32_t offset, void *payload,
 				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
 {
-	struct nvme_completion_poll_status *status;
-	struct spdk_nvme_cpl status_cpl = {};
-	struct spdk_nvme_status cpl_status = {};
-	status = cb_arg;
-
 	if ((size != 0 && payload == NULL) || (size == 0 && payload != NULL)) {
 		return -1;
 	}
-	if (set_size > 0) {
-		CU_ASSERT(status->done == false);
-	}
 	CU_ASSERT(offset == 0);
-	status->done = true;
-	status->cpl = status_cpl;
-	status->cpl.status = cpl_status;
-	status->cpl.status.sc = 0;
-	if (set_status_cpl == 1) {
-		status->cpl.status.sc = 1;
-	}
 	return 0;
 }
 
@@ -1623,6 +1622,8 @@ test_spdk_nvme_ctrlr_update_firmware(void)
 	payload = &point_payload;
 	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
 	CU_ASSERT(ret == 0);
+
+	set_status_cpl = 0;
 }
 
 int
@@ -41,6 +41,11 @@
 
 SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME)
 
+DEFINE_STUB(spdk_nvme_wait_for_completion_robust_lock, int,
+	    (struct spdk_nvme_qpair *qpair,
+	     struct nvme_completion_poll_status *status,
+	     pthread_mutex_t *robust_mutex), 0);
+
 int
 nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
 			void *payload, size_t payload_size,
@@ -38,6 +38,10 @@
 
 #include "common/lib/test_env.c"
 
+DEFINE_STUB(spdk_nvme_qpair_process_completions, int32_t,
+	    (struct spdk_nvme_qpair *qpair,
+	     uint32_t max_completions), 0);
+
 static struct nvme_driver _g_nvme_driver = {
 	.lock = PTHREAD_MUTEX_INITIALIZER,
 };
@@ -52,6 +52,10 @@ DEFINE_STUB(spdk_nvme_ctrlr_get_current_process,
 	    (struct spdk_nvme_ctrlr *ctrlr),
 	    NULL);
 
+DEFINE_STUB(spdk_nvme_wait_for_completion, int,
+	    (struct spdk_nvme_qpair *qpair,
+	     struct nvme_completion_poll_status *status), 0);
+
 struct spdk_trace_flag SPDK_LOG_NVME = {
 	.name = "nvme",
 	.enabled = false,