nvme: add return code to nvme_qpair_submit_request

If the controller has failed, attempting to submit additional I/O is
futile - it will be immediately failed using the completion callback,
which can result in infinite recursion if the application code resubmits
I/Os on failure.

Instead, provide a way for request submission to indicate failure, and
use it to exit early if the controller has failed; this can only happen
when a reset has failed (timed out).

If a request is submitted directly by the user when the controller has
failed, we can return an error code directly.  For the case where I/O
was queued and is being resubmitted after a reset, we still need to call
the completion handler via _nvme_fail_request_ctrlr_failed().

Change-Id: I9e144328d524b25db2acf48e923b584746e8d0b6
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
This commit is contained in:
Daniel Verkamp 2016-03-08 15:16:09 -07:00
parent c1592f3880
commit eb555b139e
9 changed files with 93 additions and 103 deletions

View File

@ -769,9 +769,7 @@ nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
}
req->cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
nvme_ctrlr_submit_admin_request(ctrlr, req);
return 0;
return nvme_ctrlr_submit_admin_request(ctrlr, req);
}
static int
@ -1044,11 +1042,11 @@ nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
nvme_mutex_destroy(&ctrlr->ctrlr_lock);
}
void
int
nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_request *req)
{
nvme_qpair_submit_request(&ctrlr->adminq, req);
return nvme_qpair_submit_request(&ctrlr->adminq, req);
}
int32_t

View File

@ -50,8 +50,7 @@ spdk_nvme_ctrlr_cmd_io_raw(struct spdk_nvme_ctrlr *ctrlr,
memcpy(&req->cmd, cmd, sizeof(req->cmd));
nvme_qpair_submit_request(qpair, req);
return 0;
return nvme_qpair_submit_request(qpair, req);
}
int
@ -61,6 +60,7 @@ spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
struct nvme_request *req;
int rc;
nvme_mutex_lock(&ctrlr->ctrlr_lock);
req = nvme_allocate_request_contig(buf, len, cb_fn, cb_arg);
@ -71,10 +71,10 @@ spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
memcpy(&req->cmd, cmd, sizeof(req->cmd));
nvme_ctrlr_submit_admin_request(ctrlr, req);
rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
return 0;
return rc;
}
int
@ -100,8 +100,7 @@ nvme_ctrlr_cmd_identify_controller(struct spdk_nvme_ctrlr *ctrlr, void *payload,
*/
cmd->cdw10 = 1;
nvme_ctrlr_submit_admin_request(ctrlr, req);
return 0;
return nvme_ctrlr_submit_admin_request(ctrlr, req);
}
int
@ -126,8 +125,7 @@ nvme_ctrlr_cmd_identify_namespace(struct spdk_nvme_ctrlr *ctrlr, uint16_t nsid,
*/
cmd->nsid = nsid;
nvme_ctrlr_submit_admin_request(ctrlr, req);
return 0;
return nvme_ctrlr_submit_admin_request(ctrlr, req);
}
int
@ -158,8 +156,7 @@ nvme_ctrlr_cmd_create_io_cq(struct spdk_nvme_ctrlr *ctrlr,
cmd->cdw11 = 0x1;
cmd->dptr.prp.prp1 = io_que->cpl_bus_addr;
nvme_ctrlr_submit_admin_request(ctrlr, req);
return 0;
return nvme_ctrlr_submit_admin_request(ctrlr, req);
}
int
@ -186,8 +183,7 @@ nvme_ctrlr_cmd_create_io_sq(struct spdk_nvme_ctrlr *ctrlr,
cmd->cdw11 = (io_que->id << 16) | (io_que->qprio << 1) | 0x1;
cmd->dptr.prp.prp1 = io_que->cmd_bus_addr;
nvme_ctrlr_submit_admin_request(ctrlr, req);
return 0;
return nvme_ctrlr_submit_admin_request(ctrlr, req);
}
int
@ -206,8 +202,7 @@ nvme_ctrlr_cmd_delete_io_cq(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpai
cmd->opc = SPDK_NVME_OPC_DELETE_IO_CQ;
cmd->cdw10 = qpair->id;
nvme_ctrlr_submit_admin_request(ctrlr, req);
return 0;
return nvme_ctrlr_submit_admin_request(ctrlr, req);
}
int
@ -226,8 +221,7 @@ nvme_ctrlr_cmd_delete_io_sq(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpai
cmd->opc = SPDK_NVME_OPC_DELETE_IO_SQ;
cmd->cdw10 = qpair->id;
nvme_ctrlr_submit_admin_request(ctrlr, req);
return 0;
return nvme_ctrlr_submit_admin_request(ctrlr, req);
}
int
@ -236,6 +230,7 @@ nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
int rc;
nvme_mutex_lock(&ctrlr->ctrlr_lock);
req = nvme_allocate_request_contig(payload, sizeof(struct spdk_nvme_ctrlr_list),
@ -250,10 +245,10 @@ nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
cmd->nsid = nsid;
cmd->cdw10 = SPDK_NVME_NS_CTRLR_ATTACH;
nvme_ctrlr_submit_admin_request(ctrlr, req);
rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
return 0;
return rc;
}
int
@ -262,6 +257,7 @@ nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
int rc;
nvme_mutex_lock(&ctrlr->ctrlr_lock);
req = nvme_allocate_request_contig(payload, sizeof(struct spdk_nvme_ctrlr_list),
@ -276,10 +272,10 @@ nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
cmd->nsid = nsid;
cmd->cdw10 = SPDK_NVME_NS_CTRLR_DETACH;
nvme_ctrlr_submit_admin_request(ctrlr, req);
rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
return 0;
return rc;
}
int
@ -288,6 +284,7 @@ nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
int rc;
nvme_mutex_lock(&ctrlr->ctrlr_lock);
req = nvme_allocate_request_contig(payload, sizeof(struct spdk_nvme_ns_data),
@ -301,10 +298,10 @@ nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data
cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
cmd->cdw10 = SPDK_NVME_NS_MANAGEMENT_CREATE;
nvme_ctrlr_submit_admin_request(ctrlr, req);
rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
return 0;
return rc;
}
int
@ -313,6 +310,7 @@ nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
int rc;
nvme_mutex_lock(&ctrlr->ctrlr_lock);
req = nvme_allocate_request_null(cb_fn, cb_arg);
@ -326,10 +324,10 @@ nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme
cmd->cdw10 = SPDK_NVME_NS_MANAGEMENT_DELETE;
cmd->nsid = nsid;
nvme_ctrlr_submit_admin_request(ctrlr, req);
rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
return 0;
return rc;
}
int
@ -339,6 +337,7 @@ spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
int rc;
nvme_mutex_lock(&ctrlr->ctrlr_lock);
req = nvme_allocate_request_null(cb_fn, cb_arg);
@ -353,10 +352,10 @@ spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
cmd->cdw11 = cdw11;
cmd->cdw12 = cdw12;
nvme_ctrlr_submit_admin_request(ctrlr, req);
rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
return 0;
return rc;
}
int
@ -366,6 +365,7 @@ spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
int rc;
nvme_mutex_lock(&ctrlr->ctrlr_lock);
req = nvme_allocate_request_null(cb_fn, cb_arg);
@ -379,10 +379,10 @@ spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
cmd->cdw10 = feature;
cmd->cdw11 = cdw11;
nvme_ctrlr_submit_admin_request(ctrlr, req);
rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
return 0;
return rc;
}
int
@ -416,6 +416,7 @@ spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
int rc;
nvme_mutex_lock(&ctrlr->ctrlr_lock);
req = nvme_allocate_request_contig(payload, payload_size, cb_fn, cb_arg);
@ -430,10 +431,10 @@ spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page
cmd->cdw10 = ((payload_size / sizeof(uint32_t)) - 1) << 16;
cmd->cdw10 |= log_page;
nvme_ctrlr_submit_admin_request(ctrlr, req);
rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
return 0;
return rc;
}
int
@ -452,6 +453,5 @@ nvme_ctrlr_cmd_abort(struct spdk_nvme_ctrlr *ctrlr, uint16_t cid,
cmd->opc = SPDK_NVME_OPC_ABORT;
cmd->cdw10 = (cid << 16) | sqid;
nvme_ctrlr_submit_admin_request(ctrlr, req);
return 0;
return nvme_ctrlr_submit_admin_request(ctrlr, req);
}

View File

@ -521,7 +521,7 @@ void nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr);
int nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr);
int nvme_ctrlr_start(struct spdk_nvme_ctrlr *ctrlr);
void nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
int nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_request *req);
int nvme_qpair_construct(struct spdk_nvme_qpair *qpair, uint16_t id,
@ -531,7 +531,7 @@ int nvme_qpair_construct(struct spdk_nvme_qpair *qpair, uint16_t id,
void nvme_qpair_destroy(struct spdk_nvme_qpair *qpair);
void nvme_qpair_enable(struct spdk_nvme_qpair *qpair);
void nvme_qpair_disable(struct spdk_nvme_qpair *qpair);
void nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair,
int nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair,
struct nvme_request *req);
void nvme_qpair_reset(struct spdk_nvme_qpair *qpair);
void nvme_qpair_fail(struct spdk_nvme_qpair *qpair);

View File

@ -188,8 +188,7 @@ spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, vo
req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ, io_flags);
if (req != NULL) {
nvme_qpair_submit_request(qpair, req);
return 0;
return nvme_qpair_submit_request(qpair, req);
} else {
return ENOMEM;
}
@ -215,8 +214,7 @@ spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ, io_flags);
if (req != NULL) {
nvme_qpair_submit_request(qpair, req);
return 0;
return nvme_qpair_submit_request(qpair, req);
} else {
return ENOMEM;
}
@ -236,8 +234,7 @@ spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE, io_flags);
if (req != NULL) {
nvme_qpair_submit_request(qpair, req);
return 0;
return nvme_qpair_submit_request(qpair, req);
} else {
return ENOMEM;
}
@ -263,8 +260,7 @@ spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE, io_flags);
if (req != NULL) {
nvme_qpair_submit_request(qpair, req);
return 0;
return nvme_qpair_submit_request(qpair, req);
} else {
return ENOMEM;
}
@ -298,9 +294,7 @@ spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *q
cmd->cdw12 = lba_count - 1;
cmd->cdw12 |= io_flags;
nvme_qpair_submit_request(qpair, req);
return 0;
return nvme_qpair_submit_request(qpair, req);
}
int
@ -329,9 +323,7 @@ spdk_nvme_ns_cmd_deallocate(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpa
cmd->cdw10 = num_ranges - 1;
cmd->cdw11 = SPDK_NVME_DSM_ATTR_DEALLOCATE;
nvme_qpair_submit_request(qpair, req);
return 0;
return nvme_qpair_submit_request(qpair, req);
}
int
@ -350,9 +342,7 @@ spdk_nvme_ns_cmd_flush(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
cmd->opc = SPDK_NVME_OPC_FLUSH;
cmd->nsid = ns->id;
nvme_qpair_submit_request(qpair, req);
return 0;
return nvme_qpair_submit_request(qpair, req);
}
int
@ -385,9 +375,7 @@ spdk_nvme_ns_cmd_reservation_register(struct spdk_nvme_ns *ns,
/* Bits 30-31 */
cmd->cdw10 |= (uint32_t)cptpl << 30;
nvme_qpair_submit_request(qpair, req);
return 0;
return nvme_qpair_submit_request(qpair, req);
}
int
@ -419,9 +407,7 @@ spdk_nvme_ns_cmd_reservation_release(struct spdk_nvme_ns *ns,
/* Bits 8-15 */
cmd->cdw10 |= (uint32_t)type << 8;
nvme_qpair_submit_request(qpair, req);
return 0;
return nvme_qpair_submit_request(qpair, req);
}
int
@ -454,9 +440,7 @@ spdk_nvme_ns_cmd_reservation_acquire(struct spdk_nvme_ns *ns,
/* Bits 8-15 */
cmd->cdw10 |= (uint32_t)type << 8;
nvme_qpair_submit_request(qpair, req);
return 0;
return nvme_qpair_submit_request(qpair, req);
}
int
@ -484,7 +468,5 @@ spdk_nvme_ns_cmd_reservation_report(struct spdk_nvme_ns *ns,
cmd->cdw10 = num_dwords;
nvme_qpair_submit_request(qpair, req);
return 0;
return nvme_qpair_submit_request(qpair, req);
}

View File

@ -867,7 +867,7 @@ _nvme_qpair_build_prps_sgl_request(struct spdk_nvme_qpair *qpair, struct nvme_re
return 0;
}
void
int
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
int rc;
@ -875,6 +875,11 @@ nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *re
struct nvme_request *child_req;
struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
if (ctrlr->is_failed) {
nvme_free_request(req);
return ENXIO;
}
nvme_qpair_check_enabled(qpair);
if (req->num_children) {
@ -883,9 +888,12 @@ nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *re
* request itself, since the parent is the original unsplit request.
*/
TAILQ_FOREACH(child_req, &req->children, child_tailq) {
nvme_qpair_submit_request(qpair, child_req);
rc = nvme_qpair_submit_request(qpair, child_req);
if (rc != 0) {
return rc;
}
}
return;
return 0;
}
tr = LIST_FIRST(&qpair->free_tr);
@ -893,22 +901,15 @@ nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *re
if (tr == NULL || !qpair->is_enabled) {
/*
* No tracker is available, or the qpair is disabled due to
* an in-progress controller-level reset or controller
* failure.
* an in-progress controller-level reset.
*
* Put the request on the qpair's request queue to be
* processed when a tracker frees up via a command
* completion or when the controller reset is
* completed.
*/
if (qpair->ctrlr->is_failed) {
_nvme_fail_request_ctrlr_failed(qpair, req);
} else {
/*
* Put the request on the qpair's request queue to be
* processed when a tracker frees up via a command
* completion or when the controller reset is
* completed.
*/
STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
}
return;
STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
return 0;
}
LIST_REMOVE(tr, list); /* remove tr from free_tr */
@ -921,7 +922,7 @@ nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *re
} else if (req->payload.type == NVME_PAYLOAD_TYPE_CONTIG) {
rc = _nvme_qpair_build_contig_request(qpair, req, tr);
if (rc < 0) {
return;
return rc;
}
} else if (req->payload.type == NVME_PAYLOAD_TYPE_SGL) {
if (ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED)
@ -929,15 +930,16 @@ nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *re
else
rc = _nvme_qpair_build_prps_sgl_request(qpair, req, tr);
if (rc < 0) {
return;
return rc;
}
} else {
nvme_assert(0, ("invalid NVMe payload type %d\n", req->payload.type));
_nvme_fail_request_bad_vtophys(qpair, tr);
return;
return EINVAL;
}
nvme_qpair_submit_tracker(qpair, tr);
return 0;
}
void
@ -1012,7 +1014,9 @@ _nvme_io_qpair_enable(struct spdk_nvme_qpair *qpair)
nvme_printf(qpair->ctrlr, "resubmitting queued i/o\n");
nvme_qpair_print_command(qpair, &req->cmd);
nvme_qpair_submit_request(qpair, req);
if (nvme_qpair_submit_request(qpair, req) != 0) {
_nvme_fail_request_ctrlr_failed(qpair, req);
}
}
}

View File

@ -110,7 +110,7 @@ nvme_qpair_fail(struct spdk_nvme_qpair *qpair)
{
}
void
int
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST);
@ -120,6 +120,8 @@ nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *re
* For the purposes of this unit test, we don't need to bother emulating request submission.
*/
nvme_dealloc_request(req);
return 0;
}
int32_t

View File

@ -241,20 +241,24 @@ nvme_allocate_request_null(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
return nvme_allocate_request_contig(NULL, 0, cb_fn, cb_arg);
}
void
int
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
verify_fn(req);
/* stop analyzer from thinking stack variable addresses are stored in a global */
memset(req, 0, sizeof(*req));
return 0;
}
void
int
nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr, struct nvme_request *req)
{
verify_fn(req);
/* stop analyzer from thinking stack variable addresses are stored in a global */
memset(req, 0, sizeof(*req));
return 0;
}
static void

View File

@ -95,10 +95,12 @@ spdk_nvme_ns_get_max_io_xfer_size(struct spdk_nvme_ns *ns)
return ns->ctrlr->max_xfer_size;
}
void
int
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
g_request = req;
return 0;
}
static void

View File

@ -293,7 +293,7 @@ test3(void)
CU_ASSERT(qpair.sq_tail == 0);
nvme_qpair_submit_request(&qpair, req);
CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);
CU_ASSERT(qpair.sq_tail == 1);
@ -339,7 +339,7 @@ test4(void)
CU_ASSERT(qpair.sq_tail == 0);
nvme_qpair_submit_request(&qpair, req);
CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
CU_ASSERT(qpair.sq_tail == 0);
/* Assert that command/completion data was printed to log. */
@ -374,7 +374,7 @@ test_sgl_req(void)
req->cmd.cdw12 = 255 | 0;
req->payload_offset = 1;
nvme_qpair_submit_request(&qpair, req);
CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);
CU_ASSERT(req->cmd.psdt == SPDK_NVME_PSDT_PRP);
CU_ASSERT(req->cmd.dptr.prp.prp1 == 7);
CU_ASSERT(req->cmd.dptr.prp.prp2 == 4096);
@ -393,7 +393,7 @@ test_sgl_req(void)
spdk_nvme_retry_count = 1;
fail_next_sge = true;
nvme_qpair_submit_request(&qpair, req);
CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
CU_ASSERT(qpair.sq_tail == 0);
cleanup_submit_request_test(&qpair);
@ -407,7 +407,7 @@ test_sgl_req(void)
req->cmd.cdw12 = 255 | 0;
req->payload_offset = 2;
nvme_qpair_submit_request(&qpair, req);
CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
CU_ASSERT(qpair.sq_tail == 0);
cleanup_submit_request_test(&qpair);
@ -418,7 +418,7 @@ test_sgl_req(void)
req->cmd.cdw10 = 10000;
req->cmd.cdw12 = 255 | 0;
nvme_qpair_submit_request(&qpair, req);
CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);
CU_ASSERT(req->cmd.dptr.prp.prp1 == 0);
CU_ASSERT(qpair.sq_tail == 1);
@ -459,11 +459,9 @@ test_ctrlr_failed(void)
CU_ASSERT(qpair.sq_tail == 0);
nvme_qpair_submit_request(&qpair, req);
CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
CU_ASSERT(qpair.sq_tail == 0);
/* Assert that command/completion data was printed to log. */
CU_ASSERT(strlen(outbuf) > 0);
cleanup_submit_request_test(&qpair);
}