nvme: add timeout for Admin commands during initialization

Currently there is no timeout mechanism for Admin commands submitted
during initialization, so the NVMe driver may enter an infinite loop.
Add a new admin_timeout_ms parameter to the controller initialization
options; the NVMe driver will now report an error when a timeout
occurs during initialization.

Change-Id: Id0c6b6fa15abe5227b486bee95c8e02914b0d358
Signed-off-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-on: https://review.gerrithub.io/424622
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Author: Changpeng Liu, 2018-10-15 02:20:35 -04:00 (committed by Jim Harris)
parent e688d1ccf1
commit 2706cd4238
4 changed files with 89 additions and 34 deletions
CHANGELOG.md
include/spdk
lib/nvme
test/unit/lib/nvme/nvme_ctrlr.c

CHANGELOG.md
@@ -2,6 +2,11 @@
 ## v19.01: (Upcoming Release)
 
+### nvme
+
+admin_timeout_ms was added to the NVMe controller initialization options; users
+can change the default value when probing a controller.
+
 ### NVMe-oF Target
 
 The `spdk_nvmf_tgt_opts` struct has been deprecated in favor of `spdk_nvmf_transport_opts`.
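
As a usage sketch (not part of this commit): an application can override the
default from its probe callback, before the controller is attached. The
callback name and the 30-second value below are illustrative only.

    static bool
    probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
             struct spdk_nvme_ctrlr_opts *opts)
    {
            /* Fail controller initialization if any internally submitted
             * Admin command takes longer than 30 seconds. The shipped
             * default is 120 seconds; 0 disables the timeout entirely. */
            opts->admin_timeout_ms = 30 * 1000;
            return true;
    }

Passing this callback to spdk_nvme_probe() applies the override to every
controller it enumerates.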

include/spdk
@@ -156,6 +156,17 @@ struct spdk_nvme_ctrlr_opts {
 	 * command set is used.
 	 */
 	enum spdk_nvme_cc_css command_set;
+
+	/**
+	 * Timeout for Admin commands, in milliseconds (0 = no timeout).
+	 *
+	 * The timeout value is used for Admin commands submitted internally
+	 * by the nvme driver during initialization, before the user is able
+	 * to call spdk_nvme_ctrlr_register_timeout_callback(). By default,
+	 * this is set to 120 seconds; users can change it in the probe
+	 * callback.
+	 */
+	uint32_t admin_timeout_ms;
 };
 
 /**

lib/nvme
@@ -142,6 +142,10 @@ spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
 	if (FIELD_OK(command_set)) {
 		opts->command_set = SPDK_NVME_CC_CSS_NVM;
 	}
+
+	if (FIELD_OK(admin_timeout_ms)) {
+		opts->admin_timeout_ms = NVME_MAX_TIMEOUT_PERIOD * 1000;
+	}
 
 #undef FIELD_OK
 }
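
For context, FIELD_OK in this function protects callers compiled against an
older, smaller struct spdk_nvme_ctrlr_opts; in nvme_ctrlr.c it is defined
roughly as:

    #define FIELD_OK(field) \
            offsetof(struct spdk_nvme_ctrlr_opts, field) + sizeof(opts->field) <= opts_size

so admin_timeout_ms only receives its 120-second default
(NVME_MAX_TIMEOUT_PERIOD * 1000 ms) when the caller's opts_size covers the new
field; otherwise it stays zeroed, which the unit test changes below assert.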
@@ -680,7 +684,7 @@ nvme_ctrlr_set_state(struct spdk_nvme_ctrlr *ctrlr, enum nvme_ctrlr_state state,
 		     uint64_t timeout_in_ms)
 {
 	ctrlr->state = state;
-	if (timeout_in_ms == NVME_TIMEOUT_INFINITE) {
+	if (timeout_in_ms == 0) {
 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "setting state to %s (no timeout)\n",
 			      nvme_ctrlr_state_string(ctrlr->state));
 		ctrlr->state_timeout_tsc = NVME_TIMEOUT_INFINITE;
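
In the else branch (unchanged, so not shown in this hunk), the millisecond
timeout becomes an absolute tick deadline; a sketch of that path, with the
overflow fallback to NVME_TIMEOUT_INFINITE omitted:

    uint64_t ticks_per_ms = spdk_get_ticks_hz() / 1000;

    ctrlr->state_timeout_tsc = spdk_get_ticks() + ticks_per_ms * timeout_in_ms;

Since every initialization state is now entered with
ctrlr->opts.admin_timeout_ms instead of NVME_TIMEOUT_INFINITE, each state
carries a real deadline by default.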
@@ -716,7 +720,8 @@ nvme_ctrlr_set_doorbell_buffer_config_done(void *arg, const struct spdk_nvme_cpl *cpl)
 		SPDK_INFOLOG(SPDK_LOG_NVME, "NVMe controller: %s doorbell buffer config enabled\n",
 			     ctrlr->trid.traddr);
 	}
-	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT, NVME_TIMEOUT_INFINITE);
+	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
+			     ctrlr->opts.admin_timeout_ms);
 }
 
 static int
@@ -726,12 +731,14 @@ nvme_ctrlr_set_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr)
 	uint64_t prp1, prp2;
 
 	if (!ctrlr->cdata.oacs.doorbell_buffer_config) {
-		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT, NVME_TIMEOUT_INFINITE);
+		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
+				     ctrlr->opts.admin_timeout_ms);
 		return 0;
 	}
 
 	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
-		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT, NVME_TIMEOUT_INFINITE);
+		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT,
+				     ctrlr->opts.admin_timeout_ms);
 		return 0;
 	}
@@ -749,7 +756,8 @@ nvme_ctrlr_set_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr)
 		goto error;
 	}
 
-	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG, NVME_TIMEOUT_INFINITE);
+	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_DB_BUF_CFG,
+			     ctrlr->opts.admin_timeout_ms);
 
 	rc = nvme_ctrlr_cmd_doorbell_buffer_config(ctrlr, prp1, prp2,
 			nvme_ctrlr_set_doorbell_buffer_config_done, ctrlr);
@@ -878,7 +886,8 @@ nvme_ctrlr_identify_done(void *arg, const struct spdk_nvme_cpl *cpl)
 		ctrlr->max_sges = nvme_transport_ctrlr_get_max_sges(ctrlr);
 	}
 
-	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_NUM_QUEUES, NVME_TIMEOUT_INFINITE);
+	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_NUM_QUEUES,
+			     ctrlr->opts.admin_timeout_ms);
 }
 
 static int
@@ -886,7 +895,8 @@ nvme_ctrlr_identify(struct spdk_nvme_ctrlr *ctrlr)
 {
 	int rc;
 
-	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY, NVME_TIMEOUT_INFINITE);
+	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY,
+			     ctrlr->opts.admin_timeout_ms);
 
 	rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_CTRLR, 0, 0,
 				     &ctrlr->cdata, sizeof(ctrlr->cdata),
@@ -995,7 +1005,8 @@ nvme_ctrlr_identify_ns_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
 	nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
 	if (ns == NULL) {
-		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ID_DESCS, NVME_TIMEOUT_INFINITE);
+		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ID_DESCS,
+				     ctrlr->opts.admin_timeout_ms);
 		return;
 	}
 	ns->ctrlr = ctrlr;
@@ -1015,7 +1026,8 @@ nvme_ctrlr_identify_ns_async(struct spdk_nvme_ns *ns)
 	nsdata = &ctrlr->nsdata[ns->id - 1];
 
-	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS, NVME_TIMEOUT_INFINITE);
+	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_NS,
+			     ctrlr->opts.admin_timeout_ms);
 	return nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS, 0, ns->id,
 				       nsdata, sizeof(*nsdata),
 				       nvme_ctrlr_identify_ns_async_done, ns);
@@ -1032,7 +1044,8 @@ nvme_ctrlr_identify_namespaces(struct spdk_nvme_ctrlr *ctrlr)
 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
 	if (ns == NULL) {
 		/* No active NS, move on to the next state */
-		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER, NVME_TIMEOUT_INFINITE);
+		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
+				     ctrlr->opts.admin_timeout_ms);
 		return 0;
 	}
@@ -1056,7 +1069,8 @@ nvme_ctrlr_identify_id_desc_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
 	int rc;
 
 	if (spdk_nvme_cpl_is_error(cpl)) {
-		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER, NVME_TIMEOUT_INFINITE);
+		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
+				     ctrlr->opts.admin_timeout_ms);
 		return;
 	}
@@ -1064,7 +1078,8 @@ nvme_ctrlr_identify_id_desc_async_done(void *arg, const struct spdk_nvme_cpl *cpl)
 	nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, ns->id);
 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
 	if (ns == NULL) {
-		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER, NVME_TIMEOUT_INFINITE);
+		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
+				     ctrlr->opts.admin_timeout_ms);
 		return;
 	}
@@ -1081,7 +1096,8 @@ nvme_ctrlr_identify_id_desc_async(struct spdk_nvme_ns *ns)
 	memset(ns->id_desc_list, 0, sizeof(ns->id_desc_list));
 
-	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS, NVME_TIMEOUT_INFINITE);
+	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS,
+			     ctrlr->opts.admin_timeout_ms);
 	return nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST,
 				       0, ns->id, ns->id_desc_list, sizeof(ns->id_desc_list),
 				       nvme_ctrlr_identify_id_desc_async_done, ns);
@@ -1097,7 +1113,8 @@ nvme_ctrlr_identify_id_desc_namespaces(struct spdk_nvme_ctrlr *ctrlr)
 	if (ctrlr->vs.raw < SPDK_NVME_VERSION(1, 3, 0) ||
 	    (ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Version < 1.3; not attempting to retrieve NS ID Descriptor List\n");
-		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER, NVME_TIMEOUT_INFINITE);
+		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
+				     ctrlr->opts.admin_timeout_ms);
 		return 0;
 	}
@@ -1105,7 +1122,8 @@ nvme_ctrlr_identify_id_desc_namespaces(struct spdk_nvme_ctrlr *ctrlr)
 	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
 	if (ns == NULL) {
 		/* No active NS, move on to the next state */
-		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER, NVME_TIMEOUT_INFINITE);
+		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
+				     ctrlr->opts.admin_timeout_ms);
 		return 0;
 	}
@@ -1125,7 +1143,8 @@ nvme_ctrlr_set_num_queues_done(void *arg, const struct spdk_nvme_cpl *cpl)
 	if (spdk_nvme_cpl_is_error(cpl)) {
 		SPDK_ERRLOG("Set Features - Number of Queues failed!\n");
 	}
-	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_GET_NUM_QUEUES, NVME_TIMEOUT_INFINITE);
+	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_GET_NUM_QUEUES,
+			     ctrlr->opts.admin_timeout_ms);
 }
 
 static int
@@ -1142,7 +1161,8 @@ nvme_ctrlr_set_num_queues(struct spdk_nvme_ctrlr *ctrlr)
 		ctrlr->opts.num_io_queues = 1;
 	}
 
-	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES, NVME_TIMEOUT_INFINITE);
+	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_SET_NUM_QUEUES,
+			     ctrlr->opts.admin_timeout_ms);
 
 	rc = nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->opts.num_io_queues,
 					   nvme_ctrlr_set_num_queues_done, ctrlr);
@@ -1193,7 +1213,8 @@ nvme_ctrlr_get_num_queues_done(void *arg, const struct spdk_nvme_cpl *cpl)
 	for (i = 1; i <= ctrlr->opts.num_io_queues; i++) {
 		spdk_bit_array_set(ctrlr->free_io_qids, i);
 	}
-	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONSTRUCT_NS, NVME_TIMEOUT_INFINITE);
+	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONSTRUCT_NS,
+			     ctrlr->opts.admin_timeout_ms);
 }
 
 static int
@@ -1201,7 +1222,8 @@ nvme_ctrlr_get_num_queues(struct spdk_nvme_ctrlr *ctrlr)
 {
 	int rc;
 
-	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_GET_NUM_QUEUES, NVME_TIMEOUT_INFINITE);
+	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_GET_NUM_QUEUES,
+			     ctrlr->opts.admin_timeout_ms);
 
 	/* Obtain the number of queues allocated using Get Features. */
 	rc = nvme_ctrlr_cmd_get_num_queues(ctrlr, nvme_ctrlr_get_num_queues_done, ctrlr);
@@ -1244,7 +1266,8 @@ nvme_ctrlr_set_keep_alive_timeout_done(void *arg, const struct spdk_nvme_cpl *cpl)
 	/* Schedule the first Keep Alive to be sent as soon as possible. */
 	ctrlr->next_keep_alive_tick = spdk_get_ticks();
 
-	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID, NVME_TIMEOUT_INFINITE);
+	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
+			     ctrlr->opts.admin_timeout_ms);
 }
 
 static int
@@ -1253,18 +1276,21 @@ nvme_ctrlr_set_keep_alive_timeout(struct spdk_nvme_ctrlr *ctrlr)
 	int rc;
 
 	if (ctrlr->opts.keep_alive_timeout_ms == 0) {
-		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID, NVME_TIMEOUT_INFINITE);
+		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
+				     ctrlr->opts.admin_timeout_ms);
 		return 0;
 	}
 
 	if (ctrlr->cdata.kas == 0) {
 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Controller KAS is 0 - not enabling Keep Alive\n");
 		ctrlr->opts.keep_alive_timeout_ms = 0;
-		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID, NVME_TIMEOUT_INFINITE);
+		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_HOST_ID,
+				     ctrlr->opts.admin_timeout_ms);
 		return 0;
 	}
 
-	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT, NVME_TIMEOUT_INFINITE);
+	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_KEEP_ALIVE_TIMEOUT,
+			     ctrlr->opts.admin_timeout_ms);
 
 	/* Retrieve actual keep alive timeout, since the controller may have adjusted it. */
 	rc = spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_KEEP_ALIVE_TIMER, 0, NULL, 0,
@@ -1335,7 +1361,8 @@ nvme_ctrlr_set_host_id(struct spdk_nvme_ctrlr *ctrlr)
 	SPDK_TRACEDUMP(SPDK_LOG_NVME, "host_id", host_id, host_id_size);
 
-	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_HOST_ID, NVME_TIMEOUT_INFINITE);
+	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_HOST_ID,
+			     ctrlr->opts.admin_timeout_ms);
 
 	rc = nvme_ctrlr_cmd_set_host_id(ctrlr, host_id, host_id_size, nvme_ctrlr_set_host_id_done, ctrlr);
 	if (rc != 0) {
@@ -1523,7 +1550,8 @@ nvme_ctrlr_configure_aer_done(void *arg, const struct spdk_nvme_cpl *cpl)
 	if (spdk_nvme_cpl_is_error(cpl)) {
 		SPDK_NOTICELOG("nvme_ctrlr_configure_aer failed!\n");
-		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES, NVME_TIMEOUT_INFINITE);
+		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
+				     ctrlr->opts.admin_timeout_ms);
 		return;
 	}
@@ -1539,7 +1567,8 @@ nvme_ctrlr_configure_aer_done(void *arg, const struct spdk_nvme_cpl *cpl)
 			return;
 		}
 	}
-	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES, NVME_TIMEOUT_INFINITE);
+	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES,
+			     ctrlr->opts.admin_timeout_ms);
 }
 
 static int
@@ -1567,7 +1596,8 @@ nvme_ctrlr_configure_aer(struct spdk_nvme_ctrlr *ctrlr)
 		config.bits.telemetry_log_notice = 1;
 	}
 
-	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER, NVME_TIMEOUT_INFINITE);
+	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_WAIT_FOR_CONFIGURE_AER,
+			     ctrlr->opts.admin_timeout_ms);
 
 	rc = nvme_ctrlr_cmd_set_async_event_config(ctrlr, config,
 			nvme_ctrlr_configure_aer_done,
@@ -1984,14 +2014,16 @@ nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
 			 * The controller has been enabled.
 			 *  Perform the rest of initialization serially.
 			 */
-			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE, NVME_TIMEOUT_INFINITE);
+			nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE,
+					     ctrlr->opts.admin_timeout_ms);
 			return 0;
 		}
 		break;
 
 	case NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE:
 		nvme_ctrlr_enable_admin_queue(ctrlr);
-		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY, NVME_TIMEOUT_INFINITE);
+		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY,
+				     ctrlr->opts.admin_timeout_ms);
 		break;
 
 	case NVME_CTRLR_STATE_IDENTIFY:
@@ -2020,7 +2052,8 @@ nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
 	case NVME_CTRLR_STATE_CONSTRUCT_NS:
 		rc = nvme_ctrlr_construct_namespaces(ctrlr);
-		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS, NVME_TIMEOUT_INFINITE);
+		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS,
+				     ctrlr->opts.admin_timeout_ms);
 		break;
 
 	case NVME_CTRLR_STATE_IDENTIFY_ACTIVE_NS:
@@ -2028,7 +2061,8 @@ nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
 		if (rc < 0) {
 			nvme_ctrlr_destruct_namespaces(ctrlr);
 		}
-		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS, NVME_TIMEOUT_INFINITE);
+		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_IDENTIFY_NS,
+				     ctrlr->opts.admin_timeout_ms);
 		break;
 
 	case NVME_CTRLR_STATE_IDENTIFY_NS:
@@ -2041,7 +2075,8 @@ nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
 	case NVME_CTRLR_STATE_IDENTIFY_ID_DESCS:
 		rc = nvme_ctrlr_identify_id_desc_namespaces(ctrlr);
-		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER, NVME_TIMEOUT_INFINITE);
+		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONFIGURE_AER,
+				     ctrlr->opts.admin_timeout_ms);
 		break;
 
 	case NVME_CTRLR_STATE_WAIT_FOR_IDENTIFY_ID_DESCS:
@@ -2058,12 +2093,14 @@ nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
 	case NVME_CTRLR_STATE_SET_SUPPORTED_LOG_PAGES:
 		rc = nvme_ctrlr_set_supported_log_pages(ctrlr);
-		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES, NVME_TIMEOUT_INFINITE);
+		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES,
+				     ctrlr->opts.admin_timeout_ms);
 		break;
 
 	case NVME_CTRLR_STATE_SET_SUPPORTED_FEATURES:
 		nvme_ctrlr_set_supported_features(ctrlr);
-		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_DB_BUF_CFG, NVME_TIMEOUT_INFINITE);
+		nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_SET_DB_BUF_CFG,
+				     ctrlr->opts.admin_timeout_ms);
 		break;
 
 	case NVME_CTRLR_STATE_SET_DB_BUF_CFG:

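The enforcement itself is not part of this diff: nvme_ctrlr_process_init()
already ends each poll with a deadline check along these lines, which is what
turns an expired admin_timeout_ms into an initialization error instead of an
infinite loop:

    if (ctrlr->state_timeout_tsc != NVME_TIMEOUT_INFINITE &&
        spdk_get_ticks() > ctrlr->state_timeout_tsc) {
            SPDK_ERRLOG("Initialization timed out in state %d\n", ctrlr->state);
            nvme_ctrlr_fail(ctrlr, false);
            return -1;
    }
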
test/unit/lib/nvme/nvme_ctrlr.c
@@ -1506,6 +1506,7 @@ test_ctrlr_get_default_ctrlr_opts(void)
 	CU_ASSERT(strlen(opts.hostnqn) == 0);
 	CU_ASSERT(strlen(opts.src_addr) == 0);
 	CU_ASSERT(strlen(opts.src_svcid) == 0);
+	CU_ASSERT_EQUAL(opts.admin_timeout_ms, 0);
 
 	/* set a consistent opts_size */
 	spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
@@ -1524,6 +1525,7 @@ test_ctrlr_get_default_ctrlr_opts(void)
 				 sizeof(opts.extended_host_id)) == 0);
 	CU_ASSERT(strlen(opts.src_addr) == 0);
 	CU_ASSERT(strlen(opts.src_svcid) == 0);
+	CU_ASSERT_EQUAL(opts.admin_timeout_ms, NVME_MAX_TIMEOUT_PERIOD * 1000);
 }
 
 static void