nvmf: Fix double controller destruction when subsys is deleted

When a subsystem is being deleted, we disconnect all qpairs, and
when the last qpair of a controller is disconnected, we start the
controller destruction process. This process requires sending a
message to the subsystem's thread to remove the controller from the
subsystem's list, and after that sending a message to the
controller's thread to release resources.
The problem is that the subsystem also destroys all attached
controllers, so the same controller can be destructed twice. The
order of these operations is unpredictable, and we may get a
heap-use-after-free or a double free.

To fix this problem, we can rely on the fact that the subsystem
can only be destroyed in the Inactive state. That means all qpairs
linked to the subsystem are already disconnected, and all
controllers are already destroyed or in the process of
destruction.
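
The destroy path therefore only needs to wait for any in-flight
controller destruction to finish. A condensed sketch of the
mechanism (the full implementation is in the subsystem diff below;
namespace and memory cleanup are elided):

static int
_nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem)
{
	if (!TAILQ_EMPTY(&subsystem->ctrlrs)) {
		/* Controllers are still destructing on their own threads;
		 * re-queue this function on the subsystem's thread instead
		 * of destructing them a second time. */
		subsystem->async_destroy = true;
		spdk_thread_send_msg(subsystem->thread,
				     _nvmf_subsystem_destroy_msg, subsystem);
		return -EINPROGRESS;
	}

	/* ... free namespaces and the subsystem itself, then invoke the
	 * completion callback if the destroy went asynchronous ... */
	return 0;
}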

The spdk_nvmf_subsystem_destroy API can now be asynchronous;
it accepts a completion callback with a callback argument.
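
For illustration, a minimal caller-side sketch of the new contract
(the callback name destroy_done and the shutdown helper are
hypothetical, not part of this change):

#include "spdk/nvmf.h"
#include "spdk/log.h"

static void
destroy_done(void *cb_arg)
{
	/* Reached only on the -EINPROGRESS path, after the last
	 * controller has been released on the subsystem's thread. */
	SPDK_NOTICELOG("subsystem destroyed asynchronously\n");
}

static void
shutdown_subsystem(struct spdk_nvmf_subsystem *subsystem)
{
	/* Must run on the subsystem's thread while the subsystem
	 * is in the Inactive state. */
	int rc = spdk_nvmf_subsystem_destroy(subsystem, destroy_done, NULL);

	if (rc == 0) {
		/* Destroyed synchronously; destroy_done is not called. */
		return;
	}
	if (rc == -EINPROGRESS) {
		/* destroy_done will be called when destruction completes. */
		return;
	}
	SPDK_ERRLOG("Failed to destroy subsystem, rc %d\n", rc);
}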

Change-Id: Ic72d69200bc8302dae2f8cd8ca44bc640c6a8116
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/6660
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>

@ -15,6 +15,8 @@ A new parameter `anagrpid` was added to the RPC `nvmf_subsystem_add_ns`.
A new parameter `anagrpid` was added to the RPC `nvmf_subsystem_listener_set_ana_state`.
`spdk_nvmf_subsystem_destroy` can now be asynchronous; it accepts a callback and callback argument.
### bdev
New API `spdk_bdev_get_memory_domains` has been added. It allows getting SPDK memory domains used by a bdev.

@ -3,6 +3,7 @@
*
* Copyright (c) Intel Corporation. All rights reserved.
* Copyright (c) 2018-2021 Mellanox Technologies LTD. All rights reserved.
* Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -357,13 +358,27 @@ struct spdk_nvmf_subsystem *spdk_nvmf_subsystem_create(struct spdk_nvmf_tgt *tgt
enum spdk_nvmf_subtype type,
uint32_t num_ns);
typedef void (*nvmf_subsystem_destroy_cb)(void *cb_arg);
/**
* Destroy an NVMe-oF subsystem. A subsystem may only be destroyed when in
* the Inactive state. See spdk_nvmf_subsystem_stop().
* the Inactive state. See spdk_nvmf_subsystem_stop(). A subsystem may be
* destroyed asynchronously; in that case \b cpl_cb will be called.
*
* \param subsystem The NVMe-oF subsystem to destroy.
* \param cpl_cb Optional callback to be called if the subsystem is destroyed asynchronously;
* only called if the return value is -EINPROGRESS
* \param cpl_cb_arg Optional user context to be passed to \b cpl_cb
*
* \retval 0 if the subsystem is destroyed; \b cpl_cb is not called in that case
* \retval -EINVAL if \b subsystem is a NULL pointer
* \retval -EAGAIN if \b subsystem is not in the Inactive state
* \retval -EALREADY if subsystem destruction has already started
* \retval -EINPROGRESS if the subsystem is destroyed asynchronously; \b cpl_cb will be called in that case
*/
void spdk_nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem);
int
spdk_nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem, nvmf_subsystem_destroy_cb cpl_cb,
void *cpl_cb_arg);
/**
* Function to be called once the subsystem has changed state.

@ -3,6 +3,7 @@
*
* Copyright (c) Intel Corporation. All rights reserved.
* Copyright (c) 2019, 2020 Mellanox Technologies LTD. All rights reserved.
* Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -176,6 +177,11 @@ nvmf_ctrlr_keep_alive_poll(void *ctx)
uint64_t now = spdk_get_ticks();
struct spdk_nvmf_ctrlr *ctrlr = ctx;
if (ctrlr->in_destruct) {
nvmf_ctrlr_stop_keep_alive_timer(ctrlr);
return SPDK_POLLER_IDLE;
}
SPDK_DEBUGLOG(nvmf, "Polling ctrlr keep alive timeout\n");
/* If the Keep alive feature is in use and the timer expires */
@ -465,6 +471,9 @@ _nvmf_ctrlr_destruct(void *ctx)
struct spdk_nvmf_reservation_log *log, *log_tmp;
struct spdk_nvmf_async_event_completion *event, *event_tmp;
assert(spdk_get_thread() == ctrlr->thread);
assert(ctrlr->in_destruct);
if (ctrlr->disconnect_in_progress) {
SPDK_ERRLOG("freeing ctrlr with disconnect in progress\n");
spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_destruct, ctrlr);
@ -904,6 +913,11 @@ nvmf_ctrlr_association_remove(void *ctx)
struct spdk_nvmf_ctrlr *ctrlr = ctx;
int rc;
nvmf_ctrlr_stop_association_timer(ctrlr);
if (ctrlr->in_destruct) {
return SPDK_POLLER_IDLE;
}
SPDK_DEBUGLOG(nvmf, "Disconnecting host from subsystem %s due to association timeout.\n",
ctrlr->subsys->subnqn);
@ -913,7 +927,6 @@ nvmf_ctrlr_association_remove(void *ctx)
assert(false);
}
nvmf_ctrlr_stop_association_timer(ctrlr);
return SPDK_POLLER_BUSY;
}

@ -3,6 +3,7 @@
*
* Copyright (c) Intel Corporation. All rights reserved.
* Copyright (c) 2018-2019, 2021 Mellanox Technologies LTD. All rights reserved.
* Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -357,12 +358,24 @@ nvmf_tgt_destroy_cb(void *io_device)
{
struct spdk_nvmf_tgt *tgt = io_device;
uint32_t i;
int rc;
if (tgt->subsystems) {
for (i = 0; i < tgt->max_subsystems; i++) {
if (tgt->subsystems[i]) {
nvmf_subsystem_remove_all_listeners(tgt->subsystems[i], true);
spdk_nvmf_subsystem_destroy(tgt->subsystems[i]);
rc = spdk_nvmf_subsystem_destroy(tgt->subsystems[i], nvmf_tgt_destroy_cb, tgt);
if (rc) {
if (rc == -EINPROGRESS) {
/* If rc is -EINPROGRESS, nvmf_tgt_destroy_cb will be called again when subsystem #i
* is destroyed; it will then continue destroying the remaining subsystems, if any. */
return;
} else {
SPDK_ERRLOG("Failed to destroy subsystem, id %u, rc %d\n", tgt->subsystems[i]->id, rc);
assert(0);
}
}
}
}
free(tgt->subsystems);
@ -940,6 +953,7 @@ _nvmf_ctrlr_free_from_qpair(void *ctx)
spdk_bit_array_clear(ctrlr->qpair_mask, qpair_ctx->qid);
count = spdk_bit_array_count_set(ctrlr->qpair_mask);
if (count == 0) {
assert(!ctrlr->in_destruct);
ctrlr->in_destruct = true;
spdk_thread_send_msg(ctrlr->subsys->thread, _nvmf_ctrlr_destruct, ctrlr);
}

@ -3,6 +3,7 @@
*
* Copyright (c) Intel Corporation. All rights reserved.
* Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
* Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -292,6 +293,9 @@ struct spdk_nvmf_subsystem {
/* boolean for state change synchronization */
bool changing_state;
bool destroying;
bool async_destroy;
struct spdk_nvmf_tgt *tgt;
/* Array of pointers to namespaces of size max_nsid indexed by nsid - 1 */
@ -314,6 +318,9 @@ struct spdk_nvmf_subsystem {
TAILQ_ENTRY(spdk_nvmf_subsystem) entries;
nvmf_subsystem_destroy_cb async_destroy_cb;
void *async_destroy_cb_arg;
char sn[SPDK_NVME_CTRLR_SN_LEN + 1];
char mn[SPDK_NVME_CTRLR_MN_LEN + 1];
char subnqn[SPDK_NVMF_NQN_MAX_LEN + 1];

@ -3,6 +3,7 @@
*
* Copyright (c) Intel Corporation. All rights reserved.
* Copyright (c) 2018-2021 Mellanox Technologies LTD. All rights reserved.
* Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -399,7 +400,7 @@ rpc_nvmf_subsystem_started(struct spdk_nvmf_subsystem *subsystem,
spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
"Subsystem %s start failed",
subsystem->subnqn);
spdk_nvmf_subsystem_destroy(subsystem);
spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
}
}
@ -480,6 +481,10 @@ rpc_nvmf_create_subsystem(struct spdk_jsonrpc_request *request,
rc = spdk_nvmf_subsystem_start(subsystem,
rpc_nvmf_subsystem_started,
request);
if (rc) {
spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
"Failed to start subsystem");
}
cleanup:
free(req->nqn);
@ -489,7 +494,7 @@ cleanup:
free(req);
if (rc && subsystem) {
spdk_nvmf_subsystem_destroy(subsystem);
spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
}
}
SPDK_RPC_REGISTER("nvmf_create_subsystem", rpc_nvmf_create_subsystem, SPDK_RPC_RUNTIME)
@ -507,15 +512,33 @@ free_rpc_delete_subsystem(struct rpc_delete_subsystem *r)
free(r->tgt_name);
}
static void rpc_nvmf_subsystem_destroy_complete_cb(void *cb_arg)
{
struct spdk_jsonrpc_request *request = cb_arg;
spdk_jsonrpc_send_bool_response(request, true);
}
static void
rpc_nvmf_subsystem_stopped(struct spdk_nvmf_subsystem *subsystem,
void *cb_arg, int status)
{
struct spdk_jsonrpc_request *request = cb_arg;
int rc;
nvmf_subsystem_remove_all_listeners(subsystem, true);
spdk_nvmf_subsystem_destroy(subsystem);
rc = spdk_nvmf_subsystem_destroy(subsystem, rpc_nvmf_subsystem_destroy_complete_cb, request);
if (rc) {
if (rc == -EINPROGRESS) {
/* response will be sent in completion callback */
return;
} else {
SPDK_ERRLOG("Subsystem destruction failed, rc %d\n", rc);
spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
"Subsystem destruction failed, rc %d", rc);
return;
}
}
spdk_jsonrpc_send_bool_response(request, true);
}

@ -3,6 +3,7 @@
*
* Copyright (c) Intel Corporation. All rights reserved.
* Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
* Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -68,6 +69,8 @@ enum spdk_nvmf_nqn_domain_states {
SPDK_NVMF_DOMAIN_ACCEPT_ANY = 2
};
static int _nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem);
/* Returns true if is a valid ASCII string as defined by the NVMe spec */
static bool
nvmf_valid_ascii_string(const void *buf, size_t size)
@ -352,33 +355,32 @@ _nvmf_subsystem_remove_listener(struct spdk_nvmf_subsystem *subsystem,
free(listener);
}
void
spdk_nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem)
static void
_nvmf_subsystem_destroy_msg(void *cb_arg)
{
struct spdk_nvmf_subsystem *subsystem = cb_arg;
_nvmf_subsystem_destroy(subsystem);
}
static int
_nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem)
{
struct spdk_nvmf_host *host, *host_tmp;
struct spdk_nvmf_ctrlr *ctrlr, *ctrlr_tmp;
struct spdk_nvmf_ns *ns;
nvmf_subsystem_destroy_cb async_destroy_cb = NULL;
void *async_destroy_cb_arg = NULL;
int rc;
if (!subsystem) {
return;
}
assert(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE);
SPDK_DEBUGLOG(nvmf, "subsystem is %p\n", subsystem);
nvmf_subsystem_remove_all_listeners(subsystem, false);
pthread_mutex_lock(&subsystem->mutex);
TAILQ_FOREACH_SAFE(host, &subsystem->hosts, link, host_tmp) {
nvmf_subsystem_remove_host(subsystem, host);
}
pthread_mutex_unlock(&subsystem->mutex);
TAILQ_FOREACH_SAFE(ctrlr, &subsystem->ctrlrs, link, ctrlr_tmp) {
nvmf_ctrlr_destruct(ctrlr);
if (!TAILQ_EMPTY(&subsystem->ctrlrs)) {
SPDK_DEBUGLOG(nvmf, "subsystem %p %s has active controllers\n", subsystem, subsystem->subnqn);
subsystem->async_destroy = true;
rc = spdk_thread_send_msg(subsystem->thread, _nvmf_subsystem_destroy_msg, subsystem);
if (rc) {
SPDK_ERRLOG("Failed to send thread msg, rc %d\n", rc);
assert(0);
return rc;
}
return -EINPROGRESS;
}
ns = spdk_nvmf_subsystem_get_first_ns(subsystem);
@ -397,9 +399,62 @@ spdk_nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem)
pthread_mutex_destroy(&subsystem->mutex);
if (subsystem->async_destroy) {
async_destroy_cb = subsystem->async_destroy_cb;
async_destroy_cb_arg = subsystem->async_destroy_cb_arg;
}
free(subsystem);
if (async_destroy_cb) {
async_destroy_cb(async_destroy_cb_arg);
}
return 0;
}
int
spdk_nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem, nvmf_subsystem_destroy_cb cpl_cb,
void *cpl_cb_arg)
{
struct spdk_nvmf_host *host, *host_tmp;
if (!subsystem) {
return -EINVAL;
}
assert(spdk_get_thread() == subsystem->thread);
if (subsystem->state != SPDK_NVMF_SUBSYSTEM_INACTIVE) {
SPDK_ERRLOG("Subsystem can only be destroyed in inactive state\n");
assert(0);
return -EAGAIN;
}
if (subsystem->destroying) {
SPDK_ERRLOG("Subsystem destruction is already started\n");
assert(0);
return -EALREADY;
}
subsystem->destroying = true;
SPDK_DEBUGLOG(nvmf, "subsystem is %p %s\n", subsystem, subsystem->subnqn);
nvmf_subsystem_remove_all_listeners(subsystem, false);
pthread_mutex_lock(&subsystem->mutex);
TAILQ_FOREACH_SAFE(host, &subsystem->hosts, link, host_tmp) {
nvmf_subsystem_remove_host(subsystem, host);
}
pthread_mutex_unlock(&subsystem->mutex);
subsystem->async_destroy_cb = cpl_cb;
subsystem->async_destroy_cb_arg = cpl_cb_arg;
return _nvmf_subsystem_destroy(subsystem);
}
/* we have to use the typedef in the function declaration to appease astyle. */
typedef enum spdk_nvmf_subsystem_state spdk_nvmf_subsystem_state_t;
@ -1844,7 +1899,9 @@ void
nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem,
struct spdk_nvmf_ctrlr *ctrlr)
{
assert(spdk_get_thread() == subsystem->thread);
assert(subsystem == ctrlr->subsys);
SPDK_DEBUGLOG(nvmf, "remove ctrlr %p from subsys %p %s\n", ctrlr, subsystem, subsystem->subnqn);
TAILQ_REMOVE(&subsystem->ctrlrs, ctrlr, link);
}

@ -3,6 +3,7 @@
*
* Copyright (c) Intel Corporation.
* All rights reserved.
* Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions

@ -3,6 +3,7 @@
*
* Copyright (c) Intel Corporation. All rights reserved.
* Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
* Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -2279,6 +2280,7 @@ test_nvmf_ctrlr_create_destruct(void)
CU_ASSERT(ctrlr->vcprop.csts.bits.rdy == 0);
CU_ASSERT(ctrlr->dif_insert_or_strip == true);
ctrlr->in_destruct = true;
nvmf_ctrlr_destruct(ctrlr);
poll_threads();
CU_ASSERT(TAILQ_EMPTY(&subsystem.ctrlrs));

@ -3,6 +3,7 @@
*
* Copyright (c) Intel Corporation.
* All rights reserved.
* Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -257,6 +258,7 @@ test_discovery_log(void)
struct spdk_nvmf_discovery_log_page *disc_log;
struct spdk_nvmf_discovery_log_page_entry *entry;
struct spdk_nvme_transport_id trid = {};
int rc;
iov.iov_base = buffer;
iov.iov_len = 8192;
@ -320,7 +322,8 @@ test_discovery_log(void)
sizeof(*entry));
CU_ASSERT(entry->trtype == 42);
subsystem->state = SPDK_NVMF_SUBSYSTEM_INACTIVE;
spdk_nvmf_subsystem_destroy(subsystem);
rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
CU_ASSERT(rc == 0);
free(tgt.subsystems);
}

@ -57,7 +57,8 @@ DEFINE_STUB(nvmf_transport_poll_group_poll, int, (struct spdk_nvmf_transport_pol
DEFINE_STUB(nvmf_transport_accept, uint32_t, (struct spdk_nvmf_transport *transport), 0);
DEFINE_STUB_V(nvmf_subsystem_remove_all_listeners, (struct spdk_nvmf_subsystem *subsystem,
bool stop));
DEFINE_STUB_V(spdk_nvmf_subsystem_destroy, (struct spdk_nvmf_subsystem *subsystem));
DEFINE_STUB(spdk_nvmf_subsystem_destroy, int, (struct spdk_nvmf_subsystem *subsystem,
nvmf_subsystem_destroy_cb cpl_cb, void *cpl_cb_arg), 0);
DEFINE_STUB(spdk_nvmf_subsystem_get_first_listener, struct spdk_nvmf_subsystem_listener *,
(struct spdk_nvmf_subsystem *subsystem), NULL);
DEFINE_STUB(spdk_nvmf_subsystem_get_next_listener, struct spdk_nvmf_subsystem_listener *,

@ -353,6 +353,7 @@ nvmf_test_create_subsystem(void)
struct spdk_nvmf_tgt tgt = {};
char nqn[256];
struct spdk_nvmf_subsystem *subsystem;
int rc;
tgt.max_subsystems = 1024;
tgt.subsystems = calloc(tgt.max_subsystems, sizeof(struct spdk_nvmf_subsystem *));
@ -362,22 +363,24 @@ nvmf_test_create_subsystem(void)
subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
SPDK_CU_ASSERT_FATAL(subsystem != NULL);
CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
spdk_nvmf_subsystem_destroy(subsystem);
rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
CU_ASSERT(rc == 0);
/* valid name with complex reverse domain */
snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk-full--rev-domain.name:subsystem1");
subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
SPDK_CU_ASSERT_FATAL(subsystem != NULL);
CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
spdk_nvmf_subsystem_destroy(subsystem);
rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
CU_ASSERT(rc == 0);
/* Valid name discovery controller */
snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:subsystem1");
subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
SPDK_CU_ASSERT_FATAL(subsystem != NULL);
CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
spdk_nvmf_subsystem_destroy(subsystem);
rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
CU_ASSERT(rc == 0);
/* Invalid name, no user supplied string */
snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:");
@ -389,7 +392,8 @@ nvmf_test_create_subsystem(void)
subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
SPDK_CU_ASSERT_FATAL(subsystem != NULL);
CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
spdk_nvmf_subsystem_destroy(subsystem);
rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
CU_ASSERT(rc == 0);
/* Invalid name, domain label > 63 characters */
snprintf(nqn, sizeof(nqn),
@ -425,7 +429,8 @@ nvmf_test_create_subsystem(void)
subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
SPDK_CU_ASSERT_FATAL(subsystem != NULL);
CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
spdk_nvmf_subsystem_destroy(subsystem);
rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
CU_ASSERT(rc == 0);
/* Invalid name, too long */
snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:");
@ -440,7 +445,8 @@ nvmf_test_create_subsystem(void)
subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
SPDK_CU_ASSERT_FATAL(subsystem != NULL);
CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
spdk_nvmf_subsystem_destroy(subsystem);
rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
CU_ASSERT(rc == 0);
/* Invalid name user string contains an invalid utf-8 character */
snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:\xFFsubsystem1");
@ -452,7 +458,8 @@ nvmf_test_create_subsystem(void)
subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
SPDK_CU_ASSERT_FATAL(subsystem != NULL);
CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
spdk_nvmf_subsystem_destroy(subsystem);
rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
CU_ASSERT(rc == 0);
/* Invalid uuid (too long) */
snprintf(nqn, sizeof(nqn),
@ -1442,6 +1449,11 @@ test_nvmf_ns_reservation_add_remove_registrant(void)
CU_ASSERT(ns.gen == 2);
}
static void
test_nvmf_subsystem_destroy_cb(void *cb_arg)
{
}
static void
test_nvmf_subsystem_add_ctrlr(void)
{
@ -1467,7 +1479,8 @@ test_nvmf_subsystem_add_ctrlr(void)
nvmf_subsystem_remove_ctrlr(subsystem, &ctrlr);
CU_ASSERT(TAILQ_EMPTY(&subsystem->ctrlrs));
spdk_nvmf_subsystem_destroy(subsystem);
rc = spdk_nvmf_subsystem_destroy(subsystem, test_nvmf_subsystem_destroy_cb, NULL);
CU_ASSERT(rc == 0);
free(tgt.subsystems);
}
@ -1504,7 +1517,7 @@ test_spdk_nvmf_subsystem_add_host(void)
rc = spdk_nvmf_subsystem_remove_host(subsystem, hostnqn);
CU_ASSERT(rc == -ENOENT);
spdk_nvmf_subsystem_destroy(subsystem);
spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
free(tgt.subsystems);
}