bdev/nvme: Aggregate multiple ctrlrs into a single bdev ctrlr

This patch enables us to aggregate multiple ctrlrs in the same NVM
subsystem into a single bdev ctrlr to support multipath.

This patch has a critical limitation: the ctrlrs that are aggregated
must have no namespaces, so no nvme bdev is created yet. This
limitation will be removed in the next patch.

The design is as follows.

An nvme_bdev_ctrlr is created to aggregate multiple nvme_ctrlrs in
the same NVM subsystem. The name that previously belonged to
nvme_ctrlr is moved to nvme_bdev_ctrlr.
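
Condensed from the header change in this patch, the two structures
relate roughly as follows (unrelated fields omitted):

struct nvme_bdev_ctrlr {
	char *name;                          /* name given via bdev_nvme_attach_controller */
	TAILQ_HEAD(, nvme_ctrlr) ctrlrs;     /* nvme_ctrlrs in the same NVM subsystem */
	TAILQ_ENTRY(nvme_bdev_ctrlr) tailq;  /* link on the global g_nvme_bdev_ctrlrs list */
};

struct nvme_ctrlr {
	...
	struct nvme_bdev_ctrlr *nbdev_ctrlr; /* back pointer; nvme_ctrlr no longer owns a name */
	TAILQ_ENTRY(nvme_ctrlr) tailq;       /* link on nbdev_ctrlr->ctrlrs */
	...
};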

The NVMe bdev module now has both the failover feature and the
multipath feature. To choose between failover and multipath, a new
parameter, multipath, is added to the bdev_nvme_attach_controller RPC.

When we attach a new trid to an existing nvme_bdev_ctrlr, we use the
failover feature if multipath is false, and the multipath feature if
multipath is true.
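
For example, the attach path in bdev_nvme_create() picks its probe
callback based on this flag; the following is simplified from the diff
below:

if (nvme_bdev_ctrlr_get(base_name) == NULL || multipath) {
	/* First path for this name, or multipath requested:
	 * create a new nvme_ctrlr under the nvme_bdev_ctrlr. */
	attach_cb = connect_attach_cb;
} else {
	/* The name already exists and multipath is false:
	 * register the new trid as a failover path instead. */
	attach_cb = connect_set_failover_cb;
}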

nvme_bdev_ctrlr holds a list of nvme_ctrlrs, guarded by the global
mutex. Callers can look up an nvme_ctrlr within an nvme_bdev_ctrlr
using its trid as a key. nvme_bdev_ctrlr is not registered as an
io_device.
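
A minimal sketch of that lookup, condensed from nvme_ctrlr_get() and
nvme_bdev_ctrlr_get_ctrlr() in the diff below:

pthread_mutex_lock(&g_bdev_nvme_mutex);
TAILQ_FOREACH(nvme_ctrlr, &nbdev_ctrlr->ctrlrs, tailq) {
	if (spdk_nvme_transport_id_compare(trid, nvme_ctrlr->connected_trid) == 0) {
		break;  /* found the path connected via this trid */
	}
}
pthread_mutex_unlock(&g_bdev_nvme_mutex);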

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I20571bf89a65d53a00fb77236ad1b193e88b8153
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8119
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>

@ -226,10 +226,39 @@ static struct spdk_bdev_module nvme_if = {
};
SPDK_BDEV_MODULE_REGISTER(nvme, &nvme_if)
struct nvme_ctrlrs g_nvme_ctrlrs = TAILQ_HEAD_INITIALIZER(g_nvme_ctrlrs);
struct nvme_bdev_ctrlrs g_nvme_bdev_ctrlrs = TAILQ_HEAD_INITIALIZER(g_nvme_bdev_ctrlrs);
pthread_mutex_t g_bdev_nvme_mutex = PTHREAD_MUTEX_INITIALIZER;
bool g_bdev_nvme_module_finish;
static struct nvme_bdev_ctrlr *
nvme_bdev_ctrlr_get(const char *name)
{
struct nvme_bdev_ctrlr *nbdev_ctrlr;
TAILQ_FOREACH(nbdev_ctrlr, &g_nvme_bdev_ctrlrs, tailq) {
if (strcmp(name, nbdev_ctrlr->name) == 0) {
break;
}
}
return nbdev_ctrlr;
}
static struct nvme_ctrlr *
nvme_bdev_ctrlr_get_ctrlr(struct nvme_bdev_ctrlr *nbdev_ctrlr,
const struct spdk_nvme_transport_id *trid)
{
struct nvme_ctrlr *nvme_ctrlr;
TAILQ_FOREACH(nvme_ctrlr, &nbdev_ctrlr->ctrlrs, tailq) {
if (spdk_nvme_transport_id_compare(trid, nvme_ctrlr->connected_trid) == 0) {
break;
}
}
return nvme_ctrlr;
}
struct nvme_ns *
nvme_ctrlr_get_ns(struct nvme_ctrlr *nvme_ctrlr, uint32_t nsid)
{
@ -280,11 +309,13 @@ nvme_ctrlr_get_next_active_ns(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ns *ns)
static struct nvme_ctrlr *
nvme_ctrlr_get(const struct spdk_nvme_transport_id *trid)
{
struct nvme_ctrlr *nvme_ctrlr;
struct nvme_bdev_ctrlr *nbdev_ctrlr;
struct nvme_ctrlr *nvme_ctrlr = NULL;
pthread_mutex_lock(&g_bdev_nvme_mutex);
TAILQ_FOREACH(nvme_ctrlr, &g_nvme_ctrlrs, tailq) {
if (spdk_nvme_transport_id_compare(trid, nvme_ctrlr->connected_trid) == 0) {
TAILQ_FOREACH(nbdev_ctrlr, &g_nvme_bdev_ctrlrs, tailq) {
nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, trid);
if (nvme_ctrlr != NULL) {
break;
}
}
@ -296,17 +327,17 @@ nvme_ctrlr_get(const struct spdk_nvme_transport_id *trid)
struct nvme_ctrlr *
nvme_ctrlr_get_by_name(const char *name)
{
struct nvme_ctrlr *nvme_ctrlr;
struct nvme_bdev_ctrlr *nbdev_ctrlr;
struct nvme_ctrlr *nvme_ctrlr = NULL;
if (name == NULL) {
return NULL;
}
pthread_mutex_lock(&g_bdev_nvme_mutex);
TAILQ_FOREACH(nvme_ctrlr, &g_nvme_ctrlrs, tailq) {
if (strcmp(name, nvme_ctrlr->name) == 0) {
break;
}
nbdev_ctrlr = nvme_bdev_ctrlr_get(name);
if (nbdev_ctrlr != NULL) {
nvme_ctrlr = TAILQ_FIRST(&nbdev_ctrlr->ctrlrs);
}
pthread_mutex_unlock(&g_bdev_nvme_mutex);
@ -316,11 +347,14 @@ nvme_ctrlr_get_by_name(const char *name)
void
nvme_ctrlr_for_each(nvme_ctrlr_for_each_fn fn, void *ctx)
{
struct nvme_bdev_ctrlr *nbdev_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
pthread_mutex_lock(&g_bdev_nvme_mutex);
TAILQ_FOREACH(nvme_ctrlr, &g_nvme_ctrlrs, tailq) {
fn(nvme_ctrlr, ctx);
TAILQ_FOREACH(nbdev_ctrlr, &g_nvme_bdev_ctrlrs, tailq) {
TAILQ_FOREACH(nvme_ctrlr, &nbdev_ctrlr->ctrlrs, tailq) {
fn(nvme_ctrlr, ctx);
}
}
pthread_mutex_unlock(&g_bdev_nvme_mutex);
}
@ -354,6 +388,26 @@ nvme_bdev_dump_trid_json(const struct spdk_nvme_transport_id *trid, struct spdk_
}
}
static void
nvme_bdev_ctrlr_delete(struct nvme_bdev_ctrlr *nbdev_ctrlr,
struct nvme_ctrlr *nvme_ctrlr)
{
pthread_mutex_lock(&g_bdev_nvme_mutex);
TAILQ_REMOVE(&nbdev_ctrlr->ctrlrs, nvme_ctrlr, tailq);
if (!TAILQ_EMPTY(&nbdev_ctrlr->ctrlrs)) {
pthread_mutex_unlock(&g_bdev_nvme_mutex);
return;
}
TAILQ_REMOVE(&g_nvme_bdev_ctrlrs, nbdev_ctrlr, tailq);
pthread_mutex_unlock(&g_bdev_nvme_mutex);
free(nbdev_ctrlr->name);
free(nbdev_ctrlr);
}
static void
nvme_ctrlr_delete(struct nvme_ctrlr *nvme_ctrlr)
{
@ -368,12 +422,12 @@ nvme_ctrlr_delete(struct nvme_ctrlr *nvme_ctrlr)
nvme_ctrlr->opal_dev = NULL;
}
pthread_mutex_lock(&g_bdev_nvme_mutex);
TAILQ_REMOVE(&g_nvme_ctrlrs, nvme_ctrlr, tailq);
pthread_mutex_unlock(&g_bdev_nvme_mutex);
if (nvme_ctrlr->nbdev_ctrlr) {
nvme_bdev_ctrlr_delete(nvme_ctrlr->nbdev_ctrlr, nvme_ctrlr);
}
spdk_nvme_detach(nvme_ctrlr->ctrlr);
spdk_poller_unregister(&nvme_ctrlr->adminq_timer_poller);
free(nvme_ctrlr->name);
for (i = 0; i < nvme_ctrlr->num_ns; i++) {
free(nvme_ctrlr->namespaces[i]);
}
@ -397,9 +451,9 @@ nvme_ctrlr_unregister_cb(void *io_device)
nvme_ctrlr_delete(nvme_ctrlr);
pthread_mutex_lock(&g_bdev_nvme_mutex);
if (g_bdev_nvme_module_finish && TAILQ_EMPTY(&g_nvme_ctrlrs)) {
if (g_bdev_nvme_module_finish && TAILQ_EMPTY(&g_nvme_bdev_ctrlrs)) {
pthread_mutex_unlock(&g_bdev_nvme_mutex);
spdk_io_device_unregister(&g_nvme_ctrlrs, NULL);
spdk_io_device_unregister(&g_nvme_bdev_ctrlrs, NULL);
spdk_bdev_module_fini_done();
return;
}
@ -1298,7 +1352,7 @@ bdev_nvme_create_ctrlr_channel_cb(void *io_device, void *ctx_buf)
struct spdk_io_channel *pg_ch;
int rc;
pg_ch = spdk_get_io_channel(&g_nvme_ctrlrs);
pg_ch = spdk_get_io_channel(&g_nvme_bdev_ctrlrs);
if (!pg_ch) {
return -1;
}
@ -1807,7 +1861,7 @@ nvme_bdev_create(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ns *nvme_ns)
bdev->nvme_ns = nvme_ns;
bdev->opal = nvme_ctrlr->opal_dev != NULL;
rc = nvme_disk_create(&bdev->disk, nvme_ctrlr->name, nvme_ctrlr->ctrlr,
rc = nvme_disk_create(&bdev->disk, nvme_ctrlr->nbdev_ctrlr->name, nvme_ctrlr->ctrlr,
nvme_ns->ns, nvme_ctrlr->prchk_flags, bdev);
if (rc != 0) {
SPDK_ERRLOG("Failed to create NVMe disk\n");
@ -2275,7 +2329,7 @@ nvme_ctrlr_create_done(struct nvme_ctrlr *nvme_ctrlr,
bdev_nvme_create_ctrlr_channel_cb,
bdev_nvme_destroy_ctrlr_channel_cb,
sizeof(struct nvme_ctrlr_channel),
nvme_ctrlr->name);
nvme_ctrlr->nbdev_ctrlr->name);
nvme_ctrlr_populate_namespaces(nvme_ctrlr, ctx);
}
@ -2346,6 +2400,84 @@ nvme_ctrlr_init_ana_log_page(struct nvme_ctrlr *nvme_ctrlr,
nvme_ctrlr);
}
/* hostnqn and subnqn were already verified before attaching a controller.
* Hence check only the multipath capability and cntlid here.
*/
static bool
bdev_nvme_check_multipath(struct nvme_bdev_ctrlr *nbdev_ctrlr, struct spdk_nvme_ctrlr *ctrlr)
{
struct nvme_ctrlr *tmp;
const struct spdk_nvme_ctrlr_data *cdata, *tmp_cdata;
cdata = spdk_nvme_ctrlr_get_data(ctrlr);
/* TODO: This check is removed in the following patches. */
if (spdk_nvme_ctrlr_get_num_ns(ctrlr) > 0) {
return false;
}
if (!cdata->cmic.multi_ctrlr) {
SPDK_ERRLOG("Ctrlr%u does not support multipath.\n", cdata->cntlid);
return false;
}
TAILQ_FOREACH(tmp, &nbdev_ctrlr->ctrlrs, tailq) {
tmp_cdata = spdk_nvme_ctrlr_get_data(tmp->ctrlr);
/* TODO: This check is removed in the following patches. */
if (spdk_nvme_ctrlr_get_num_ns(tmp->ctrlr) > 0) {
return false;
}
if (!tmp_cdata->cmic.multi_ctrlr) {
SPDK_ERRLOG("Ctrlr%u does not support multipath.\n", cdata->cntlid);
return false;
}
if (cdata->cntlid == tmp_cdata->cntlid) {
SPDK_ERRLOG("cntlid %u are duplicated.\n", tmp_cdata->cntlid);
return false;
}
}
return true;
}
static int
nvme_bdev_ctrlr_create(const char *name, struct nvme_ctrlr *nvme_ctrlr)
{
struct nvme_bdev_ctrlr *nbdev_ctrlr;
struct spdk_nvme_ctrlr *ctrlr = nvme_ctrlr->ctrlr;
int rc = 0;
pthread_mutex_lock(&g_bdev_nvme_mutex);
nbdev_ctrlr = nvme_bdev_ctrlr_get(name);
if (nbdev_ctrlr != NULL) {
if (!bdev_nvme_check_multipath(nbdev_ctrlr, ctrlr)) {
rc = -EINVAL;
goto exit;
}
} else {
nbdev_ctrlr = calloc(1, sizeof(*nbdev_ctrlr));
if (nbdev_ctrlr == NULL) {
SPDK_ERRLOG("Failed to allocate nvme_bdev_ctrlr.\n");
rc = -ENOMEM;
goto exit;
}
nbdev_ctrlr->name = strdup(name);
if (nbdev_ctrlr->name == NULL) {
SPDK_ERRLOG("Failed to allocate name of nvme_bdev_ctrlr.\n");
free(nbdev_ctrlr);
rc = -ENOMEM;
goto exit;
}
TAILQ_INIT(&nbdev_ctrlr->ctrlrs);
TAILQ_INSERT_TAIL(&g_nvme_bdev_ctrlrs, nbdev_ctrlr, tailq);
}
nvme_ctrlr->nbdev_ctrlr = nbdev_ctrlr;
TAILQ_INSERT_TAIL(&nbdev_ctrlr->ctrlrs, nvme_ctrlr, tailq);
exit:
pthread_mutex_unlock(&g_bdev_nvme_mutex);
return rc;
}
static int
nvme_ctrlr_create(struct spdk_nvme_ctrlr *ctrlr,
const char *name,
@ -2399,11 +2531,6 @@ nvme_ctrlr_create(struct spdk_nvme_ctrlr *ctrlr,
nvme_ctrlr->thread = spdk_get_thread();
nvme_ctrlr->ctrlr = ctrlr;
nvme_ctrlr->ref = 1;
nvme_ctrlr->name = strdup(name);
if (nvme_ctrlr->name == NULL) {
rc = -ENOMEM;
goto err;
}
if (spdk_nvme_ctrlr_is_ocssd_supported(ctrlr)) {
SPDK_ERRLOG("OCSSDs are not supported");
@ -2416,10 +2543,6 @@ nvme_ctrlr_create(struct spdk_nvme_ctrlr *ctrlr,
nvme_ctrlr->adminq_timer_poller = SPDK_POLLER_REGISTER(bdev_nvme_poll_adminq, nvme_ctrlr,
g_opts.nvme_adminq_poll_period_us);
pthread_mutex_lock(&g_bdev_nvme_mutex);
TAILQ_INSERT_TAIL(&g_nvme_ctrlrs, nvme_ctrlr, tailq);
pthread_mutex_unlock(&g_bdev_nvme_mutex);
if (g_opts.timeout_us > 0) {
/* Register timeout callback. Timeout values for IO vs. admin reqs can be different. */
/* If timeout_admin_us is 0 (not specified), admin uses same timeout as IO. */
@ -2437,6 +2560,11 @@ nvme_ctrlr_create(struct spdk_nvme_ctrlr *ctrlr,
nvme_ctrlr->opal_dev = spdk_opal_dev_construct(ctrlr);
}
rc = nvme_bdev_ctrlr_create(name, nvme_ctrlr);
if (rc != 0) {
goto err;
}
cdata = spdk_nvme_ctrlr_get_data(ctrlr);
if (cdata->cmic.ana_reporting) {
@ -2602,7 +2730,7 @@ bdev_nvme_set_opts(const struct spdk_bdev_nvme_opts *opts)
}
if (g_bdev_nvme_init_thread != NULL) {
if (!TAILQ_EMPTY(&g_nvme_ctrlrs)) {
if (!TAILQ_EMPTY(&g_nvme_bdev_ctrlrs)) {
return -EPERM;
}
}
@ -2890,7 +3018,8 @@ bdev_nvme_create(struct spdk_nvme_transport_id *trid,
uint32_t prchk_flags,
spdk_bdev_create_nvme_fn cb_fn,
void *cb_ctx,
struct spdk_nvme_ctrlr_opts *opts)
struct spdk_nvme_ctrlr_opts *opts,
bool multipath)
{
struct nvme_probe_skip_entry *entry, *tmp;
struct nvme_async_probe_ctx *ctx;
@ -2936,7 +3065,7 @@ bdev_nvme_create(struct spdk_nvme_transport_id *trid,
ctx->opts.keep_alive_timeout_ms = g_opts.keep_alive_timeout_ms;
ctx->opts.disable_read_ana_log_page = true;
if (nvme_ctrlr_get_by_name(base_name) == NULL) {
if (nvme_bdev_ctrlr_get(base_name) == NULL || multipath) {
attach_cb = connect_attach_cb;
} else {
attach_cb = connect_set_failover_cb;
@ -2977,39 +3106,53 @@ bdev_nvme_delete_secondary_trid(struct nvme_ctrlr *nvme_ctrlr,
int
bdev_nvme_delete(const char *name, const struct spdk_nvme_transport_id *trid)
{
struct nvme_ctrlr *nvme_ctrlr;
struct nvme_bdev_ctrlr *nbdev_ctrlr;
struct nvme_ctrlr *nvme_ctrlr, *tmp_nvme_ctrlr;
struct nvme_ctrlr_trid *ctrlr_trid;
int rc = 0;
if (name == NULL) {
return -EINVAL;
}
nvme_ctrlr = nvme_ctrlr_get_by_name(name);
if (nvme_ctrlr == NULL) {
SPDK_ERRLOG("Failed to find NVMe controller\n");
nbdev_ctrlr = nvme_bdev_ctrlr_get(name);
if (nbdev_ctrlr == NULL) {
SPDK_ERRLOG("Failed to find NVMe bdev controller\n");
return -ENODEV;
}
/* case 1: remove the controller itself. */
if (trid == NULL) {
return _bdev_nvme_delete(nvme_ctrlr, false);
}
/* The following is based on an assumption that one trid can be registered
* to only one nvme_ctrlr.
*/
/* case 2: we are currently using the path to be removed. */
if (!spdk_nvme_transport_id_compare(trid, nvme_ctrlr->connected_trid)) {
ctrlr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
assert(nvme_ctrlr->connected_trid == &ctrlr_trid->trid);
/* case 2A: the current path is the only path. */
if (!TAILQ_NEXT(ctrlr_trid, link)) {
return _bdev_nvme_delete(nvme_ctrlr, false);
TAILQ_FOREACH_SAFE(nvme_ctrlr, &nbdev_ctrlr->ctrlrs, tailq, tmp_nvme_ctrlr) {
if (trid == NULL) {
/* Case 1: Remove all nvme_ctrlrs of the nvme_bdev_ctrlr. */
rc = _bdev_nvme_delete(nvme_ctrlr, false);
if (rc != 0) {
return rc;
}
} else if (!spdk_nvme_transport_id_compare(trid, nvme_ctrlr->connected_trid)) {
ctrlr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
assert(nvme_ctrlr->connected_trid == &ctrlr_trid->trid);
if (!TAILQ_NEXT(ctrlr_trid, link)) {
/* Case 2A: The current path is the only path. */
return _bdev_nvme_delete(nvme_ctrlr, false);
} else {
/* Case 2B: There is an alternative path. */
return bdev_nvme_failover(nvme_ctrlr, true);
}
} else {
/* Case 3: We are not using the specified path. */
rc = bdev_nvme_delete_secondary_trid(nvme_ctrlr, trid);
if (rc != -ENXIO) {
return rc;
}
}
/* case 2B: there is an alternative path. */
return bdev_nvme_failover(nvme_ctrlr, true);
}
/* case 3: We are not using the specified path. */
return bdev_nvme_delete_secondary_trid(nvme_ctrlr, trid);
/* All nvme_ctrlrs were deleted or no nvme_ctrlr which had the trid was found. */
return rc;
}
static int
@ -3017,7 +3160,7 @@ bdev_nvme_library_init(void)
{
g_bdev_nvme_init_thread = spdk_get_thread();
spdk_io_device_register(&g_nvme_ctrlrs, bdev_nvme_create_poll_group_cb,
spdk_io_device_register(&g_nvme_bdev_ctrlrs, bdev_nvme_create_poll_group_cb,
bdev_nvme_destroy_poll_group_cb,
sizeof(struct nvme_poll_group), "nvme_poll_groups");
@ -3027,7 +3170,8 @@ bdev_nvme_library_init(void)
static void
bdev_nvme_library_fini(void)
{
struct nvme_ctrlr *nvme_ctrlr, *tmp;
struct nvme_bdev_ctrlr *nbdev_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
struct nvme_probe_skip_entry *entry, *entry_tmp;
spdk_poller_unregister(&g_hotplug_poller);
@ -3040,26 +3184,28 @@ bdev_nvme_library_fini(void)
}
pthread_mutex_lock(&g_bdev_nvme_mutex);
TAILQ_FOREACH_SAFE(nvme_ctrlr, &g_nvme_ctrlrs, tailq, tmp) {
pthread_mutex_lock(&nvme_ctrlr->mutex);
if (nvme_ctrlr->destruct) {
/* This controller's destruction was already started
* before the application started shutting down
*/
TAILQ_FOREACH(nbdev_ctrlr, &g_nvme_bdev_ctrlrs, tailq) {
TAILQ_FOREACH(nvme_ctrlr, &nbdev_ctrlr->ctrlrs, tailq) {
pthread_mutex_lock(&nvme_ctrlr->mutex);
if (nvme_ctrlr->destruct) {
/* This controller's destruction was already started
* before the application started shutting down
*/
pthread_mutex_unlock(&nvme_ctrlr->mutex);
continue;
}
nvme_ctrlr->destruct = true;
pthread_mutex_unlock(&nvme_ctrlr->mutex);
continue;
}
nvme_ctrlr->destruct = true;
pthread_mutex_unlock(&nvme_ctrlr->mutex);
spdk_thread_send_msg(nvme_ctrlr->thread, _nvme_ctrlr_destruct,
nvme_ctrlr);
spdk_thread_send_msg(nvme_ctrlr->thread, _nvme_ctrlr_destruct,
nvme_ctrlr);
}
}
g_bdev_nvme_module_finish = true;
if (TAILQ_EMPTY(&g_nvme_ctrlrs)) {
if (TAILQ_EMPTY(&g_nvme_bdev_ctrlrs)) {
pthread_mutex_unlock(&g_bdev_nvme_mutex);
spdk_io_device_unregister(&g_nvme_ctrlrs, NULL);
spdk_io_device_unregister(&g_nvme_bdev_ctrlrs, NULL);
spdk_bdev_module_fini_done();
return;
}
@ -4016,7 +4162,7 @@ nvme_ctrlr_config_json(struct spdk_json_write_ctx *w,
spdk_json_write_named_string(w, "method", "bdev_nvme_attach_controller");
spdk_json_write_named_object_begin(w, "params");
spdk_json_write_named_string(w, "name", nvme_ctrlr->name);
spdk_json_write_named_string(w, "name", nvme_ctrlr->nbdev_ctrlr->name);
nvme_bdev_dump_trid_json(trid, w);
spdk_json_write_named_bool(w, "prchk_reftag",
(nvme_ctrlr->prchk_flags & SPDK_NVME_IO_FLAGS_PRCHK_REFTAG) != 0);
@ -4045,14 +4191,17 @@ bdev_nvme_hotplug_config_json(struct spdk_json_write_ctx *w)
static int
bdev_nvme_config_json(struct spdk_json_write_ctx *w)
{
struct nvme_bdev_ctrlr *nbdev_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
bdev_nvme_opts_config_json(w);
pthread_mutex_lock(&g_bdev_nvme_mutex);
TAILQ_FOREACH(nvme_ctrlr, &g_nvme_ctrlrs, tailq) {
nvme_ctrlr_config_json(w, nvme_ctrlr);
TAILQ_FOREACH(nbdev_ctrlr, &g_nvme_bdev_ctrlrs, tailq) {
TAILQ_FOREACH(nvme_ctrlr, &nbdev_ctrlr->ctrlrs, tailq) {
nvme_ctrlr_config_json(w, nvme_ctrlr);
}
}
/* Dump as last parameter to give all NVMe bdevs chance to be constructed


@ -40,8 +40,8 @@
#include "spdk/nvme.h"
#include "spdk/bdev_module.h"
TAILQ_HEAD(nvme_ctrlrs, nvme_ctrlr);
extern struct nvme_ctrlrs g_nvme_ctrlrs;
TAILQ_HEAD(nvme_bdev_ctrlrs, nvme_bdev_ctrlr);
extern struct nvme_bdev_ctrlrs g_nvme_bdev_ctrlrs;
extern pthread_mutex_t g_bdev_nvme_mutex;
extern bool g_bdev_nvme_module_finish;
@ -77,6 +77,7 @@ struct nvme_ns {
};
struct nvme_bdev_io;
struct nvme_bdev_ctrlr;
struct nvme_ctrlr_trid {
struct spdk_nvme_transport_id trid;
@ -94,7 +95,6 @@ struct nvme_ctrlr {
*/
struct spdk_nvme_ctrlr *ctrlr;
struct spdk_nvme_transport_id *connected_trid;
char *name;
int ref;
bool resetting;
bool failover_in_progress;
@ -122,6 +122,7 @@ struct nvme_ctrlr {
/** linked list pointer for device list */
TAILQ_ENTRY(nvme_ctrlr) tailq;
struct nvme_bdev_ctrlr *nbdev_ctrlr;
TAILQ_HEAD(, nvme_ctrlr_trid) trids;
@ -134,6 +135,12 @@ struct nvme_ctrlr {
pthread_mutex_t mutex;
};
struct nvme_bdev_ctrlr {
char *name;
TAILQ_HEAD(, nvme_ctrlr) ctrlrs;
TAILQ_ENTRY(nvme_bdev_ctrlr) tailq;
};
struct nvme_bdev {
struct spdk_bdev disk;
struct nvme_ns *nvme_ns;
@ -213,7 +220,8 @@ int bdev_nvme_create(struct spdk_nvme_transport_id *trid,
uint32_t prchk_flags,
spdk_bdev_create_nvme_fn cb_fn,
void *cb_ctx,
struct spdk_nvme_ctrlr_opts *opts);
struct spdk_nvme_ctrlr_opts *opts,
bool multipath);
struct spdk_nvme_ctrlr *bdev_nvme_get_ctrlr(struct spdk_bdev *bdev);
/**


@ -401,7 +401,8 @@ rpc_bdev_nvme_attach_controller(struct spdk_jsonrpc_request *request,
ctx->request = request;
ctx->count = NVME_MAX_BDEVS_PER_RPC;
rc = bdev_nvme_create(&trid, ctx->req.name, ctx->names, ctx->count, prchk_flags,
rpc_bdev_nvme_attach_controller_done, ctx, &ctx->req.opts);
rpc_bdev_nvme_attach_controller_done, ctx, &ctx->req.opts,
false);
if (rc) {
spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc));
goto cleanup;
@ -430,7 +431,7 @@ rpc_dump_nvme_controller_info(struct nvme_ctrlr *nvme_ctrlr, void *ctx)
trid = nvme_ctrlr->connected_trid;
spdk_json_write_object_begin(w);
spdk_json_write_named_string(w, "name", nvme_ctrlr->name);
spdk_json_write_named_string(w, "name", nvme_ctrlr->nbdev_ctrlr->name);
#ifdef SPDK_CONFIG_NVME_CUSE
size_t cuse_name_size = 128;
@ -1098,7 +1099,7 @@ rpc_bdev_nvme_get_transport_statistics(struct spdk_jsonrpc_request *request,
spdk_json_write_object_begin(ctx->w);
spdk_json_write_named_array_begin(ctx->w, "poll_groups");
spdk_for_each_channel(&g_nvme_ctrlrs,
spdk_for_each_channel(&g_nvme_bdev_ctrlrs,
rpc_bdev_nvme_stats_per_channel,
ctx,
rpc_bdev_nvme_stats_done);


@ -126,7 +126,7 @@ vbdev_opal_delete_all_base_config(struct vbdev_opal_part_base *base)
struct opal_vbdev *bdev, *tmp_bdev;
TAILQ_FOREACH_SAFE(bdev, &g_opal_vbdev, tailq, tmp_bdev) {
if (!strcmp(nvme_ctrlr_name, bdev->nvme_ctrlr->name)) {
if (!strcmp(nvme_ctrlr_name, bdev->nvme_ctrlr->nbdev_ctrlr->name)) {
vbdev_opal_delete(bdev);
}
}


@ -299,6 +299,7 @@ static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZ
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
static int g_ut_register_bdev_status;
static uint16_t g_ut_cntlid;
static void
ut_init_trid(struct spdk_nvme_transport_id *trid)
@ -372,7 +373,7 @@ spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
static struct spdk_nvme_ctrlr *
ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
bool ana_reporting)
bool ana_reporting, bool multi_ctrlr)
{
struct spdk_nvme_ctrlr *ctrlr;
uint32_t i;
@ -420,6 +421,8 @@ ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
ctrlr->cdata.nanagrpid = num_ns;
}
ctrlr->cdata.cntlid = ++g_ut_cntlid;
ctrlr->cdata.cmic.multi_ctrlr = multi_ctrlr;
ctrlr->cdata.cmic.ana_reporting = ana_reporting;
ctrlr->trid = *trid;
TAILQ_INIT(&ctrlr->active_io_qpairs);
@ -1535,14 +1538,14 @@ test_pending_reset(void)
set_thread(0);
ctrlr = ut_attach_ctrlr(&trid, 1, false);
ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
g_ut_attach_ctrlr_status = 0;
g_ut_attach_bdev_count = 1;
rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
attach_ctrlr_done, NULL, NULL);
attach_ctrlr_done, NULL, NULL, false);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
@ -1659,7 +1662,7 @@ test_attach_ctrlr(void)
/* If ctrlr fails, no nvme_ctrlr is created. Failed ctrlr is removed
* by probe polling.
*/
ctrlr = ut_attach_ctrlr(&trid, 0, false);
ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
ctrlr->is_failed = true;
@ -1667,7 +1670,7 @@ test_attach_ctrlr(void)
g_ut_attach_bdev_count = 0;
rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
attach_ctrlr_done, NULL, NULL);
attach_ctrlr_done, NULL, NULL, false);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
@ -1676,13 +1679,13 @@ test_attach_ctrlr(void)
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
/* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created */
ctrlr = ut_attach_ctrlr(&trid, 0, false);
ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
g_ut_attach_ctrlr_status = 0;
rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
attach_ctrlr_done, NULL, NULL);
attach_ctrlr_done, NULL, NULL, false);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
@ -1703,13 +1706,13 @@ test_attach_ctrlr(void)
/* If ctrlr has one namespace, one nvme_ctrlr with one namespace and
* one nvme_bdev is created.
*/
ctrlr = ut_attach_ctrlr(&trid, 1, false);
ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
g_ut_attach_bdev_count = 1;
rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
attach_ctrlr_done, NULL, NULL);
attach_ctrlr_done, NULL, NULL, false);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
@ -1737,14 +1740,14 @@ test_attach_ctrlr(void)
/* Ctrlr has one namespace but one nvme_ctrlr with no namespace is
* created because creating one nvme_bdev failed.
*/
ctrlr = ut_attach_ctrlr(&trid, 1, false);
ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
g_ut_register_bdev_status = -EINVAL;
g_ut_attach_bdev_count = 0;
rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
attach_ctrlr_done, NULL, NULL);
attach_ctrlr_done, NULL, NULL, false);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
@ -1788,7 +1791,7 @@ test_aer_cb(void)
/* Attach a ctrlr, whose max number of namespaces is 4, and 2nd, 3rd, and 4th
* namespaces are populated.
*/
ctrlr = ut_attach_ctrlr(&trid, 4, true);
ctrlr = ut_attach_ctrlr(&trid, 4, true, false);
SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
ctrlr->ns[0].is_active = false;
@ -1797,13 +1800,13 @@ test_aer_cb(void)
g_ut_attach_bdev_count = 3;
rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
attach_ctrlr_done, NULL, NULL);
attach_ctrlr_done, NULL, NULL, false);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
poll_threads();
spdk_delay_us(10000);
spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
poll_threads();
nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
@ -1849,7 +1852,7 @@ test_aer_cb(void)
aer_cb(nvme_ctrlr, &cpl);
spdk_delay_us(10000);
spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
poll_threads();
CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
@ -1956,7 +1959,7 @@ ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
CU_ASSERT(bdev_io->internal.in_submit_request == true);
CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);
spdk_delay_us(10000);
spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
poll_thread_times(1, 1);
CU_ASSERT(bdev_io->internal.in_submit_request == true);
@ -1986,14 +1989,14 @@ test_submit_nvme_cmd(void)
set_thread(1);
ctrlr = ut_attach_ctrlr(&trid, 1, false);
ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
g_ut_attach_ctrlr_status = 0;
g_ut_attach_bdev_count = 1;
rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
attach_ctrlr_done, NULL, NULL);
attach_ctrlr_done, NULL, NULL, false);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
@ -2079,11 +2082,11 @@ test_add_remove_trid(void)
g_ut_attach_ctrlr_status = 0;
g_ut_attach_bdev_count = 0;
ctrlr1 = ut_attach_ctrlr(&trid1, 0, false);
ctrlr1 = ut_attach_ctrlr(&trid1, 0, false, false);
SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
rc = bdev_nvme_create(&trid1, "nvme0", attached_names, STRING_SIZE, 0,
attach_ctrlr_done, NULL, NULL);
attach_ctrlr_done, NULL, NULL, false);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
@ -2094,11 +2097,11 @@ test_add_remove_trid(void)
CU_ASSERT(spdk_nvme_transport_id_compare(nvme_ctrlr->connected_trid, &trid1) == 0);
ctrlr2 = ut_attach_ctrlr(&trid2, 0, false);
ctrlr2 = ut_attach_ctrlr(&trid2, 0, false, false);
SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
rc = bdev_nvme_create(&trid2, "nvme0", attached_names, STRING_SIZE, 0,
attach_ctrlr_done, NULL, NULL);
attach_ctrlr_done, NULL, NULL, false);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
@ -2124,11 +2127,11 @@ test_add_remove_trid(void)
CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &trid2) != 0);
}
ctrlr3 = ut_attach_ctrlr(&trid3, 0, false);
ctrlr3 = ut_attach_ctrlr(&trid3, 0, false, false);
SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
rc = bdev_nvme_create(&trid3, "nvme0", attached_names, STRING_SIZE, 0,
attach_ctrlr_done, NULL, NULL);
attach_ctrlr_done, NULL, NULL, false);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
@ -2169,11 +2172,11 @@ test_add_remove_trid(void)
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
ctrlr1 = ut_attach_ctrlr(&trid1, 0, false);
ctrlr1 = ut_attach_ctrlr(&trid1, 0, false, false);
SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
rc = bdev_nvme_create(&trid1, "nvme0", attached_names, STRING_SIZE, 0,
attach_ctrlr_done, NULL, NULL);
attach_ctrlr_done, NULL, NULL, false);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
@ -2184,11 +2187,11 @@ test_add_remove_trid(void)
CU_ASSERT(spdk_nvme_transport_id_compare(nvme_ctrlr->connected_trid, &trid1) == 0);
ctrlr2 = ut_attach_ctrlr(&trid2, 0, false);
ctrlr2 = ut_attach_ctrlr(&trid2, 0, false, false);
SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
rc = bdev_nvme_create(&trid2, "nvme0", attached_names, STRING_SIZE, 0,
attach_ctrlr_done, NULL, NULL);
attach_ctrlr_done, NULL, NULL, false);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
@ -2234,7 +2237,7 @@ test_abort(void)
ut_init_trid(&trid);
ctrlr = ut_attach_ctrlr(&trid, 1, false);
ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
g_ut_attach_ctrlr_status = 0;
@ -2243,7 +2246,7 @@ test_abort(void)
set_thread(1);
rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
attach_ctrlr_done, NULL, NULL);
attach_ctrlr_done, NULL, NULL, false);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
@ -2302,7 +2305,7 @@ test_abort(void)
admin_io->internal.in_submit_request = true;
bdev_nvme_submit_request(ch1, admin_io);
spdk_delay_us(10000);
spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
poll_threads();
CU_ASSERT(admin_io->internal.in_submit_request == false);
@ -2331,7 +2334,7 @@ test_abort(void)
bdev_nvme_submit_request(ch1, abort_io);
spdk_delay_us(10000);
spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
poll_threads();
CU_ASSERT(abort_io->internal.in_submit_request == false);
@ -2354,7 +2357,7 @@ test_abort(void)
bdev_nvme_submit_request(ch2, abort_io);
spdk_delay_us(10000);
spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
poll_threads();
CU_ASSERT(abort_io->internal.in_submit_request == false);
@ -2448,14 +2451,14 @@ test_bdev_unregister(void)
memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
ut_init_trid(&trid);
ctrlr = ut_attach_ctrlr(&trid, 2, false);
ctrlr = ut_attach_ctrlr(&trid, 2, false, false);
SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
g_ut_attach_ctrlr_status = 0;
g_ut_attach_bdev_count = 2;
rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
attach_ctrlr_done, NULL, NULL);
attach_ctrlr_done, NULL, NULL, false);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
@ -2556,7 +2559,7 @@ test_init_ana_log_page(void)
memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
ut_init_trid(&trid);
ctrlr = ut_attach_ctrlr(&trid, 5, true);
ctrlr = ut_attach_ctrlr(&trid, 5, true, false);
SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
@ -2569,13 +2572,13 @@ test_init_ana_log_page(void)
g_ut_attach_bdev_count = 5;
rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
attach_ctrlr_done, NULL, NULL);
attach_ctrlr_done, NULL, NULL, false);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
poll_threads();
spdk_delay_us(10000);
spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
poll_threads();
nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
@ -2672,14 +2675,14 @@ test_reconnect_qpair(void)
set_thread(0);
ctrlr = ut_attach_ctrlr(&trid, 1, false);
ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
g_ut_attach_ctrlr_status = 0;
g_ut_attach_bdev_count = 1;
rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0,
attach_ctrlr_done, NULL, NULL);
attach_ctrlr_done, NULL, NULL, false);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
@ -2780,6 +2783,144 @@ test_reconnect_qpair(void)
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
static void
test_create_bdev_ctrlr(void)
{
struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
struct nvme_bdev_ctrlr *nbdev_ctrlr;
const int STRING_SIZE = 32;
const char *attached_names[STRING_SIZE];
int rc;
memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
ut_init_trid(&trid1);
ut_init_trid2(&trid2);
ctrlr1 = ut_attach_ctrlr(&trid1, 0, true, true);
SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
g_ut_attach_ctrlr_status = 0;
g_ut_attach_bdev_count = 0;
rc = bdev_nvme_create(&trid1, "nvme0", attached_names, STRING_SIZE, 0,
attach_ctrlr_done, NULL, NULL, true);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
poll_threads();
spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
poll_threads();
nbdev_ctrlr = nvme_bdev_ctrlr_get("nvme0");
SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1) != NULL);
/* cntlid is duplicated, and adding the second ctrlr should fail. */
g_ut_attach_ctrlr_status = -EINVAL;
ctrlr2 = ut_attach_ctrlr(&trid2, 0, true, true);
SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid;
rc = bdev_nvme_create(&trid2, "nvme0", attached_names, STRING_SIZE, 0,
attach_ctrlr_done, NULL, NULL, true);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
poll_threads();
spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2) == NULL);
/* cntlid is not duplicated, and adding the third ctrlr should succeed. */
g_ut_attach_ctrlr_status = 0;
ctrlr2 = ut_attach_ctrlr(&trid2, 0, true, true);
SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
rc = bdev_nvme_create(&trid2, "nvme0", attached_names, STRING_SIZE, 0,
attach_ctrlr_done, NULL, NULL, true);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
poll_threads();
spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2) != NULL);
/* Delete two ctrlrs at once. */
rc = bdev_nvme_delete("nvme0", NULL);
CU_ASSERT(rc == 0);
CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == nbdev_ctrlr);
CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1) != NULL);
CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2) != NULL);
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == NULL);
/* Add two ctrlrs and delete one by one. */
ctrlr1 = ut_attach_ctrlr(&trid1, 0, true, true);
SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
ctrlr2 = ut_attach_ctrlr(&trid2, 0, true, true);
SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
rc = bdev_nvme_create(&trid1, "nvme0", attached_names, STRING_SIZE, 0,
attach_ctrlr_done, NULL, NULL, true);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
poll_threads();
spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
poll_threads();
rc = bdev_nvme_create(&trid2, "nvme0", attached_names, STRING_SIZE, 0,
attach_ctrlr_done, NULL, NULL, true);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
poll_threads();
spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
poll_threads();
nbdev_ctrlr = nvme_bdev_ctrlr_get("nvme0");
SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
rc = bdev_nvme_delete("nvme0", &trid1);
CU_ASSERT(rc == 0);
CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == nbdev_ctrlr);
CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1) != NULL);
CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2) != NULL);
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == nbdev_ctrlr);
CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1) == NULL);
CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2) != NULL);
rc = bdev_nvme_delete("nvme0", &trid2);
CU_ASSERT(rc == 0);
CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == nbdev_ctrlr);
CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1) == NULL);
CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2) != NULL);
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == NULL);
}
int
main(int argc, const char **argv)
{
@ -2807,6 +2948,7 @@ main(int argc, const char **argv)
CU_ADD_TEST(suite, test_init_ana_log_page);
CU_ADD_TEST(suite, test_get_memory_domains);
CU_ADD_TEST(suite, test_reconnect_qpair);
CU_ADD_TEST(suite, test_create_bdev_ctrlr);
CU_basic_set_mode(CU_BRM_VERBOSE);