bdev/nvme: Add multiple namespaces to a single nvme_bdev

This patch removes the critical limitation that ctrlrs can be
aggregated only if they have no namespaces. After this patch, we can
add multiple namespaces to a single nvme_bdev.

Namespaces are aggregated into a single nvme_bdev when they satisfy all
of the following conditions (a sketch of the identity check follows the
list):
- they are in the same NVM subsystem,
- they are in different ctrlrs, and
- they are identical.
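
As a rough illustration of the last condition, "identical" is decided by
bdev_nvme_compare_ns(). A minimal sketch of such a check, assuming it is
keyed on the namespace UUID and capacity (the real helper may consult more
identify data; ns_are_identical() is a hypothetical name), is:

#include <stdbool.h>
#include "spdk/nvme.h"
#include "spdk/uuid.h"

/* Hypothetical sketch only: treat two namespaces as identical when their
 * UUIDs and capacities match. bdev_nvme_compare_ns() in the module is the
 * authoritative check.
 */
static bool
ns_are_identical(struct spdk_nvme_ns *ns1, struct spdk_nvme_ns *ns2)
{
	const struct spdk_uuid *uuid1 = spdk_nvme_ns_get_uuid(ns1);
	const struct spdk_uuid *uuid2 = spdk_nvme_ns_get_uuid(ns2);

	if (uuid1 == NULL || uuid2 == NULL) {
		return false;
	}

	return spdk_uuid_compare(uuid1, uuid2) == 0 &&
	       spdk_nvme_ns_get_num_sectors(ns1) == spdk_nvme_ns_get_num_sectors(ns2);
}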

Additionally, if we add one or more namespaces to an existing nvme_bdev
while there are active nvme_bdev_channels, the corresponding I/O paths
are added to those nvme_bdev_channels dynamically, as sketched below.
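
Concretely, nvme_bdev_add_ns() below defers the per-channel work to
spdk_for_each_channel(), which runs the callback on each channel's owning
thread and then invokes the completion callback:

/* From nvme_bdev_add_ns() in this patch: visit every active
 * nvme_bdev_channel of the bdev, add an nvme_io_path for the new
 * namespace on each, and finish (or roll back) in the completion.
 */
spdk_for_each_channel(bdev,                        /* io_device, i.e. the nvme_bdev */
		      bdev_nvme_add_io_path,       /* runs on each channel's thread */
		      nvme_ns,                     /* iterator context */
		      bdev_nvme_add_io_path_done); /* completion on the caller's thread */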

Even after this patch, ANA state is not yet utilized when selecting I/O
paths (see the abridged selection loop below). That will be done in the
following patches.
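
For reference, path selection after this patch simply returns the first
io_path whose qpair is connected; abridged from bdev_nvme_find_io_path()
in this patch:

STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
	if (spdk_unlikely(io_path->ctrlr_ch->qpair == NULL)) {
		/* The ctrlr is currently resetting; skip this path. */
		continue;
	}
	/* First connected path wins; ANA state is not consulted yet. */
	*_ns = io_path->nvme_ns->ns;
	*_qpair = io_path->ctrlr_ch->qpair;
	return true;
}
return false;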

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I15db35451e640d4beb99b138a4762243bee0d0f4
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8131
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Shuhei Matsumoto 2021-09-28 08:15:02 +09:00 committed by Tomasz Zawadzki
parent 630db8c209
commit c19ec84378
3 changed files with 788 additions and 93 deletions


@@ -259,6 +259,22 @@ nvme_bdev_ctrlr_get_ctrlr(struct nvme_bdev_ctrlr *nbdev_ctrlr,
return nvme_ctrlr;
}
static struct nvme_bdev *
nvme_bdev_ctrlr_get_bdev(struct nvme_bdev_ctrlr *nbdev_ctrlr, uint32_t nsid)
{
struct nvme_bdev *bdev;
pthread_mutex_lock(&g_bdev_nvme_mutex);
TAILQ_FOREACH(bdev, &nbdev_ctrlr->bdevs, tailq) {
if (bdev->nsid == nsid) {
break;
}
}
pthread_mutex_unlock(&g_bdev_nvme_mutex);
return bdev;
}
struct nvme_ns *
nvme_ctrlr_get_ns(struct nvme_ctrlr *nvme_ctrlr, uint32_t nsid)
{
@@ -404,6 +420,8 @@ nvme_bdev_ctrlr_delete(struct nvme_bdev_ctrlr *nbdev_ctrlr,
pthread_mutex_unlock(&g_bdev_nvme_mutex);
assert(TAILQ_EMPTY(&nbdev_ctrlr->bdevs));
free(nbdev_ctrlr->name);
free(nbdev_ctrlr);
}
@@ -532,24 +550,90 @@ nvme_ctrlr_release(struct nvme_ctrlr *nvme_ctrlr)
nvme_ctrlr_unregister(nvme_ctrlr);
}
static struct nvme_io_path *
_bdev_nvme_get_io_path(struct nvme_bdev_channel *nbdev_ch, struct nvme_ns *nvme_ns)
{
struct nvme_io_path *io_path;
STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
if (io_path->nvme_ns == nvme_ns) {
break;
}
}
return io_path;
}
static int
_bdev_nvme_add_io_path(struct nvme_bdev_channel *nbdev_ch, struct nvme_ns *nvme_ns)
{
struct nvme_io_path *io_path;
struct spdk_io_channel *ch;
io_path = calloc(1, sizeof(*io_path));
if (io_path == NULL) {
SPDK_ERRLOG("Failed to alloc io_path.\n");
return -ENOMEM;
}
ch = spdk_get_io_channel(nvme_ns->ctrlr);
if (ch == NULL) {
free(io_path);
SPDK_ERRLOG("Failed to alloc io_channel.\n");
return -ENOMEM;
}
io_path->ctrlr_ch = spdk_io_channel_get_ctx(ch);
io_path->nvme_ns = nvme_ns;
STAILQ_INSERT_TAIL(&nbdev_ch->io_path_list, io_path, stailq);
return 0;
}
static void
_bdev_nvme_delete_io_path(struct nvme_bdev_channel *nbdev_ch, struct nvme_io_path *io_path)
{
struct spdk_io_channel *ch;
STAILQ_REMOVE(&nbdev_ch->io_path_list, io_path, nvme_io_path, stailq);
ch = spdk_io_channel_from_ctx(io_path->ctrlr_ch);
spdk_put_io_channel(ch);
free(io_path);
}
static void
_bdev_nvme_delete_io_paths(struct nvme_bdev_channel *nbdev_ch)
{
struct nvme_io_path *io_path, *tmp_io_path;
STAILQ_FOREACH_SAFE(io_path, &nbdev_ch->io_path_list, stailq, tmp_io_path) {
_bdev_nvme_delete_io_path(nbdev_ch, io_path);
}
}
static int
bdev_nvme_create_bdev_channel_cb(void *io_device, void *ctx_buf)
{
struct nvme_bdev_channel *nbdev_ch = ctx_buf;
struct nvme_bdev *nbdev = io_device;
struct nvme_ns *nvme_ns;
struct spdk_io_channel *ch;
int rc;
nvme_ns = nbdev->nvme_ns;
STAILQ_INIT(&nbdev_ch->io_path_list);
ch = spdk_get_io_channel(nvme_ns->ctrlr);
if (ch == NULL) {
SPDK_ERRLOG("Failed to alloc io_channel.\n");
return -ENOMEM;
pthread_mutex_lock(&nbdev->mutex);
TAILQ_FOREACH(nvme_ns, &nbdev->nvme_ns_list, tailq) {
rc = _bdev_nvme_add_io_path(nbdev_ch, nvme_ns);
if (rc != 0) {
pthread_mutex_unlock(&nbdev->mutex);
_bdev_nvme_delete_io_paths(nbdev_ch);
return rc;
}
}
nbdev_ch->ctrlr_ch = spdk_io_channel_get_ctx(ch);
nbdev_ch->nvme_ns = nvme_ns;
pthread_mutex_unlock(&nbdev->mutex);
return 0;
}
@@ -558,24 +642,28 @@ static void
bdev_nvme_destroy_bdev_channel_cb(void *io_device, void *ctx_buf)
{
struct nvme_bdev_channel *nbdev_ch = ctx_buf;
struct spdk_io_channel *ch;
ch = spdk_io_channel_from_ctx(nbdev_ch->ctrlr_ch);
spdk_put_io_channel(ch);
_bdev_nvme_delete_io_paths(nbdev_ch);
}
static inline bool
bdev_nvme_find_io_path(struct nvme_bdev_channel *nbdev_ch,
struct spdk_nvme_ns **_ns, struct spdk_nvme_qpair **_qpair)
{
if (spdk_unlikely(nbdev_ch->ctrlr_ch->qpair == NULL)) {
/* The device is currently resetting. */
return false;
struct nvme_io_path *io_path;
STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
if (spdk_unlikely(io_path->ctrlr_ch->qpair == NULL)) {
/* The device is currently resetting. */
continue;
}
*_ns = io_path->nvme_ns->ns;
*_qpair = io_path->ctrlr_ch->qpair;
return true;
}
*_ns = nbdev_ch->nvme_ns->ns;
*_qpair = nbdev_ch->ctrlr_ch->qpair;
return true;
return false;
}
static inline void
@@ -702,23 +790,29 @@ static int
bdev_nvme_destruct(void *ctx)
{
struct nvme_bdev *nvme_disk = ctx;
struct nvme_ns *nvme_ns = nvme_disk->nvme_ns;
struct nvme_ns *nvme_ns, *tmp_nvme_ns;
pthread_mutex_lock(&nvme_ns->ctrlr->mutex);
TAILQ_FOREACH_SAFE(nvme_ns, &nvme_disk->nvme_ns_list, tailq, tmp_nvme_ns) {
pthread_mutex_lock(&nvme_ns->ctrlr->mutex);
nvme_ns->bdev = NULL;
nvme_ns->bdev = NULL;
assert(nvme_ns->id > 0);
assert(nvme_ns->id > 0);
if (nvme_ctrlr_get_ns(nvme_ns->ctrlr, nvme_ns->id) == NULL) {
pthread_mutex_unlock(&nvme_ns->ctrlr->mutex);
if (nvme_ctrlr_get_ns(nvme_ns->ctrlr, nvme_ns->id) == NULL) {
pthread_mutex_unlock(&nvme_ns->ctrlr->mutex);
nvme_ctrlr_release(nvme_ns->ctrlr);
free(nvme_ns);
} else {
pthread_mutex_unlock(&nvme_ns->ctrlr->mutex);
nvme_ctrlr_release(nvme_ns->ctrlr);
free(nvme_ns);
} else {
pthread_mutex_unlock(&nvme_ns->ctrlr->mutex);
}
}
pthread_mutex_lock(&g_bdev_nvme_mutex);
TAILQ_REMOVE(&nvme_disk->nbdev_ctrlr->bdevs, nvme_disk, tailq);
pthread_mutex_unlock(&g_bdev_nvme_mutex);
spdk_io_device_unregister(nvme_disk, _bdev_nvme_unregister_dev_cb);
return 0;
@@ -1012,9 +1106,9 @@ bdev_nvme_reset_io_complete(void *cb_arg, int rc)
}
static int
bdev_nvme_reset_io(struct nvme_bdev_channel *nbdev_ch, struct nvme_bdev_io *bio)
_bdev_nvme_reset_io(struct nvme_io_path *io_path, struct nvme_bdev_io *bio)
{
struct nvme_ctrlr_channel *ctrlr_ch = nbdev_ch->ctrlr_ch;
struct nvme_ctrlr_channel *ctrlr_ch = io_path->ctrlr_ch;
struct nvme_ctrlr *nvme_ctrlr;
struct spdk_bdev_io *bdev_io;
int rc;
@@ -1042,6 +1136,22 @@ bdev_nvme_reset_io(struct nvme_bdev_channel *nbdev_ch, struct nvme_bdev_io *bio)
return 0;
}
static int
bdev_nvme_reset_io(struct nvme_bdev_channel *nbdev_ch, struct nvme_bdev_io *bio)
{
struct nvme_io_path *io_path;
/* Reset only the first nvme_ctrlr in the nvme_bdev_ctrlr for now.
*
* TODO: Reset all nvme_ctrlrs in the nvme_bdev_ctrlr sequentially.
* This will be done in the following patches.
*/
io_path = STAILQ_FIRST(&nbdev_ch->io_path_list);
assert(io_path != NULL);
return _bdev_nvme_reset_io(io_path, bio);
}
static int
bdev_nvme_failover_start(struct nvme_ctrlr *nvme_ctrlr, bool remove)
{
@@ -1340,7 +1450,7 @@ bdev_nvme_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
struct spdk_nvme_ctrlr *ctrlr;
const struct spdk_nvme_ctrlr_data *cdata;
nvme_ns = nbdev->nvme_ns;
nvme_ns = TAILQ_FIRST(&nbdev->nvme_ns_list);
assert(nvme_ns != NULL);
ns = nvme_ns->ns;
ctrlr = spdk_nvme_ns_get_ctrlr(ns);
@@ -1526,12 +1636,18 @@ static void *
bdev_nvme_get_module_ctx(void *ctx)
{
struct nvme_bdev *nvme_bdev = ctx;
struct nvme_ns *nvme_ns;
if (!nvme_bdev || nvme_bdev->disk.module != &nvme_if || !nvme_bdev->nvme_ns) {
if (!nvme_bdev || nvme_bdev->disk.module != &nvme_if) {
return NULL;
}
return nvme_bdev->nvme_ns->ns;
nvme_ns = TAILQ_FIRST(&nvme_bdev->nvme_ns_list);
if (!nvme_ns) {
return NULL;
}
return nvme_ns->ns;
}
static const char *
@@ -1557,15 +1673,18 @@ static int
bdev_nvme_get_memory_domains(void *ctx, struct spdk_memory_domain **domains, int array_size)
{
struct nvme_bdev *nbdev = ctx;
struct nvme_ns *nvme_ns;
return spdk_nvme_ctrlr_get_memory_domains(nbdev->nvme_ns->ctrlr->ctrlr, domains, array_size);
nvme_ns = TAILQ_FIRST(&nbdev->nvme_ns_list);
assert(nvme_ns != NULL);
return spdk_nvme_ctrlr_get_memory_domains(nvme_ns->ctrlr->ctrlr, domains, array_size);
}
static int
bdev_nvme_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
static void
nvme_namespace_info_json(struct spdk_json_write_ctx *w,
struct nvme_ns *nvme_ns)
{
struct nvme_bdev *nvme_bdev = ctx;
struct nvme_ns *nvme_ns;
struct spdk_nvme_ns *ns;
struct spdk_nvme_ctrlr *ctrlr;
const struct spdk_nvme_ctrlr_data *cdata;
@@ -1573,8 +1692,6 @@ bdev_nvme_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
union spdk_nvme_vs_register vs;
char buf[128];
nvme_ns = nvme_bdev->nvme_ns;
assert(nvme_ns != NULL);
ns = nvme_ns->ns;
ctrlr = spdk_nvme_ns_get_ctrlr(ns);
@@ -1661,12 +1778,25 @@ bdev_nvme_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
if (cdata->oacs.security) {
spdk_json_write_named_object_begin(w, "security");
spdk_json_write_named_bool(w, "opal", nvme_bdev->opal);
spdk_json_write_named_bool(w, "opal", nvme_ns->bdev->opal);
spdk_json_write_object_end(w);
}
spdk_json_write_object_end(w);
}
static int
bdev_nvme_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
{
struct nvme_bdev *nvme_bdev = ctx;
struct nvme_ns *nvme_ns;
pthread_mutex_lock(&nvme_bdev->mutex);
TAILQ_FOREACH(nvme_ns, &nvme_bdev->nvme_ns_list, tailq) {
nvme_namespace_info_json(w, nvme_ns);
}
pthread_mutex_unlock(&nvme_bdev->mutex);
return 0;
}
@@ -1681,24 +1811,28 @@ static uint64_t
bdev_nvme_get_spin_time(struct spdk_io_channel *ch)
{
struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
struct nvme_ctrlr_channel *ctrlr_ch = nbdev_ch->ctrlr_ch;
struct nvme_poll_group *group = ctrlr_ch->group;
uint64_t spin_time;
struct nvme_io_path *io_path;
struct nvme_poll_group *group;
uint64_t spin_time = 0;
if (!group || !group->collect_spin_stat) {
return 0;
STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
group = io_path->ctrlr_ch->group;
if (!group || !group->collect_spin_stat) {
continue;
}
if (group->end_ticks != 0) {
group->spin_ticks += (group->end_ticks - group->start_ticks);
group->end_ticks = 0;
}
spin_time += group->spin_ticks;
group->start_ticks = 0;
group->spin_ticks = 0;
}
if (group->end_ticks != 0) {
group->spin_ticks += (group->end_ticks - group->start_ticks);
group->end_ticks = 0;
}
spin_time = (group->spin_ticks * 1000000ULL) / spdk_get_ticks_hz();
group->start_ticks = 0;
group->spin_ticks = 0;
return spin_time;
return (spin_time * 1000000ULL) / spdk_get_ticks_hz();
}
static const struct spdk_bdev_fn_table nvmelib_fn_table = {
@@ -1883,7 +2017,15 @@ nvme_bdev_create(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ns *nvme_ns)
return -ENOMEM;
}
bdev->nvme_ns = nvme_ns;
rc = pthread_mutex_init(&bdev->mutex, NULL);
if (rc != 0) {
free(bdev);
return rc;
}
bdev->ref = 1;
TAILQ_INIT(&bdev->nvme_ns_list);
TAILQ_INSERT_TAIL(&bdev->nvme_ns_list, nvme_ns, tailq);
bdev->opal = nvme_ctrlr->opal_dev != NULL;
rc = nvme_disk_create(&bdev->disk, nvme_ctrlr->nbdev_ctrlr->name, nvme_ctrlr->ctrlr,
@@ -1904,12 +2046,17 @@ nvme_bdev_create(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ns *nvme_ns)
if (rc != 0) {
SPDK_ERRLOG("spdk_bdev_register() failed\n");
spdk_io_device_unregister(bdev, NULL);
pthread_mutex_destroy(&bdev->mutex);
free(bdev->disk.name);
free(bdev);
return rc;
}
nvme_ns->bdev = bdev;
bdev->nsid = nvme_ns->id;
bdev->nbdev_ctrlr = nvme_ctrlr->nbdev_ctrlr;
TAILQ_INSERT_TAIL(&nvme_ctrlr->nbdev_ctrlr->bdevs, bdev, tailq);
return 0;
}
@@ -2052,14 +2199,102 @@ nvme_ctrlr_populate_namespace_done(struct nvme_ns *nvme_ns, int rc)
}
}
static void
bdev_nvme_add_io_path(struct spdk_io_channel_iter *i)
{
struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(_ch);
struct nvme_ns *nvme_ns = spdk_io_channel_iter_get_ctx(i);
int rc;
rc = _bdev_nvme_add_io_path(nbdev_ch, nvme_ns);
if (rc != 0) {
SPDK_ERRLOG("Failed to add I/O path to bdev_channel dynamically.\n");
}
spdk_for_each_channel_continue(i, rc);
}
static void
bdev_nvme_delete_io_path(struct spdk_io_channel_iter *i)
{
struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(_ch);
struct nvme_ns *nvme_ns = spdk_io_channel_iter_get_ctx(i);
struct nvme_io_path *io_path;
io_path = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns);
if (io_path != NULL) {
_bdev_nvme_delete_io_path(nbdev_ch, io_path);
}
spdk_for_each_channel_continue(i, 0);
}
static void
bdev_nvme_add_io_path_failed(struct spdk_io_channel_iter *i, int status)
{
struct nvme_ns *nvme_ns = spdk_io_channel_iter_get_ctx(i);
nvme_ctrlr_populate_namespace_done(nvme_ns, -1);
}
static void
bdev_nvme_add_io_path_done(struct spdk_io_channel_iter *i, int status)
{
struct nvme_ns *nvme_ns = spdk_io_channel_iter_get_ctx(i);
struct nvme_bdev *bdev = spdk_io_channel_iter_get_io_device(i);
if (status == 0) {
nvme_ctrlr_populate_namespace_done(nvme_ns, 0);
} else {
/* Delete the added io_paths and fail populating the namespace. */
spdk_for_each_channel(bdev,
bdev_nvme_delete_io_path,
nvme_ns,
bdev_nvme_add_io_path_failed);
}
}
static int
nvme_bdev_add_ns(struct nvme_bdev *bdev, struct nvme_ns *nvme_ns)
{
struct nvme_ns *tmp_ns;
pthread_mutex_lock(&bdev->mutex);
tmp_ns = TAILQ_FIRST(&bdev->nvme_ns_list);
assert(tmp_ns != NULL);
if (!bdev_nvme_compare_ns(nvme_ns->ns, tmp_ns->ns)) {
pthread_mutex_unlock(&bdev->mutex);
SPDK_ERRLOG("Namespaces are not identical.\n");
return -EINVAL;
}
bdev->ref++;
TAILQ_INSERT_TAIL(&bdev->nvme_ns_list, nvme_ns, tailq);
nvme_ns->bdev = bdev;
pthread_mutex_unlock(&bdev->mutex);
/* Add nvme_io_path to nvme_bdev_channels dynamically. */
spdk_for_each_channel(bdev,
bdev_nvme_add_io_path,
nvme_ns,
bdev_nvme_add_io_path_done);
return 0;
}
static void
nvme_ctrlr_populate_namespace(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ns *nvme_ns)
{
struct spdk_nvme_ctrlr *ctrlr = nvme_ctrlr->ctrlr;
struct spdk_nvme_ns *ns;
struct nvme_bdev *bdev;
int rc = 0;
ns = spdk_nvme_ctrlr_get_ns(ctrlr, nvme_ns->id);
ns = spdk_nvme_ctrlr_get_ns(nvme_ctrlr->ctrlr, nvme_ns->id);
if (!ns) {
SPDK_DEBUGLOG(bdev_nvme, "Invalid NS %d\n", nvme_ns->id);
rc = -EINVAL;
@@ -2073,8 +2308,15 @@ nvme_ctrlr_populate_namespace(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ns *nvm
bdev_nvme_parse_ana_log_page(nvme_ctrlr, nvme_ns_set_ana_state, nvme_ns);
}
rc = nvme_bdev_create(nvme_ctrlr, nvme_ns);
bdev = nvme_bdev_ctrlr_get_bdev(nvme_ctrlr->nbdev_ctrlr, nvme_ns->id);
if (bdev == NULL) {
rc = nvme_bdev_create(nvme_ctrlr, nvme_ns);
} else {
rc = nvme_bdev_add_ns(bdev, nvme_ns);
if (rc == 0) {
return;
}
}
done:
nvme_ctrlr_populate_namespace_done(nvme_ns, rc);
}
@@ -2101,6 +2343,14 @@ nvme_ctrlr_depopulate_namespace_done(struct nvme_ns *nvme_ns)
nvme_ctrlr_release(nvme_ctrlr);
}
static void
bdev_nvme_delete_io_path_done(struct spdk_io_channel_iter *i, int status)
{
struct nvme_ns *nvme_ns = spdk_io_channel_iter_get_ctx(i);
nvme_ctrlr_depopulate_namespace_done(nvme_ns);
}
static void
nvme_ctrlr_depopulate_namespace(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ns *nvme_ns)
{
@@ -2108,7 +2358,33 @@ nvme_ctrlr_depopulate_namespace(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ns *n
bdev = nvme_ns->bdev;
if (bdev != NULL) {
spdk_bdev_unregister(&bdev->disk, NULL, NULL);
pthread_mutex_lock(&bdev->mutex);
assert(bdev->ref > 0);
bdev->ref--;
if (bdev->ref == 0) {
pthread_mutex_unlock(&bdev->mutex);
spdk_bdev_unregister(&bdev->disk, NULL, NULL);
} else {
/* spdk_bdev_unregister() is not called until the last nvme_ns is
* depopulated. Hence we need to remove nvme_ns from bdev->nvme_ns_list
* and clear nvme_ns->bdev here.
*/
TAILQ_REMOVE(&bdev->nvme_ns_list, nvme_ns, tailq);
nvme_ns->bdev = NULL;
pthread_mutex_unlock(&bdev->mutex);
/* Delete nvme_io_paths from nvme_bdev_channels dynamically. After that,
* we call depopulate_namespace_done() to avoid use-after-free.
*/
spdk_for_each_channel(bdev,
bdev_nvme_delete_io_path,
nvme_ns,
bdev_nvme_delete_io_path_done);
return;
}
}
nvme_ctrlr_depopulate_namespace_done(nvme_ns);
@@ -2436,10 +2712,6 @@ bdev_nvme_check_multipath(struct nvme_bdev_ctrlr *nbdev_ctrlr, struct spdk_nvme_
cdata = spdk_nvme_ctrlr_get_data(ctrlr);
/* TODO: This check is removed in the following patches. */
if (spdk_nvme_ctrlr_get_num_ns(ctrlr) > 0) {
return false;
}
if (!cdata->cmic.multi_ctrlr) {
SPDK_ERRLOG("Ctrlr%u does not support multipath.\n", cdata->cntlid);
return false;
@@ -2448,10 +2720,6 @@ bdev_nvme_check_multipath(struct nvme_bdev_ctrlr *nbdev_ctrlr, struct spdk_nvme_
TAILQ_FOREACH(tmp, &nbdev_ctrlr->ctrlrs, tailq) {
tmp_cdata = spdk_nvme_ctrlr_get_data(tmp->ctrlr);
/* TODO: This check is removed in the following patches. */
if (spdk_nvme_ctrlr_get_num_ns(tmp->ctrlr) > 0) {
return false;
}
if (!tmp_cdata->cmic.multi_ctrlr) {
SPDK_ERRLOG("Ctrlr%u does not support multipath.\n", cdata->cntlid);
return false;
@@ -2494,6 +2762,7 @@ nvme_bdev_ctrlr_create(const char *name, struct nvme_ctrlr *nvme_ctrlr)
goto exit;
}
TAILQ_INIT(&nbdev_ctrlr->ctrlrs);
TAILQ_INIT(&nbdev_ctrlr->bdevs);
TAILQ_INSERT_TAIL(&g_nvme_bdev_ctrlrs, nbdev_ctrlr, tailq);
}
nvme_ctrlr->nbdev_ctrlr = nbdev_ctrlr;
@@ -4040,11 +4309,18 @@ static int
bdev_nvme_admin_passthru(struct nvme_bdev_channel *nbdev_ch, struct nvme_bdev_io *bio,
struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes)
{
struct nvme_ctrlr_channel *ctrlr_ch = nbdev_ch->ctrlr_ch;
struct nvme_io_path *io_path;
struct nvme_ctrlr *nvme_ctrlr;
uint32_t max_xfer_size;
nvme_ctrlr = nvme_ctrlr_channel_get_ctrlr(ctrlr_ch);
/* Admin commands are submitted only to the first nvme_ctrlr for now.
*
* TODO: This limitation will be removed in the following patches.
*/
io_path = STAILQ_FIRST(&nbdev_ch->io_path_list);
assert(io_path != NULL);
nvme_ctrlr = nvme_ctrlr_channel_get_ctrlr(io_path->ctrlr_ch);
max_xfer_size = spdk_nvme_ctrlr_get_max_xfer_size(nvme_ctrlr->ctrlr);
@@ -4115,26 +4391,36 @@ static int
bdev_nvme_abort(struct nvme_bdev_channel *nbdev_ch, struct nvme_bdev_io *bio,
struct nvme_bdev_io *bio_to_abort)
{
struct nvme_ctrlr_channel *ctrlr_ch = nbdev_ch->ctrlr_ch;
struct nvme_io_path *io_path;
struct nvme_ctrlr *nvme_ctrlr;
int rc;
int rc = 0;
bio->orig_thread = spdk_get_thread();
nvme_ctrlr = nvme_ctrlr_channel_get_ctrlr(ctrlr_ch);
/* Even admin commands are submitted only to nvme_ctrlrs that are on some
 * io_path. So traverse the io_path list not only for I/O commands but
 * also for admin commands.
 */
STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
nvme_ctrlr = nvme_ctrlr_channel_get_ctrlr(io_path->ctrlr_ch);
rc = spdk_nvme_ctrlr_cmd_abort_ext(nvme_ctrlr->ctrlr,
ctrlr_ch->qpair,
bio_to_abort,
bdev_nvme_abort_done, bio);
if (rc == -ENOENT) {
/* If no command was found in I/O qpair, the target command may be
* admin command.
*/
rc = spdk_nvme_ctrlr_cmd_abort_ext(nvme_ctrlr->ctrlr,
NULL,
io_path->ctrlr_ch->qpair,
bio_to_abort,
bdev_nvme_abort_done, bio);
if (rc == -ENOENT) {
/* If no command was found in I/O qpair, the target command may be
* admin command.
*/
rc = spdk_nvme_ctrlr_cmd_abort_ext(nvme_ctrlr->ctrlr,
NULL,
bio_to_abort,
bdev_nvme_abort_done, bio);
}
if (rc != -ENOENT) {
break;
}
}
if (rc == -ENOENT) {
@@ -4254,11 +4540,18 @@ bdev_nvme_config_json(struct spdk_json_write_ctx *w)
struct spdk_nvme_ctrlr *
bdev_nvme_get_ctrlr(struct spdk_bdev *bdev)
{
struct nvme_bdev *nbdev;
struct nvme_ns *nvme_ns;
if (!bdev || bdev->module != &nvme_if) {
return NULL;
}
return SPDK_CONTAINEROF(bdev, struct nvme_bdev, disk)->nvme_ns->ctrlr->ctrlr;
nbdev = SPDK_CONTAINEROF(bdev, struct nvme_bdev, disk);
nvme_ns = TAILQ_FIRST(&nbdev->nvme_ns_list);
assert(nvme_ns != NULL);
return nvme_ns->ctrlr->ctrlr;
}
SPDK_LOG_REGISTER_COMPONENT(bdev_nvme)


@@ -74,10 +74,12 @@ struct nvme_ns {
uint32_t ana_group_id;
enum spdk_nvme_ana_state ana_state;
struct nvme_async_probe_ctx *probe_ctx;
TAILQ_ENTRY(nvme_ns) tailq;
};
struct nvme_bdev_io;
struct nvme_bdev_ctrlr;
struct nvme_bdev;
struct nvme_path_id {
struct spdk_nvme_transport_id trid;
@@ -140,13 +142,19 @@ struct nvme_ctrlr {
struct nvme_bdev_ctrlr {
char *name;
TAILQ_HEAD(, nvme_ctrlr) ctrlrs;
TAILQ_HEAD(, nvme_bdev) bdevs;
TAILQ_ENTRY(nvme_bdev_ctrlr) tailq;
};
struct nvme_bdev {
struct spdk_bdev disk;
struct nvme_ns *nvme_ns;
uint32_t nsid;
struct nvme_bdev_ctrlr *nbdev_ctrlr;
pthread_mutex_t mutex;
int ref;
TAILQ_HEAD(, nvme_ns) nvme_ns_list;
bool opal;
TAILQ_ENTRY(nvme_bdev) tailq;
};
struct nvme_ctrlr_channel {
@@ -159,9 +167,14 @@ struct nvme_ctrlr_channel {
#define nvme_ctrlr_channel_get_ctrlr(ctrlr_ch) \
(struct nvme_ctrlr *)spdk_io_channel_get_io_device(spdk_io_channel_from_ctx(ctrlr_ch))
struct nvme_bdev_channel {
struct nvme_io_path {
struct nvme_ns *nvme_ns;
struct nvme_ctrlr_channel *ctrlr_ch;
STAILQ_ENTRY(nvme_io_path) stailq;
};
struct nvme_bdev_channel {
STAILQ_HEAD(, nvme_io_path) io_path_list;
};
struct nvme_poll_group {


@@ -1555,6 +1555,7 @@ test_pending_reset(void)
struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
struct spdk_io_channel *ch1, *ch2;
struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
struct nvme_io_path *io_path1, *io_path2;
struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
int rc;
@@ -1586,7 +1587,9 @@
SPDK_CU_ASSERT_FATAL(ch1 != NULL);
nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
ctrlr_ch1 = nbdev_ch1->ctrlr_ch;
io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
ctrlr_ch1 = io_path1->ctrlr_ch;
SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
@@ -1598,7 +1601,9 @@
SPDK_CU_ASSERT_FATAL(ch2 != NULL);
nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
ctrlr_ch2 = nbdev_ch2->ctrlr_ch;
io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
ctrlr_ch2 = io_path2->ctrlr_ch;
SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
@@ -2268,6 +2273,7 @@ test_abort(void)
struct spdk_bdev_io *write_io, *admin_io, *abort_io;
struct spdk_io_channel *ch1, *ch2;
struct nvme_bdev_channel *nbdev_ch1;
struct nvme_io_path *io_path1;
struct nvme_ctrlr_channel *ctrlr_ch1;
int rc;
@@ -2312,7 +2318,9 @@
ch1 = spdk_get_io_channel(bdev);
SPDK_CU_ASSERT_FATAL(ch1 != NULL);
nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
ctrlr_ch1 = nbdev_ch1->ctrlr_ch;
io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
ctrlr_ch1 = io_path1->ctrlr_ch;
SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
set_thread(1);
@@ -2676,10 +2684,12 @@ test_get_memory_domains(void)
{
struct nvme_ctrlr ctrlr = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef };
struct nvme_ns ns = { .ctrlr = &ctrlr };
struct nvme_bdev nbdev = { .nvme_ns = &ns };
struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) };
struct spdk_memory_domain *domains[2] = {};
int rc = 0;
TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns, tailq);
/* nvme controller doesn't have memory domains */
MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, 0);
rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
@@ -2703,6 +2713,7 @@ test_reconnect_qpair(void)
struct nvme_bdev *bdev;
struct spdk_io_channel *ch1, *ch2;
struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
struct nvme_io_path *io_path1, *io_path2;
struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
int rc;
@@ -2734,7 +2745,9 @@
SPDK_CU_ASSERT_FATAL(ch1 != NULL);
nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
ctrlr_ch1 = nbdev_ch1->ctrlr_ch;
io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
ctrlr_ch1 = io_path1->ctrlr_ch;
SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
set_thread(1);
@@ -2743,7 +2756,9 @@
SPDK_CU_ASSERT_FATAL(ch2 != NULL);
nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
ctrlr_ch2 = nbdev_ch2->ctrlr_ch;
io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
ctrlr_ch2 = io_path2->ctrlr_ch;
SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
/* If a qpair is disconnected, it is freed and then reconnected via
@@ -2965,6 +2980,378 @@ test_create_bdev_ctrlr(void)
CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == NULL);
}
static struct nvme_ns *
_nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr)
{
struct nvme_ns *nvme_ns;
TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) {
if (nvme_ns->ctrlr == nvme_ctrlr) {
return nvme_ns;
}
}
return NULL;
}
static void
test_add_multi_ns_to_bdev(void)
{
struct nvme_path_id path1 = {}, path2 = {};
struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
struct nvme_bdev_ctrlr *nbdev_ctrlr;
struct nvme_ns *nvme_ns1, *nvme_ns2;
struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4;
const int STRING_SIZE = 32;
const char *attached_names[STRING_SIZE];
int rc;
memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
ut_init_trid(&path1.trid);
ut_init_trid2(&path2.trid);
/* Create nvme_bdevs, some of which have shared namespaces between two ctrlrs. */
/* Attach 1st ctrlr, whose max number of namespaces is 5, and 1st, 3rd, and 4th
* namespaces are populated.
*/
ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true);
SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
ctrlr1->ns[1].is_active = false;
ctrlr1->ns[4].is_active = false;
memset(&ctrlr1->ns[0].uuid, 0x1, sizeof(struct spdk_uuid));
memset(&ctrlr1->ns[2].uuid, 0x3, sizeof(struct spdk_uuid));
memset(&ctrlr1->ns[3].uuid, 0x4, sizeof(struct spdk_uuid));
g_ut_attach_ctrlr_status = 0;
g_ut_attach_bdev_count = 3;
rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 0,
attach_ctrlr_done, NULL, NULL, true);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
poll_threads();
spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
poll_threads();
/* Attach 2nd ctrlr, whose max number of namespaces is 5, and 1st, 2nd, and 4th
* namespaces are populated. The uuid of 4th namespace is different, and hence
* adding 4th namespace to a bdev should fail.
*/
ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true);
SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
ctrlr2->ns[2].is_active = false;
ctrlr2->ns[4].is_active = false;
memset(&ctrlr2->ns[0].uuid, 0x1, sizeof(struct spdk_uuid));
memset(&ctrlr2->ns[1].uuid, 0x2, sizeof(struct spdk_uuid));
memset(&ctrlr2->ns[3].uuid, 0x44, sizeof(struct spdk_uuid));
g_ut_attach_ctrlr_status = 0;
g_ut_attach_bdev_count = 2;
rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 0,
attach_ctrlr_done, NULL, NULL, true);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
poll_threads();
spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
poll_threads();
nbdev_ctrlr = nvme_bdev_ctrlr_get("nvme0");
SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
CU_ASSERT(nvme_ctrlr1->num_ns == 5);
CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL);
CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL);
CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL);
CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL);
CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL);
nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
CU_ASSERT(nvme_ctrlr2->num_ns == 5);
CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL);
CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL);
CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL);
CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL);
CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL);
bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2);
SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3);
SPDK_CU_ASSERT_FATAL(bdev3 != NULL);
bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4);
SPDK_CU_ASSERT_FATAL(bdev4 != NULL);
CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL);
CU_ASSERT(bdev1->ref == 2);
CU_ASSERT(bdev2->ref == 1);
CU_ASSERT(bdev3->ref == 1);
CU_ASSERT(bdev4->ref == 1);
/* Test if nvme_bdevs can be deleted by deleting ctrlr one by one. */
rc = bdev_nvme_delete("nvme0", &path1);
CU_ASSERT(rc == 0);
poll_threads();
spdk_delay_us(1000);
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == nbdev_ctrlr);
CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL);
CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == nvme_ctrlr2);
rc = bdev_nvme_delete("nvme0", &path2);
CU_ASSERT(rc == 0);
poll_threads();
spdk_delay_us(1000);
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == NULL);
/* Test if a nvme_bdev which has a shared namespace between two ctrlrs
 * can be deleted when the bdev subsystem shuts down.
 */
g_ut_attach_bdev_count = 1;
ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
memset(&ctrlr1->ns[0].uuid, 1, sizeof(struct spdk_uuid));
rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 0,
attach_ctrlr_done, NULL, NULL, true);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
poll_threads();
spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
poll_threads();
ut_init_trid2(&path2.trid);
ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
memset(&ctrlr2->ns[0].uuid, 1, sizeof(struct spdk_uuid));
rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 0,
attach_ctrlr_done, NULL, NULL, true);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
poll_threads();
spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
poll_threads();
nbdev_ctrlr = nvme_bdev_ctrlr_get("nvme0");
SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
/* Check if a nvme_bdev has two nvme_ns. */
nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1);
SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
CU_ASSERT(nvme_ns1->bdev == bdev1);
nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2);
SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
CU_ASSERT(nvme_ns2->bdev == bdev1);
/* Delete the nvme_bdev first, as is done when the bdev subsystem shuts down. */
bdev_nvme_destruct(&bdev1->disk);
poll_threads();
CU_ASSERT(nvme_ns1->bdev == NULL);
CU_ASSERT(nvme_ns2->bdev == NULL);
nvme_ctrlr1->destruct = true;
_nvme_ctrlr_destruct(nvme_ctrlr1);
poll_threads();
spdk_delay_us(1000);
poll_threads();
nvme_ctrlr2->destruct = true;
_nvme_ctrlr_destruct(nvme_ctrlr2);
poll_threads();
spdk_delay_us(1000);
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == NULL);
}
static void
test_add_multi_io_paths_to_nbdev_ch(void)
{
struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
struct nvme_bdev_ctrlr *nbdev_ctrlr;
struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3;
struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3;
const int STRING_SIZE = 32;
const char *attached_names[STRING_SIZE];
struct nvme_bdev *bdev;
struct spdk_io_channel *ch;
struct nvme_bdev_channel *nbdev_ch;
struct nvme_io_path *io_path1, *io_path2, *io_path3;
int rc;
memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
ut_init_trid(&path1.trid);
ut_init_trid2(&path2.trid);
ut_init_trid3(&path3.trid);
g_ut_attach_ctrlr_status = 0;
g_ut_attach_bdev_count = 1;
set_thread(1);
ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
memset(&ctrlr1->ns[0].uuid, 1, sizeof(struct spdk_uuid));
rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 0,
attach_ctrlr_done, NULL, NULL, true);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
poll_threads();
spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
poll_threads();
ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
memset(&ctrlr2->ns[0].uuid, 1, sizeof(struct spdk_uuid));
rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 0,
attach_ctrlr_done, NULL, NULL, true);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
poll_threads();
spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
poll_threads();
nbdev_ctrlr = nvme_bdev_ctrlr_get("nvme0");
SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);
nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);
bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
SPDK_CU_ASSERT_FATAL(bdev != NULL);
nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1);
SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2);
SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
set_thread(0);
ch = spdk_get_io_channel(bdev);
SPDK_CU_ASSERT_FATAL(ch != NULL);
nbdev_ch = spdk_io_channel_get_ctx(ch);
io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1);
SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2);
SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
set_thread(1);
/* Check if I/O path is dynamically added to nvme_bdev_channel. */
ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
memset(&ctrlr3->ns[0].uuid, 1, sizeof(struct spdk_uuid));
rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 0,
attach_ctrlr_done, NULL, NULL, true);
CU_ASSERT(rc == 0);
spdk_delay_us(1000);
poll_threads();
spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
poll_threads();
nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid);
SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL);
nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3);
SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL);
io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3);
SPDK_CU_ASSERT_FATAL(io_path3 != NULL);
/* Check if I/O path is dynamically deleted from nvme_bdev_channel. */
rc = bdev_nvme_delete("nvme0", &path2);
CU_ASSERT(rc == 0);
poll_threads();
spdk_delay_us(1000);
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == nvme_ctrlr1);
CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid) == nvme_ctrlr3);
CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1);
CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL);
CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3);
set_thread(0);
spdk_put_io_channel(ch);
poll_threads();
set_thread(1);
rc = bdev_nvme_delete("nvme0", &g_any_path);
CU_ASSERT(rc == 0);
poll_threads();
spdk_delay_us(1000);
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == NULL);
}
int
main(int argc, const char **argv)
{
@@ -2993,6 +3380,8 @@ main(int argc, const char **argv)
CU_ADD_TEST(suite, test_get_memory_domains);
CU_ADD_TEST(suite, test_reconnect_qpair);
CU_ADD_TEST(suite, test_create_bdev_ctrlr);
CU_ADD_TEST(suite, test_add_multi_ns_to_bdev);
CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch);
CU_basic_set_mode(CU_BRM_VERBOSE);