bdev/nvme: Rename nvme_bdev_ctrlr to nvme_ctrlr

This object is per I/O path and will be aggregated by a new upper
layer object.

Hence rename nvme_bdev_ctrlr to nvme_ctrlr. The following patches will
then add nvme_bdev_ctrlr as a different upper layer object.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: Ieed634447785cc98140b3d49c52a2c753988ece7
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8381
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Authored by Shuhei Matsumoto on 2021-07-07 04:42:41 +09:00, committed by Tomasz Zawadzki
parent 9e65d3bb35
commit a3dcdc051f
12 changed files with 619 additions and 619 deletions
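
As a rough sketch of the layering described in the commit message (the struct below is an assumption for illustration only; the real upper layer object is introduced by the follow-up patches), the renamed nvme_ctrlr stays a per-I/O-path object, while a new nvme_bdev_ctrlr is expected to aggregate several such paths:

/* Hypothetical sketch of the intended layering; this is not code added by the
 * series. nvme_ctrlr (renamed here from nvme_bdev_ctrlr) remains per I/O path,
 * and a new nvme_bdev_ctrlr is expected to group several of them later on.
 */
#include <sys/queue.h>

struct nvme_ctrlr;                        /* per I/O path, renamed by this patch */

struct nvme_bdev_ctrlr {                  /* assumed upper layer aggregate */
        char *name;                       /* user-visible controller name */
        TAILQ_HEAD(, nvme_ctrlr) ctrlrs;  /* per-path objects it groups; each
                                           * nvme_ctrlr would carry a matching
                                           * TAILQ_ENTRY */
};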

File diff suppressed because it is too large.

View File

@ -61,7 +61,7 @@ rpc_nvme_cuse_register(struct spdk_jsonrpc_request *request,
const struct spdk_json_val *params)
{
struct rpc_nvme_cuse_register req = {};
struct nvme_bdev_ctrlr *bdev_ctrlr = NULL;
struct nvme_ctrlr *bdev_ctrlr = NULL;
int rc;
if (spdk_json_decode_object(params, rpc_nvme_cuse_register_decoders,
@ -73,7 +73,7 @@ rpc_nvme_cuse_register(struct spdk_jsonrpc_request *request,
goto cleanup;
}
bdev_ctrlr = nvme_bdev_ctrlr_get_by_name(req.name);
bdev_ctrlr = nvme_ctrlr_get_by_name(req.name);
if (!bdev_ctrlr) {
SPDK_ERRLOG("No such controller\n");
spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV));
@ -113,7 +113,7 @@ rpc_nvme_cuse_unregister(struct spdk_jsonrpc_request *request,
const struct spdk_json_val *params)
{
struct rpc_nvme_cuse_unregister req = {};
struct nvme_bdev_ctrlr *bdev_ctrlr = NULL;
struct nvme_ctrlr *bdev_ctrlr = NULL;
int rc;
if (spdk_json_decode_object(params, rpc_nvme_cuse_unregister_decoders,
@ -125,7 +125,7 @@ rpc_nvme_cuse_unregister(struct spdk_jsonrpc_request *request,
goto cleanup;
}
bdev_ctrlr = nvme_bdev_ctrlr_get_by_name(req.name);
bdev_ctrlr = nvme_ctrlr_get_by_name(req.name);
if (!bdev_ctrlr) {
SPDK_ERRLOG("No such controller\n");
spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV));

View File

@ -268,7 +268,7 @@ rpc_bdev_nvme_attach_controller(struct spdk_jsonrpc_request *request,
struct spdk_nvme_transport_id trid = {};
struct spdk_nvme_host_id hostid = {};
uint32_t prchk_flags = 0;
struct nvme_bdev_ctrlr *ctrlr = NULL;
struct nvme_ctrlr *ctrlr = NULL;
size_t len, maxlen;
int rc;
@ -302,7 +302,7 @@ rpc_bdev_nvme_attach_controller(struct spdk_jsonrpc_request *request,
rc = spdk_nvme_transport_id_parse_trtype(&trid.trtype, ctx->req.trtype);
assert(rc == 0);
ctrlr = nvme_bdev_ctrlr_get_by_name(ctx->req.name);
ctrlr = nvme_ctrlr_get_by_name(ctx->req.name);
/* Parse traddr */
maxlen = sizeof(trid.traddr);
@ -413,20 +413,20 @@ SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_attach_controller, construct_nvme_b
static void
rpc_dump_nvme_controller_info(struct spdk_json_write_ctx *w,
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)
struct nvme_ctrlr *nvme_ctrlr)
{
struct spdk_nvme_transport_id *trid;
trid = nvme_bdev_ctrlr->connected_trid;
trid = nvme_ctrlr->connected_trid;
spdk_json_write_object_begin(w);
spdk_json_write_named_string(w, "name", nvme_bdev_ctrlr->name);
spdk_json_write_named_string(w, "name", nvme_ctrlr->name);
#ifdef SPDK_CONFIG_NVME_CUSE
size_t cuse_name_size = 128;
char cuse_name[cuse_name_size];
int rc = spdk_nvme_cuse_get_ctrlr_name(nvme_bdev_ctrlr->ctrlr, cuse_name, &cuse_name_size);
int rc = spdk_nvme_cuse_get_ctrlr_name(nvme_ctrlr->ctrlr, cuse_name, &cuse_name_size);
if (rc == 0) {
spdk_json_write_named_string(w, "cuse_device", cuse_name);
}
@ -459,7 +459,7 @@ rpc_bdev_nvme_get_controllers(struct spdk_jsonrpc_request *request,
{
struct rpc_bdev_nvme_get_controllers req = {};
struct spdk_json_write_ctx *w;
struct nvme_bdev_ctrlr *ctrlr = NULL;
struct nvme_ctrlr *ctrlr = NULL;
if (params && spdk_json_decode_object(params, rpc_bdev_nvme_get_controllers_decoders,
SPDK_COUNTOF(rpc_bdev_nvme_get_controllers_decoders),
@ -471,7 +471,7 @@ rpc_bdev_nvme_get_controllers(struct spdk_jsonrpc_request *request,
}
if (req.name) {
ctrlr = nvme_bdev_ctrlr_get_by_name(req.name);
ctrlr = nvme_ctrlr_get_by_name(req.name);
if (ctrlr == NULL) {
SPDK_ERRLOG("ctrlr '%s' does not exist\n", req.name);
spdk_jsonrpc_send_error_response_fmt(request, EINVAL, "Controller %s does not exist", req.name);
@ -1085,7 +1085,7 @@ rpc_bdev_nvme_get_transport_statistics(struct spdk_jsonrpc_request *request,
spdk_json_write_object_begin(ctx->w);
spdk_json_write_named_array_begin(ctx->w, "poll_groups");
spdk_for_each_channel(&g_nvme_bdev_ctrlrs,
spdk_for_each_channel(&g_nvme_ctrlrs,
rpc_bdev_nvme_stats_per_channel,
ctx,
rpc_bdev_nvme_stats_done);

View File

@ -238,7 +238,7 @@ bdev_ocssd_destruct(void *ctx)
if (!nvme_ns->populated) {
pthread_mutex_unlock(&nvme_ns->ctrlr->mutex);
nvme_bdev_ctrlr_release(nvme_ns->ctrlr);
nvme_ctrlr_release(nvme_ns->ctrlr);
} else {
pthread_mutex_unlock(&nvme_ns->ctrlr->mutex);
}
@ -1037,14 +1037,14 @@ bdev_ocssd_chunk_notification_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
static int
bdev_ocssd_poll_mm(void *ctx)
{
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = ctx;
struct nvme_ctrlr *nvme_ctrlr = ctx;
struct nvme_ns *nvme_ns;
struct bdev_ocssd_ns *ocssd_ns;
uint32_t nsid;
int rc;
for (nsid = 0; nsid < nvme_bdev_ctrlr->num_ns; ++nsid) {
nvme_ns = nvme_bdev_ctrlr->namespaces[nsid];
for (nsid = 0; nsid < nvme_ctrlr->num_ns; ++nsid) {
nvme_ns = nvme_ctrlr->namespaces[nsid];
if (nvme_ns == NULL || !nvme_ns->populated) {
continue;
}
@ -1054,7 +1054,7 @@ bdev_ocssd_poll_mm(void *ctx)
ocssd_ns->chunk_notify_pending = false;
ocssd_ns->num_outstanding++;
rc = spdk_nvme_ctrlr_cmd_get_log_page(nvme_bdev_ctrlr->ctrlr,
rc = spdk_nvme_ctrlr_cmd_get_log_page(nvme_ctrlr->ctrlr,
SPDK_OCSSD_LOG_CHUNK_NOTIFICATION,
nsid + 1, ocssd_ns->chunk,
sizeof(ocssd_ns->chunk[0]) *
@ -1073,14 +1073,14 @@ bdev_ocssd_poll_mm(void *ctx)
}
void
bdev_ocssd_handle_chunk_notification(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)
bdev_ocssd_handle_chunk_notification(struct nvme_ctrlr *nvme_ctrlr)
{
struct bdev_ocssd_ns *ocssd_ns;
struct nvme_ns *nvme_ns;
uint32_t nsid;
for (nsid = 0; nsid < nvme_bdev_ctrlr->num_ns; ++nsid) {
nvme_ns = nvme_bdev_ctrlr->namespaces[nsid];
for (nsid = 0; nsid < nvme_ctrlr->num_ns; ++nsid) {
nvme_ns = nvme_ctrlr->namespaces[nsid];
if (nvme_ns == NULL || !nvme_ns->populated) {
continue;
}
@ -1248,7 +1248,7 @@ void
bdev_ocssd_create_bdev(const char *ctrlr_name, const char *bdev_name, uint32_t nsid,
bdev_ocssd_create_cb cb_fn, void *cb_arg)
{
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
struct bdev_ocssd_create_ctx *create_ctx = NULL;
struct nvme_bdev *nvme_bdev = NULL;
struct ocssd_bdev *ocssd_bdev = NULL;
@ -1258,14 +1258,14 @@ bdev_ocssd_create_bdev(const char *ctrlr_name, const char *bdev_name, uint32_t n
struct spdk_ocssd_geometry_data *geometry;
int rc = 0;
nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name(ctrlr_name);
if (!nvme_bdev_ctrlr) {
nvme_ctrlr = nvme_ctrlr_get_by_name(ctrlr_name);
if (!nvme_ctrlr) {
SPDK_ERRLOG("Unable to find controller %s\n", ctrlr_name);
rc = -ENODEV;
goto error;
}
ns = spdk_nvme_ctrlr_get_ns(nvme_bdev_ctrlr->ctrlr, nsid);
ns = spdk_nvme_ctrlr_get_ns(nvme_ctrlr->ctrlr, nsid);
if (!ns) {
SPDK_ERRLOG("Unable to retrieve namespace %"PRIu32"\n", nsid);
rc = -ENODEV;
@ -1278,8 +1278,8 @@ bdev_ocssd_create_bdev(const char *ctrlr_name, const char *bdev_name, uint32_t n
goto error;
}
assert(nsid <= nvme_bdev_ctrlr->num_ns);
nvme_ns = nvme_bdev_ctrlr->namespaces[nsid - 1];
assert(nsid <= nvme_ctrlr->num_ns);
nvme_ns = nvme_ctrlr->namespaces[nsid - 1];
if (nvme_ns == NULL) {
SPDK_ERRLOG("Namespace %"PRIu32" is not initialized\n", nsid);
rc = -EINVAL;
@ -1444,7 +1444,7 @@ bdev_ocssd_geometry_cb(void *_ctx, const struct spdk_nvme_cpl *cpl)
}
void
bdev_ocssd_populate_namespace(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr,
bdev_ocssd_populate_namespace(struct nvme_ctrlr *nvme_ctrlr,
struct nvme_ns *nvme_ns,
struct nvme_async_probe_ctx *nvme_ctx)
{
@ -1453,7 +1453,7 @@ bdev_ocssd_populate_namespace(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr,
struct spdk_nvme_ns *ns;
int rc;
ns = spdk_nvme_ctrlr_get_ns(nvme_bdev_ctrlr->ctrlr, nvme_ns->id);
ns = spdk_nvme_ctrlr_get_ns(nvme_ctrlr->ctrlr, nvme_ns->id);
if (ns == NULL) {
rc = -EINVAL;
goto error;
@ -1478,7 +1478,7 @@ bdev_ocssd_populate_namespace(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr,
ctx->nvme_ctx = nvme_ctx;
ctx->nvme_ns = nvme_ns;
rc = spdk_nvme_ocssd_ctrlr_cmd_geometry(nvme_bdev_ctrlr->ctrlr, nvme_ns->id,
rc = spdk_nvme_ocssd_ctrlr_cmd_geometry(nvme_ctrlr->ctrlr, nvme_ns->id,
&ocssd_ns->geometry,
sizeof(ocssd_ns->geometry),
bdev_ocssd_geometry_cb, ctx);
@ -1550,7 +1550,7 @@ bdev_ocssd_destroy_io_channel(struct nvme_ctrlr_channel *ctrlr_ch)
}
int
bdev_ocssd_init_ctrlr(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)
bdev_ocssd_init_ctrlr(struct nvme_ctrlr *nvme_ctrlr)
{
struct ocssd_bdev_ctrlr *ocssd_ctrlr;
@ -1559,24 +1559,24 @@ bdev_ocssd_init_ctrlr(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)
return -ENOMEM;
}
ocssd_ctrlr->mm_poller = SPDK_POLLER_REGISTER(bdev_ocssd_poll_mm, nvme_bdev_ctrlr,
ocssd_ctrlr->mm_poller = SPDK_POLLER_REGISTER(bdev_ocssd_poll_mm, nvme_ctrlr,
10000ULL);
if (!ocssd_ctrlr->mm_poller) {
free(ocssd_ctrlr);
return -ENOMEM;
}
nvme_bdev_ctrlr->ocssd_ctrlr = ocssd_ctrlr;
nvme_ctrlr->ocssd_ctrlr = ocssd_ctrlr;
return 0;
}
void
bdev_ocssd_fini_ctrlr(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)
bdev_ocssd_fini_ctrlr(struct nvme_ctrlr *nvme_ctrlr)
{
spdk_poller_unregister(&nvme_bdev_ctrlr->ocssd_ctrlr->mm_poller);
free(nvme_bdev_ctrlr->ocssd_ctrlr);
nvme_bdev_ctrlr->ocssd_ctrlr = NULL;
spdk_poller_unregister(&nvme_ctrlr->ocssd_ctrlr->mm_poller);
free(nvme_ctrlr->ocssd_ctrlr);
nvme_ctrlr->ocssd_ctrlr = NULL;
}
SPDK_LOG_REGISTER_COMPONENT(bdev_ocssd)

View File

@ -44,7 +44,7 @@ void bdev_ocssd_create_bdev(const char *ctrlr_name, const char *bdev_name, uint3
bdev_ocssd_create_cb cb_fn, void *cb_arg);
void bdev_ocssd_delete_bdev(const char *bdev_name, bdev_ocssd_delete_cb cb_fn, void *cb_arg);
void bdev_ocssd_populate_namespace(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr,
void bdev_ocssd_populate_namespace(struct nvme_ctrlr *nvme_ctrlr,
struct nvme_ns *nvme_ns,
struct nvme_async_probe_ctx *ctx);
void bdev_ocssd_depopulate_namespace(struct nvme_ns *nvme_ns);
@ -53,9 +53,9 @@ void bdev_ocssd_namespace_config_json(struct spdk_json_write_ctx *w, struct nvme
int bdev_ocssd_create_io_channel(struct nvme_ctrlr_channel *ioch);
void bdev_ocssd_destroy_io_channel(struct nvme_ctrlr_channel *ioch);
int bdev_ocssd_init_ctrlr(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr);
void bdev_ocssd_fini_ctrlr(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr);
int bdev_ocssd_init_ctrlr(struct nvme_ctrlr *nvme_ctrlr);
void bdev_ocssd_fini_ctrlr(struct nvme_ctrlr *nvme_ctrlr);
void bdev_ocssd_handle_chunk_notification(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr);
void bdev_ocssd_handle_chunk_notification(struct nvme_ctrlr *nvme_ctrlr);
#endif /* SPDK_BDEV_OCSSD_H */

View File

@ -35,50 +35,50 @@
#include "bdev_ocssd.h"
#include "common.h"
struct nvme_bdev_ctrlrs g_nvme_bdev_ctrlrs = TAILQ_HEAD_INITIALIZER(g_nvme_bdev_ctrlrs);
struct nvme_ctrlrs g_nvme_ctrlrs = TAILQ_HEAD_INITIALIZER(g_nvme_ctrlrs);
pthread_mutex_t g_bdev_nvme_mutex = PTHREAD_MUTEX_INITIALIZER;
bool g_bdev_nvme_module_finish;
struct nvme_bdev_ctrlr *
nvme_bdev_ctrlr_get(const struct spdk_nvme_transport_id *trid)
struct nvme_ctrlr *
nvme_ctrlr_get(const struct spdk_nvme_transport_id *trid)
{
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
TAILQ_FOREACH(nvme_bdev_ctrlr, &g_nvme_bdev_ctrlrs, tailq) {
if (spdk_nvme_transport_id_compare(trid, nvme_bdev_ctrlr->connected_trid) == 0) {
return nvme_bdev_ctrlr;
TAILQ_FOREACH(nvme_ctrlr, &g_nvme_ctrlrs, tailq) {
if (spdk_nvme_transport_id_compare(trid, nvme_ctrlr->connected_trid) == 0) {
return nvme_ctrlr;
}
}
return NULL;
}
struct nvme_bdev_ctrlr *
nvme_bdev_ctrlr_get_by_name(const char *name)
struct nvme_ctrlr *
nvme_ctrlr_get_by_name(const char *name)
{
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
if (name == NULL) {
return NULL;
}
TAILQ_FOREACH(nvme_bdev_ctrlr, &g_nvme_bdev_ctrlrs, tailq) {
if (strcmp(name, nvme_bdev_ctrlr->name) == 0) {
return nvme_bdev_ctrlr;
TAILQ_FOREACH(nvme_ctrlr, &g_nvme_ctrlrs, tailq) {
if (strcmp(name, nvme_ctrlr->name) == 0) {
return nvme_ctrlr;
}
}
return NULL;
}
struct nvme_bdev_ctrlr *
struct nvme_ctrlr *
nvme_bdev_first_ctrlr(void)
{
return TAILQ_FIRST(&g_nvme_bdev_ctrlrs);
return TAILQ_FIRST(&g_nvme_ctrlrs);
}
struct nvme_bdev_ctrlr *
nvme_bdev_next_ctrlr(struct nvme_bdev_ctrlr *prev)
struct nvme_ctrlr *
nvme_bdev_next_ctrlr(struct nvme_ctrlr *prev)
{
return TAILQ_NEXT(prev, tailq);
}
@ -113,52 +113,52 @@ nvme_bdev_dump_trid_json(const struct spdk_nvme_transport_id *trid, struct spdk_
}
void
nvme_bdev_ctrlr_delete(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)
nvme_ctrlr_delete(struct nvme_ctrlr *nvme_ctrlr)
{
struct nvme_ctrlr_trid *trid, *tmp_trid;
uint32_t i;
if (nvme_bdev_ctrlr->opal_dev) {
spdk_opal_dev_destruct(nvme_bdev_ctrlr->opal_dev);
nvme_bdev_ctrlr->opal_dev = NULL;
if (nvme_ctrlr->opal_dev) {
spdk_opal_dev_destruct(nvme_ctrlr->opal_dev);
nvme_ctrlr->opal_dev = NULL;
}
if (nvme_bdev_ctrlr->ocssd_ctrlr) {
bdev_ocssd_fini_ctrlr(nvme_bdev_ctrlr);
if (nvme_ctrlr->ocssd_ctrlr) {
bdev_ocssd_fini_ctrlr(nvme_ctrlr);
}
pthread_mutex_lock(&g_bdev_nvme_mutex);
TAILQ_REMOVE(&g_nvme_bdev_ctrlrs, nvme_bdev_ctrlr, tailq);
TAILQ_REMOVE(&g_nvme_ctrlrs, nvme_ctrlr, tailq);
pthread_mutex_unlock(&g_bdev_nvme_mutex);
spdk_nvme_detach(nvme_bdev_ctrlr->ctrlr);
spdk_poller_unregister(&nvme_bdev_ctrlr->adminq_timer_poller);
free(nvme_bdev_ctrlr->name);
for (i = 0; i < nvme_bdev_ctrlr->num_ns; i++) {
free(nvme_bdev_ctrlr->namespaces[i]);
spdk_nvme_detach(nvme_ctrlr->ctrlr);
spdk_poller_unregister(&nvme_ctrlr->adminq_timer_poller);
free(nvme_ctrlr->name);
for (i = 0; i < nvme_ctrlr->num_ns; i++) {
free(nvme_ctrlr->namespaces[i]);
}
TAILQ_FOREACH_SAFE(trid, &nvme_bdev_ctrlr->trids, link, tmp_trid) {
TAILQ_REMOVE(&nvme_bdev_ctrlr->trids, trid, link);
TAILQ_FOREACH_SAFE(trid, &nvme_ctrlr->trids, link, tmp_trid) {
TAILQ_REMOVE(&nvme_ctrlr->trids, trid, link);
free(trid);
}
pthread_mutex_destroy(&nvme_bdev_ctrlr->mutex);
pthread_mutex_destroy(&nvme_ctrlr->mutex);
free(nvme_bdev_ctrlr->namespaces);
free(nvme_bdev_ctrlr);
free(nvme_ctrlr->namespaces);
free(nvme_ctrlr);
}
static void
nvme_bdev_ctrlr_unregister_cb(void *io_device)
nvme_ctrlr_unregister_cb(void *io_device)
{
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = io_device;
struct nvme_ctrlr *nvme_ctrlr = io_device;
nvme_bdev_ctrlr_delete(nvme_bdev_ctrlr);
nvme_ctrlr_delete(nvme_ctrlr);
pthread_mutex_lock(&g_bdev_nvme_mutex);
if (g_bdev_nvme_module_finish && TAILQ_EMPTY(&g_nvme_bdev_ctrlrs)) {
if (g_bdev_nvme_module_finish && TAILQ_EMPTY(&g_nvme_ctrlrs)) {
pthread_mutex_unlock(&g_bdev_nvme_mutex);
spdk_io_device_unregister(&g_nvme_bdev_ctrlrs, NULL);
spdk_io_device_unregister(&g_nvme_ctrlrs, NULL);
spdk_bdev_module_finish_done();
return;
}
@ -167,48 +167,48 @@ nvme_bdev_ctrlr_unregister_cb(void *io_device)
}
void
nvme_bdev_ctrlr_unregister(void *ctx)
nvme_ctrlr_unregister(void *ctx)
{
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = ctx;
struct nvme_ctrlr *nvme_ctrlr = ctx;
spdk_io_device_unregister(nvme_bdev_ctrlr, nvme_bdev_ctrlr_unregister_cb);
spdk_io_device_unregister(nvme_ctrlr, nvme_ctrlr_unregister_cb);
}
void
nvme_bdev_ctrlr_release(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)
nvme_ctrlr_release(struct nvme_ctrlr *nvme_ctrlr)
{
pthread_mutex_lock(&nvme_bdev_ctrlr->mutex);
pthread_mutex_lock(&nvme_ctrlr->mutex);
assert(nvme_bdev_ctrlr->ref > 0);
nvme_bdev_ctrlr->ref--;
assert(nvme_ctrlr->ref > 0);
nvme_ctrlr->ref--;
if (nvme_bdev_ctrlr->ref > 0 || !nvme_bdev_ctrlr->destruct ||
nvme_bdev_ctrlr->resetting) {
pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex);
if (nvme_ctrlr->ref > 0 || !nvme_ctrlr->destruct ||
nvme_ctrlr->resetting) {
pthread_mutex_unlock(&nvme_ctrlr->mutex);
return;
}
pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex);
pthread_mutex_unlock(&nvme_ctrlr->mutex);
nvme_bdev_ctrlr_unregister(nvme_bdev_ctrlr);
nvme_ctrlr_unregister(nvme_ctrlr);
}
void
nvme_ctrlr_depopulate_namespace_done(struct nvme_ns *nvme_ns)
{
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = nvme_ns->ctrlr;
struct nvme_ctrlr *nvme_ctrlr = nvme_ns->ctrlr;
assert(nvme_bdev_ctrlr != NULL);
assert(nvme_ctrlr != NULL);
pthread_mutex_lock(&nvme_bdev_ctrlr->mutex);
pthread_mutex_lock(&nvme_ctrlr->mutex);
nvme_ns->populated = false;
if (nvme_ns->bdev != NULL) {
pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex);
pthread_mutex_unlock(&nvme_ctrlr->mutex);
return;
}
pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex);
pthread_mutex_unlock(&nvme_ctrlr->mutex);
nvme_bdev_ctrlr_release(nvme_bdev_ctrlr);
nvme_ctrlr_release(nvme_ctrlr);
}

View File

@ -39,8 +39,8 @@
#include "spdk/bdev_module.h"
#include "spdk/opal.h"
TAILQ_HEAD(nvme_bdev_ctrlrs, nvme_bdev_ctrlr);
extern struct nvme_bdev_ctrlrs g_nvme_bdev_ctrlrs;
TAILQ_HEAD(nvme_ctrlrs, nvme_ctrlr);
extern struct nvme_ctrlrs g_nvme_ctrlrs;
extern pthread_mutex_t g_bdev_nvme_mutex;
extern bool g_bdev_nvme_module_finish;
@ -63,7 +63,7 @@ struct nvme_ns {
*/
bool populated;
struct spdk_nvme_ns *ns;
struct nvme_bdev_ctrlr *ctrlr;
struct nvme_ctrlr *ctrlr;
struct nvme_bdev *bdev;
void *type_ctx;
};
@ -76,7 +76,7 @@ struct nvme_ctrlr_trid {
bool is_failed;
};
struct nvme_bdev_ctrlr {
struct nvme_ctrlr {
/**
* points to pinned, physically contiguous memory region;
* contains 4KB IDENTIFY structure for controller which is
@ -110,7 +110,7 @@ struct nvme_bdev_ctrlr {
struct spdk_bdev_io *reset_bdev_io;
/** linked list pointer for device list */
TAILQ_ENTRY(nvme_bdev_ctrlr) tailq;
TAILQ_ENTRY(nvme_ctrlr) tailq;
TAILQ_HEAD(, nvme_ctrlr_trid) trids;
@ -155,7 +155,7 @@ struct nvme_async_probe_ctx {
struct ocssd_io_channel;
struct nvme_ctrlr_channel {
struct nvme_bdev_ctrlr *ctrlr;
struct nvme_ctrlr *ctrlr;
struct spdk_nvme_qpair *qpair;
struct nvme_bdev_poll_group *group;
TAILQ_HEAD(, spdk_bdev_io) pending_resets;
@ -166,16 +166,16 @@ void nvme_ctrlr_populate_namespace_done(struct nvme_async_probe_ctx *ctx,
struct nvme_ns *nvme_ns, int rc);
void nvme_ctrlr_depopulate_namespace_done(struct nvme_ns *nvme_ns);
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr_get(const struct spdk_nvme_transport_id *trid);
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr_get_by_name(const char *name);
struct nvme_bdev_ctrlr *nvme_bdev_first_ctrlr(void);
struct nvme_bdev_ctrlr *nvme_bdev_next_ctrlr(struct nvme_bdev_ctrlr *prev);
struct nvme_ctrlr *nvme_ctrlr_get(const struct spdk_nvme_transport_id *trid);
struct nvme_ctrlr *nvme_ctrlr_get_by_name(const char *name);
struct nvme_ctrlr *nvme_bdev_first_ctrlr(void);
struct nvme_ctrlr *nvme_bdev_next_ctrlr(struct nvme_ctrlr *prev);
void nvme_bdev_dump_trid_json(const struct spdk_nvme_transport_id *trid,
struct spdk_json_write_ctx *w);
void nvme_bdev_ctrlr_release(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr);
void nvme_bdev_ctrlr_unregister(void *ctx);
void nvme_bdev_ctrlr_delete(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr);
void nvme_ctrlr_release(struct nvme_ctrlr *nvme_ctrlr);
void nvme_ctrlr_unregister(void *ctx);
void nvme_ctrlr_delete(struct nvme_ctrlr *nvme_ctrlr);
#endif /* SPDK_COMMON_BDEV_NVME_H */
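
A minimal usage sketch of the renamed common API (the helper below is hypothetical; it only mirrors the RPC and OCSSD call sites shown elsewhere in this diff):

/* Hypothetical caller, for illustration of the rename only. */
#include <errno.h>

#include "common.h"  /* declares nvme_ctrlr_get_by_name() and nvme_ctrlr_release()
                      * after this patch; include path is assumed */

static int
example_lookup(const char *name)
{
        struct nvme_ctrlr *nvme_ctrlr;

        nvme_ctrlr = nvme_ctrlr_get_by_name(name);  /* was nvme_bdev_ctrlr_get_by_name() */
        if (nvme_ctrlr == NULL) {
                return -ENODEV;                     /* "No such controller" path in the RPCs */
        }

        /* ... use nvme_ctrlr->ctrlr, nvme_ctrlr->namespaces, etc. ... */

        /* Call sites that took a reference (e.g. namespace population) drop it
         * later with nvme_ctrlr_release(), formerly nvme_bdev_ctrlr_release().
         */
        return 0;
}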

View File

@ -70,7 +70,7 @@ struct rpc_bdev_nvme_send_cmd_ctx {
struct spdk_jsonrpc_request *jsonrpc_request;
struct rpc_bdev_nvme_send_cmd_req req;
struct rpc_bdev_nvme_send_cmd_resp resp;
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
struct spdk_io_channel *ctrlr_io_ch;
};
@ -172,7 +172,7 @@ static int
nvme_rpc_admin_cmd_bdev_nvme(struct rpc_bdev_nvme_send_cmd_ctx *ctx, struct spdk_nvme_cmd *cmd,
void *buf, uint32_t nbytes, uint32_t timeout_ms)
{
struct nvme_bdev_ctrlr *_nvme_ctrlr = ctx->nvme_bdev_ctrlr;
struct nvme_ctrlr *_nvme_ctrlr = ctx->nvme_ctrlr;
int ret;
ret = spdk_nvme_ctrlr_cmd_admin_raw(_nvme_ctrlr->ctrlr, cmd, buf,
@ -186,7 +186,7 @@ nvme_rpc_io_cmd_bdev_nvme(struct rpc_bdev_nvme_send_cmd_ctx *ctx, struct spdk_nv
void *buf, uint32_t nbytes, void *md_buf, uint32_t md_len,
uint32_t timeout_ms)
{
struct nvme_bdev_ctrlr *_nvme_ctrlr = ctx->nvme_bdev_ctrlr;
struct nvme_ctrlr *_nvme_ctrlr = ctx->nvme_ctrlr;
struct spdk_nvme_qpair *io_qpair;
int ret;
@ -464,8 +464,8 @@ rpc_bdev_nvme_send_cmd(struct spdk_jsonrpc_request *request,
goto invalid;
}
ctx->nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name(ctx->req.name);
if (ctx->nvme_bdev_ctrlr == NULL) {
ctx->nvme_ctrlr = nvme_ctrlr_get_by_name(ctx->req.name);
if (ctx->nvme_ctrlr == NULL) {
SPDK_ERRLOG("Failed at device lookup\n");
error_code = SPDK_JSONRPC_ERROR_INVALID_PARAMS;
ret = -EINVAL;

View File

@ -42,7 +42,7 @@
struct opal_vbdev {
char *name;
struct nvme_bdev_ctrlr *nvme_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
struct spdk_opal_dev *opal_dev;
struct spdk_bdev_part *bdev_part;
@ -227,7 +227,7 @@ struct spdk_opal_locking_range_info *
vbdev_opal_get_info_from_bdev(const char *opal_bdev_name, const char *password)
{
struct opal_vbdev *vbdev;
struct nvme_bdev_ctrlr *nvme_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
int locking_range_id;
int rc;
@ -321,7 +321,7 @@ vbdev_opal_create(const char *nvme_ctrlr_name, uint32_t nsid, uint8_t locking_ra
int rc;
char *opal_vbdev_name;
char *base_bdev_name;
struct nvme_bdev_ctrlr *nvme_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
struct opal_vbdev *opal_bdev;
struct vbdev_opal_part_base *opal_part_base = NULL;
struct spdk_bdev_part *part_bdev;
@ -332,7 +332,7 @@ vbdev_opal_create(const char *nvme_ctrlr_name, uint32_t nsid, uint8_t locking_ra
return -EINVAL;
}
nvme_ctrlr = nvme_bdev_ctrlr_get_by_name(nvme_ctrlr_name);
nvme_ctrlr = nvme_ctrlr_get_by_name(nvme_ctrlr_name);
if (!nvme_ctrlr) {
SPDK_ERRLOG("get nvme ctrlr failed\n");
return -ENODEV;
@ -471,7 +471,7 @@ vbdev_opal_destruct_bdev(struct opal_vbdev *opal_bdev)
int
vbdev_opal_destruct(const char *bdev_name, const char *password)
{
struct nvme_bdev_ctrlr *nvme_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
int locking_range_id;
int rc;
struct opal_vbdev *opal_bdev;
@ -531,7 +531,7 @@ int
vbdev_opal_set_lock_state(const char *bdev_name, uint16_t user_id, const char *password,
const char *lock_state)
{
struct nvme_bdev_ctrlr *nvme_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
int locking_range_id;
int rc;
enum spdk_opal_lock_state state_flag;
@ -579,7 +579,7 @@ int
vbdev_opal_enable_new_user(const char *bdev_name, const char *admin_password, uint16_t user_id,
const char *user_password)
{
struct nvme_bdev_ctrlr *nvme_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
int locking_range_id;
int rc;
struct opal_vbdev *opal_bdev;

View File

@ -60,7 +60,7 @@ rpc_bdev_nvme_opal_init(struct spdk_jsonrpc_request *request,
const struct spdk_json_val *params)
{
struct rpc_bdev_nvme_opal_init req = {};
struct nvme_bdev_ctrlr *nvme_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
int rc;
if (spdk_json_decode_object(params, rpc_bdev_nvme_opal_init_decoders,
@ -72,7 +72,7 @@ rpc_bdev_nvme_opal_init(struct spdk_jsonrpc_request *request,
}
/* check if opal supported */
nvme_ctrlr = nvme_bdev_ctrlr_get_by_name(req.nvme_ctrlr_name);
nvme_ctrlr = nvme_ctrlr_get_by_name(req.nvme_ctrlr_name);
if (nvme_ctrlr == NULL || nvme_ctrlr->opal_dev == NULL) {
SPDK_ERRLOG("%s not support opal\n", req.nvme_ctrlr_name);
spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
@ -135,7 +135,7 @@ rpc_bdev_nvme_opal_revert(struct spdk_jsonrpc_request *request,
const struct spdk_json_val *params)
{
struct rpc_bdev_nvme_opal_revert req = {};
struct nvme_bdev_ctrlr *nvme_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
int rc;
if (spdk_json_decode_object(params, rpc_bdev_nvme_opal_revert_decoders,
@ -147,7 +147,7 @@ rpc_bdev_nvme_opal_revert(struct spdk_jsonrpc_request *request,
}
/* check if opal supported */
nvme_ctrlr = nvme_bdev_ctrlr_get_by_name(req.nvme_ctrlr_name);
nvme_ctrlr = nvme_ctrlr_get_by_name(req.nvme_ctrlr_name);
if (nvme_ctrlr == NULL || nvme_ctrlr->opal_dev == NULL) {
SPDK_ERRLOG("%s not support opal\n", req.nvme_ctrlr_name);
spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");

View File

@ -200,7 +200,7 @@ DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_c
DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));
DEFINE_STUB_V(bdev_ocssd_populate_namespace, (struct nvme_bdev_ctrlr *nvme_bdev_ctrlr,
DEFINE_STUB_V(bdev_ocssd_populate_namespace, (struct nvme_ctrlr *nvme_ctrlr,
struct nvme_ns *nvme_ns, struct nvme_async_probe_ctx *ctx));
DEFINE_STUB_V(bdev_ocssd_depopulate_namespace, (struct nvme_ns *nvme_ns));
@ -212,11 +212,11 @@ DEFINE_STUB(bdev_ocssd_create_io_channel, int, (struct nvme_ctrlr_channel *ioch)
DEFINE_STUB_V(bdev_ocssd_destroy_io_channel, (struct nvme_ctrlr_channel *ioch));
DEFINE_STUB(bdev_ocssd_init_ctrlr, int, (struct nvme_bdev_ctrlr *nvme_bdev_ctrlr), 0);
DEFINE_STUB(bdev_ocssd_init_ctrlr, int, (struct nvme_ctrlr *nvme_ctrlr), 0);
DEFINE_STUB_V(bdev_ocssd_fini_ctrlr, (struct nvme_bdev_ctrlr *nvme_bdev_ctrlr));
DEFINE_STUB_V(bdev_ocssd_fini_ctrlr, (struct nvme_ctrlr *nvme_ctrlr));
DEFINE_STUB_V(bdev_ocssd_handle_chunk_notification, (struct nvme_bdev_ctrlr *nvme_bdev_ctrlr));
DEFINE_STUB_V(bdev_ocssd_handle_chunk_notification, (struct nvme_ctrlr *nvme_ctrlr));
DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
struct iovec *iov,
@ -1007,19 +1007,19 @@ test_create_ctrlr(void)
ut_init_trid(&trid);
rc = nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
CU_ASSERT(rc == 0);
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") != NULL);
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);
rc = bdev_nvme_delete("nvme0", NULL);
CU_ASSERT(rc == 0);
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") != NULL);
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
static void
@ -1027,7 +1027,7 @@ test_reset_ctrlr(void)
{
struct spdk_nvme_transport_id trid = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL;
struct nvme_ctrlr *nvme_ctrlr = NULL;
struct nvme_ctrlr_trid *curr_trid;
struct spdk_io_channel *ch1, *ch2;
struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
@ -1038,16 +1038,16 @@ test_reset_ctrlr(void)
set_thread(0);
rc = nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
CU_ASSERT(rc == 0);
nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);
nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
curr_trid = TAILQ_FIRST(&nvme_bdev_ctrlr->trids);
curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
ch1 = spdk_get_io_channel(nvme_bdev_ctrlr);
ch1 = spdk_get_io_channel(nvme_ctrlr);
SPDK_CU_ASSERT_FATAL(ch1 != NULL);
ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
@ -1055,7 +1055,7 @@ test_reset_ctrlr(void)
set_thread(1);
ch2 = spdk_get_io_channel(nvme_bdev_ctrlr);
ch2 = spdk_get_io_channel(nvme_ctrlr);
SPDK_CU_ASSERT_FATAL(ch2 != NULL);
ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
@ -1065,26 +1065,26 @@ test_reset_ctrlr(void)
set_thread(1);
/* Case 1: ctrlr is already being destructed. */
nvme_bdev_ctrlr->destruct = true;
nvme_ctrlr->destruct = true;
rc = _bdev_nvme_reset(nvme_bdev_ctrlr);
rc = _bdev_nvme_reset(nvme_ctrlr);
CU_ASSERT(rc == -EBUSY);
/* Case 2: reset is in progress. */
nvme_bdev_ctrlr->destruct = false;
nvme_bdev_ctrlr->resetting = true;
nvme_ctrlr->destruct = false;
nvme_ctrlr->resetting = true;
rc = _bdev_nvme_reset(nvme_bdev_ctrlr);
rc = _bdev_nvme_reset(nvme_ctrlr);
CU_ASSERT(rc == -EAGAIN);
/* Case 3: reset completes successfully. */
nvme_bdev_ctrlr->resetting = false;
nvme_ctrlr->resetting = false;
curr_trid->is_failed = true;
ctrlr.is_failed = true;
rc = _bdev_nvme_reset(nvme_bdev_ctrlr);
rc = _bdev_nvme_reset(nvme_ctrlr);
CU_ASSERT(rc == 0);
CU_ASSERT(nvme_bdev_ctrlr->resetting == true);
CU_ASSERT(nvme_ctrlr->resetting == true);
CU_ASSERT(ctrlr_ch1->qpair != NULL);
CU_ASSERT(ctrlr_ch2->qpair != NULL);
@ -1107,11 +1107,11 @@ test_reset_ctrlr(void)
poll_thread_times(1, 1);
CU_ASSERT(ctrlr_ch1->qpair != NULL);
CU_ASSERT(ctrlr_ch2->qpair != NULL);
CU_ASSERT(nvme_bdev_ctrlr->resetting == true);
CU_ASSERT(nvme_ctrlr->resetting == true);
CU_ASSERT(curr_trid->is_failed == true);
poll_thread_times(1, 1);
CU_ASSERT(nvme_bdev_ctrlr->resetting == false);
CU_ASSERT(nvme_ctrlr->resetting == false);
CU_ASSERT(curr_trid->is_failed == false);
spdk_put_io_channel(ch2);
@ -1127,7 +1127,7 @@ test_reset_ctrlr(void)
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
static void
@ -1135,7 +1135,7 @@ test_race_between_reset_and_destruct_ctrlr(void)
{
struct spdk_nvme_transport_id trid = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
struct spdk_io_channel *ch1, *ch2;
int rc;
@ -1144,45 +1144,45 @@ test_race_between_reset_and_destruct_ctrlr(void)
set_thread(0);
rc = nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
CU_ASSERT(rc == 0);
nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);
nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
ch1 = spdk_get_io_channel(nvme_bdev_ctrlr);
ch1 = spdk_get_io_channel(nvme_ctrlr);
SPDK_CU_ASSERT_FATAL(ch1 != NULL);
set_thread(1);
ch2 = spdk_get_io_channel(nvme_bdev_ctrlr);
ch2 = spdk_get_io_channel(nvme_ctrlr);
SPDK_CU_ASSERT_FATAL(ch2 != NULL);
/* Reset starts from thread 1. */
set_thread(1);
rc = _bdev_nvme_reset(nvme_bdev_ctrlr);
rc = _bdev_nvme_reset(nvme_ctrlr);
CU_ASSERT(rc == 0);
CU_ASSERT(nvme_bdev_ctrlr->resetting == true);
CU_ASSERT(nvme_ctrlr->resetting == true);
/* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */
set_thread(0);
rc = bdev_nvme_delete("nvme0", NULL);
CU_ASSERT(rc == 0);
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr);
CU_ASSERT(nvme_bdev_ctrlr->destruct == true);
CU_ASSERT(nvme_bdev_ctrlr->resetting == true);
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
CU_ASSERT(nvme_ctrlr->destruct == true);
CU_ASSERT(nvme_ctrlr->resetting == true);
poll_threads();
/* Reset completed but ctrlr is not still destructed yet. */
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr);
CU_ASSERT(nvme_bdev_ctrlr->destruct == true);
CU_ASSERT(nvme_bdev_ctrlr->resetting == false);
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
CU_ASSERT(nvme_ctrlr->destruct == true);
CU_ASSERT(nvme_ctrlr->resetting == false);
/* New reset request is rejected. */
rc = _bdev_nvme_reset(nvme_bdev_ctrlr);
rc = _bdev_nvme_reset(nvme_ctrlr);
CU_ASSERT(rc == -EBUSY);
/* Additional polling called spdk_io_device_unregister() to ctrlr,
@ -1190,7 +1190,7 @@ test_race_between_reset_and_destruct_ctrlr(void)
*/
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr);
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
set_thread(0);
@ -1202,7 +1202,7 @@ test_race_between_reset_and_destruct_ctrlr(void)
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
static void
@ -1210,7 +1210,7 @@ test_failover_ctrlr(void)
{
struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL;
struct nvme_ctrlr *nvme_ctrlr = NULL;
struct nvme_ctrlr_trid *curr_trid, *next_trid;
struct spdk_io_channel *ch1, *ch2;
int rc;
@ -1221,112 +1221,112 @@ test_failover_ctrlr(void)
set_thread(0);
rc = nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL);
rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL);
CU_ASSERT(rc == 0);
nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);
nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
ch1 = spdk_get_io_channel(nvme_bdev_ctrlr);
ch1 = spdk_get_io_channel(nvme_ctrlr);
SPDK_CU_ASSERT_FATAL(ch1 != NULL);
set_thread(1);
ch2 = spdk_get_io_channel(nvme_bdev_ctrlr);
ch2 = spdk_get_io_channel(nvme_ctrlr);
SPDK_CU_ASSERT_FATAL(ch2 != NULL);
/* First, test one trid case. */
curr_trid = TAILQ_FIRST(&nvme_bdev_ctrlr->trids);
curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
/* Failover starts from thread 1. */
set_thread(1);
/* Case 1: ctrlr is already being destructed. */
nvme_bdev_ctrlr->destruct = true;
nvme_ctrlr->destruct = true;
rc = bdev_nvme_failover(nvme_bdev_ctrlr, false);
rc = bdev_nvme_failover(nvme_ctrlr, false);
CU_ASSERT(rc == 0);
CU_ASSERT(curr_trid->is_failed == false);
/* Case 2: reset is in progress. */
nvme_bdev_ctrlr->destruct = false;
nvme_bdev_ctrlr->resetting = true;
nvme_ctrlr->destruct = false;
nvme_ctrlr->resetting = true;
rc = bdev_nvme_failover(nvme_bdev_ctrlr, false);
rc = bdev_nvme_failover(nvme_ctrlr, false);
CU_ASSERT(rc == 0);
/* Case 3: failover is in progress. */
nvme_bdev_ctrlr->failover_in_progress = true;
nvme_ctrlr->failover_in_progress = true;
rc = bdev_nvme_failover(nvme_bdev_ctrlr, false);
rc = bdev_nvme_failover(nvme_ctrlr, false);
CU_ASSERT(rc == 0);
CU_ASSERT(curr_trid->is_failed == false);
/* Case 4: reset completes successfully. */
nvme_bdev_ctrlr->resetting = false;
nvme_bdev_ctrlr->failover_in_progress = false;
nvme_ctrlr->resetting = false;
nvme_ctrlr->failover_in_progress = false;
rc = bdev_nvme_failover(nvme_bdev_ctrlr, false);
rc = bdev_nvme_failover(nvme_ctrlr, false);
CU_ASSERT(rc == 0);
CU_ASSERT(nvme_bdev_ctrlr->resetting == true);
CU_ASSERT(nvme_ctrlr->resetting == true);
CU_ASSERT(curr_trid->is_failed == true);
poll_threads();
curr_trid = TAILQ_FIRST(&nvme_bdev_ctrlr->trids);
curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
CU_ASSERT(nvme_bdev_ctrlr->resetting == false);
CU_ASSERT(nvme_ctrlr->resetting == false);
CU_ASSERT(curr_trid->is_failed == false);
set_thread(0);
/* Second, test two trids case. */
rc = bdev_nvme_add_secondary_trid(nvme_bdev_ctrlr, &ctrlr, &trid2);
rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
CU_ASSERT(rc == 0);
curr_trid = TAILQ_FIRST(&nvme_bdev_ctrlr->trids);
curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
CU_ASSERT(&curr_trid->trid == nvme_bdev_ctrlr->connected_trid);
CU_ASSERT(&curr_trid->trid == nvme_ctrlr->connected_trid);
CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);
/* Failover starts from thread 1. */
set_thread(1);
/* Case 5: reset is in progress. */
nvme_bdev_ctrlr->resetting = true;
nvme_ctrlr->resetting = true;
rc = bdev_nvme_failover(nvme_bdev_ctrlr, false);
rc = bdev_nvme_failover(nvme_ctrlr, false);
CU_ASSERT(rc == -EAGAIN);
/* Case 5: failover is in progress. */
nvme_bdev_ctrlr->failover_in_progress = true;
nvme_ctrlr->failover_in_progress = true;
rc = bdev_nvme_failover(nvme_bdev_ctrlr, false);
rc = bdev_nvme_failover(nvme_ctrlr, false);
CU_ASSERT(rc == 0);
/* Case 6: failover completes successfully. */
nvme_bdev_ctrlr->resetting = false;
nvme_bdev_ctrlr->failover_in_progress = false;
nvme_ctrlr->resetting = false;
nvme_ctrlr->failover_in_progress = false;
rc = bdev_nvme_failover(nvme_bdev_ctrlr, false);
rc = bdev_nvme_failover(nvme_ctrlr, false);
CU_ASSERT(rc == 0);
CU_ASSERT(nvme_bdev_ctrlr->resetting == true);
CU_ASSERT(nvme_bdev_ctrlr->failover_in_progress == true);
CU_ASSERT(nvme_ctrlr->resetting == true);
CU_ASSERT(nvme_ctrlr->failover_in_progress == true);
next_trid = TAILQ_FIRST(&nvme_bdev_ctrlr->trids);
next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
SPDK_CU_ASSERT_FATAL(next_trid != NULL);
CU_ASSERT(next_trid != curr_trid);
CU_ASSERT(&next_trid->trid == nvme_bdev_ctrlr->connected_trid);
CU_ASSERT(&next_trid->trid == nvme_ctrlr->connected_trid);
CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr->resetting == false);
CU_ASSERT(nvme_bdev_ctrlr->failover_in_progress == false);
CU_ASSERT(nvme_ctrlr->resetting == false);
CU_ASSERT(nvme_ctrlr->failover_in_progress == false);
spdk_put_io_channel(ch2);
@ -1341,7 +1341,7 @@ test_failover_ctrlr(void)
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
static void
@ -1357,7 +1357,7 @@ test_pending_reset(void)
struct spdk_nvme_transport_id trid = {};
struct spdk_nvme_host_id hostid = {};
struct spdk_nvme_ctrlr *ctrlr;
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL;
struct nvme_ctrlr *nvme_ctrlr = NULL;
const int STRING_SIZE = 32;
const char *attached_names[STRING_SIZE];
struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
@ -1391,17 +1391,17 @@ test_pending_reset(void)
spdk_delay_us(1000);
poll_threads();
nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);
nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
ch1 = spdk_get_io_channel(nvme_bdev_ctrlr);
ch1 = spdk_get_io_channel(nvme_ctrlr);
SPDK_CU_ASSERT_FATAL(ch1 != NULL);
ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
set_thread(1);
ch2 = spdk_get_io_channel(nvme_bdev_ctrlr);
ch2 = spdk_get_io_channel(nvme_ctrlr);
SPDK_CU_ASSERT_FATAL(ch2 != NULL);
ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
@ -1411,7 +1411,7 @@ test_pending_reset(void)
*/
rc = bdev_nvme_reset(ctrlr_ch2, first_bdev_io);
CU_ASSERT(rc == 0);
CU_ASSERT(nvme_bdev_ctrlr->resetting == true);
CU_ASSERT(nvme_ctrlr->resetting == true);
CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
set_thread(0);
@ -1422,7 +1422,7 @@ test_pending_reset(void)
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr->resetting == false);
CU_ASSERT(nvme_ctrlr->resetting == false);
CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
@ -1436,7 +1436,7 @@ test_pending_reset(void)
rc = bdev_nvme_reset(ctrlr_ch2, first_bdev_io);
CU_ASSERT(rc == 0);
CU_ASSERT(nvme_bdev_ctrlr->resetting == true);
CU_ASSERT(nvme_ctrlr->resetting == true);
CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));
set_thread(0);
@ -1449,7 +1449,7 @@ test_pending_reset(void)
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr->resetting == false);
CU_ASSERT(nvme_ctrlr->resetting == false);
CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
@ -1468,7 +1468,7 @@ test_pending_reset(void)
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
free(first_bdev_io);
free(second_bdev_io);
@ -1480,7 +1480,7 @@ test_attach_ctrlr(void)
struct spdk_nvme_transport_id trid = {};
struct spdk_nvme_host_id hostid = {};
struct spdk_nvme_ctrlr *ctrlr;
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
const int STRING_SIZE = 32;
const char *attached_names[STRING_SIZE];
struct nvme_bdev *nbdev;
@ -1491,7 +1491,7 @@ test_attach_ctrlr(void)
memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
ut_init_trid(&trid);
/* If ctrlr fails, no nvme_bdev_ctrlr is created. Failed ctrlr is removed
/* If ctrlr fails, no nvme_ctrlr is created. Failed ctrlr is removed
* by probe polling.
*/
ctrlr = ut_attach_ctrlr(&trid, 0);
@ -1508,9 +1508,9 @@ test_attach_ctrlr(void)
spdk_delay_us(1000);
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
/* If ctrlr has no namespace, one nvme_bdev_ctrlr with no namespace is created */
/* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created */
ctrlr = ut_attach_ctrlr(&trid, 0);
SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
@ -1523,19 +1523,19 @@ test_attach_ctrlr(void)
spdk_delay_us(1000);
poll_threads();
nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);
CU_ASSERT(nvme_bdev_ctrlr->ctrlr == ctrlr);
CU_ASSERT(nvme_bdev_ctrlr->num_ns == 0);
nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
CU_ASSERT(nvme_ctrlr->num_ns == 0);
rc = bdev_nvme_delete("nvme0", NULL);
CU_ASSERT(rc == 0);
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
/* If ctrlr has one namespace, one nvme_bdev_ctrlr with one namespace and
/* If ctrlr has one namespace, one nvme_ctrlr with one namespace and
* one nvme_bdev is created.
*/
ctrlr = ut_attach_ctrlr(&trid, 1);
@ -1550,15 +1550,15 @@ test_attach_ctrlr(void)
spdk_delay_us(1000);
poll_threads();
nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);
CU_ASSERT(nvme_bdev_ctrlr->ctrlr == ctrlr);
CU_ASSERT(nvme_bdev_ctrlr->num_ns == 1);
nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
CU_ASSERT(nvme_ctrlr->num_ns == 1);
CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
attached_names[0] = NULL;
nbdev = nvme_bdev_ctrlr->namespaces[0]->bdev;
nbdev = nvme_ctrlr->namespaces[0]->bdev;
SPDK_CU_ASSERT_FATAL(nbdev != NULL);
CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);
@ -1567,9 +1567,9 @@ test_attach_ctrlr(void)
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
/* Ctrlr has one namespace but one nvme_bdev_ctrlr with no namespace is
/* Ctrlr has one namespace but one nvme_ctrlr with no namespace is
* created because creating one nvme_bdev failed.
*/
ctrlr = ut_attach_ctrlr(&trid, 1);
@ -1585,10 +1585,10 @@ test_attach_ctrlr(void)
spdk_delay_us(1000);
poll_threads();
nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);
CU_ASSERT(nvme_bdev_ctrlr->ctrlr == ctrlr);
CU_ASSERT(nvme_bdev_ctrlr->num_ns == 1);
nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);
CU_ASSERT(nvme_ctrlr->num_ns == 1);
CU_ASSERT(attached_names[0] == NULL);
@ -1597,7 +1597,7 @@ test_attach_ctrlr(void)
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
g_ut_register_bdev_status = 0;
}
@ -1607,7 +1607,7 @@ test_reconnect_qpair(void)
{
struct spdk_nvme_transport_id trid = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL;
struct nvme_ctrlr *nvme_ctrlr = NULL;
struct spdk_io_channel *ch;
struct nvme_ctrlr_channel *ctrlr_ch;
int rc;
@ -1617,13 +1617,13 @@ test_reconnect_qpair(void)
ut_init_trid(&trid);
TAILQ_INIT(&ctrlr.active_io_qpairs);
rc = nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
CU_ASSERT(rc == 0);
nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);
nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
ch = spdk_get_io_channel(nvme_bdev_ctrlr);
ch = spdk_get_io_channel(nvme_ctrlr);
SPDK_CU_ASSERT_FATAL(ch != NULL);
ctrlr_ch = spdk_io_channel_get_ctx(ch);
@ -1656,7 +1656,7 @@ test_reconnect_qpair(void)
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
static void
@ -1665,7 +1665,7 @@ test_aer_cb(void)
struct spdk_nvme_transport_id trid = {};
struct spdk_nvme_host_id hostid = {};
struct spdk_nvme_ctrlr *ctrlr;
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
struct nvme_bdev *bdev;
const int STRING_SIZE = 32;
const char *attached_names[STRING_SIZE];
@ -1696,16 +1696,16 @@ test_aer_cb(void)
spdk_delay_us(1000);
poll_threads();
nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);
nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
CU_ASSERT(nvme_bdev_ctrlr->num_ns == 4);
CU_ASSERT(nvme_bdev_ctrlr->namespaces[0]->populated == false);
CU_ASSERT(nvme_bdev_ctrlr->namespaces[1]->populated == true);
CU_ASSERT(nvme_bdev_ctrlr->namespaces[2]->populated == true);
CU_ASSERT(nvme_bdev_ctrlr->namespaces[3]->populated == true);
CU_ASSERT(nvme_ctrlr->num_ns == 4);
CU_ASSERT(nvme_ctrlr->namespaces[0]->populated == false);
CU_ASSERT(nvme_ctrlr->namespaces[1]->populated == true);
CU_ASSERT(nvme_ctrlr->namespaces[2]->populated == true);
CU_ASSERT(nvme_ctrlr->namespaces[3]->populated == true);
bdev = nvme_bdev_ctrlr->namespaces[3]->bdev;
bdev = nvme_ctrlr->namespaces[3]->bdev;
SPDK_CU_ASSERT_FATAL(bdev != NULL);
CU_ASSERT(bdev->disk.blockcnt == 1024);
@ -1720,12 +1720,12 @@ test_aer_cb(void)
event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
cpl.cdw0 = event.raw;
aer_cb(nvme_bdev_ctrlr, &cpl);
aer_cb(nvme_ctrlr, &cpl);
CU_ASSERT(nvme_bdev_ctrlr->namespaces[0]->populated == true);
CU_ASSERT(nvme_bdev_ctrlr->namespaces[1]->populated == true);
CU_ASSERT(nvme_bdev_ctrlr->namespaces[2]->populated == false);
CU_ASSERT(nvme_bdev_ctrlr->namespaces[3]->populated == true);
CU_ASSERT(nvme_ctrlr->namespaces[0]->populated == true);
CU_ASSERT(nvme_ctrlr->namespaces[1]->populated == true);
CU_ASSERT(nvme_ctrlr->namespaces[2]->populated == false);
CU_ASSERT(nvme_ctrlr->namespaces[3]->populated == true);
CU_ASSERT(bdev->disk.blockcnt == 2048);
rc = bdev_nvme_delete("nvme0", NULL);
@ -1733,7 +1733,7 @@ test_aer_cb(void)
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
static void
@ -1848,7 +1848,7 @@ test_submit_nvme_cmd(void)
struct spdk_nvme_transport_id trid = {};
struct spdk_nvme_host_id hostid = {};
struct spdk_nvme_ctrlr *ctrlr;
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
const int STRING_SIZE = 32;
const char *attached_names[STRING_SIZE];
struct nvme_bdev *bdev;
@ -1874,15 +1874,15 @@ test_submit_nvme_cmd(void)
spdk_delay_us(1000);
poll_threads();
nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);
nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
bdev = nvme_bdev_ctrlr->namespaces[0]->bdev;
bdev = nvme_ctrlr->namespaces[0]->bdev;
SPDK_CU_ASSERT_FATAL(bdev != NULL);
set_thread(0);
ch = spdk_get_io_channel(nvme_bdev_ctrlr);
ch = spdk_get_io_channel(nvme_ctrlr);
SPDK_CU_ASSERT_FATAL(ch != NULL);
bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
@ -1920,7 +1920,7 @@ test_submit_nvme_cmd(void)
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
static void
@ -1928,7 +1928,7 @@ test_remove_trid(void)
{
struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL;
struct nvme_ctrlr *nvme_ctrlr = NULL;
struct nvme_ctrlr_trid *ctrid;
int rc;
@ -1938,13 +1938,13 @@ test_remove_trid(void)
set_thread(0);
rc = nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL);
rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL);
CU_ASSERT(rc == 0);
nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);
nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
rc = bdev_nvme_add_secondary_trid(nvme_bdev_ctrlr, &ctrlr, &trid2);
rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
CU_ASSERT(rc == 0);
/* trid3 is not in the registered list. */
@ -1954,12 +1954,12 @@ test_remove_trid(void)
/* trid2 is not used, and simply removed. */
rc = bdev_nvme_delete("nvme0", &trid2);
CU_ASSERT(rc == 0);
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr);
TAILQ_FOREACH(ctrid, &nvme_bdev_ctrlr->trids, link) {
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &trid2) != 0);
}
rc = bdev_nvme_add_secondary_trid(nvme_bdev_ctrlr, &ctrlr, &trid3);
rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
CU_ASSERT(rc == 0);
/* trid1 is currently used and trid3 is an alternative path.
@ -1967,45 +1967,45 @@ test_remove_trid(void)
*/
rc = bdev_nvme_delete("nvme0", &trid1);
CU_ASSERT(rc == 0);
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr);
CU_ASSERT(nvme_bdev_ctrlr->resetting == true);
TAILQ_FOREACH(ctrid, &nvme_bdev_ctrlr->trids, link) {
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
CU_ASSERT(nvme_ctrlr->resetting == true);
TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &trid1) != 0);
}
CU_ASSERT(spdk_nvme_transport_id_compare(nvme_bdev_ctrlr->connected_trid, &trid3) == 0);
CU_ASSERT(spdk_nvme_transport_id_compare(nvme_ctrlr->connected_trid, &trid3) == 0);
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr->resetting == false);
CU_ASSERT(nvme_ctrlr->resetting == false);
/* trid3 is the current and only path. If we remove trid3, the corresponding
* nvme_bdev_ctrlr is removed.
* nvme_ctrlr is removed.
*/
rc = bdev_nvme_delete("nvme0", &trid3);
CU_ASSERT(rc == 0);
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr);
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
rc = nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL);
rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL);
CU_ASSERT(rc == 0);
nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);
nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
rc = bdev_nvme_add_secondary_trid(nvme_bdev_ctrlr, &ctrlr, &trid2);
rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
CU_ASSERT(rc == 0);
/* If trid is not specified, nvme_bdev_ctrlr itself is removed. */
/* If trid is not specified, nvme_ctrlr itself is removed. */
rc = bdev_nvme_delete("nvme0", NULL);
CU_ASSERT(rc == 0);
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr);
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
static void
@ -2014,7 +2014,7 @@ test_abort(void)
struct spdk_nvme_transport_id trid = {};
struct spdk_nvme_host_id hostid = {};
struct spdk_nvme_ctrlr *ctrlr;
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
const int STRING_SIZE = 32;
const char *attached_names[STRING_SIZE];
struct nvme_bdev *bdev;
@ -2045,10 +2045,10 @@ test_abort(void)
spdk_delay_us(1000);
poll_threads();
nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);
nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
bdev = nvme_bdev_ctrlr->namespaces[0]->bdev;
bdev = nvme_ctrlr->namespaces[0]->bdev;
SPDK_CU_ASSERT_FATAL(bdev != NULL);
write_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
@ -2070,13 +2070,13 @@ test_abort(void)
set_thread(0);
ch1 = spdk_get_io_channel(nvme_bdev_ctrlr);
ch1 = spdk_get_io_channel(nvme_ctrlr);
SPDK_CU_ASSERT_FATAL(ch1 != NULL);
ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
set_thread(1);
ch2 = spdk_get_io_channel(nvme_bdev_ctrlr);
ch2 = spdk_get_io_channel(nvme_ctrlr);
SPDK_CU_ASSERT_FATAL(ch2 != NULL);
write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
@ -2188,7 +2188,7 @@ test_abort(void)
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
static void
@ -2196,7 +2196,7 @@ test_get_io_qpair(void)
{
struct spdk_nvme_transport_id trid = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL;
struct nvme_ctrlr *nvme_ctrlr = NULL;
struct spdk_io_channel *ch;
struct nvme_ctrlr_channel *ctrlr_ch;
struct spdk_nvme_qpair *qpair;
@ -2207,13 +2207,13 @@ test_get_io_qpair(void)
set_thread(0);
rc = nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);
CU_ASSERT(rc == 0);
nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);
nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
ch = spdk_get_io_channel(nvme_bdev_ctrlr);
ch = spdk_get_io_channel(nvme_ctrlr);
SPDK_CU_ASSERT_FATAL(ch != NULL);
ctrlr_ch = spdk_io_channel_get_ctx(ch);
CU_ASSERT(ctrlr_ch->qpair != NULL);
@ -2228,7 +2228,7 @@ test_get_io_qpair(void)
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
/* Test a scenario that the bdev subsystem starts shutdown when there still exists
@ -2242,7 +2242,7 @@ test_bdev_unregister(void)
struct spdk_nvme_transport_id trid = {};
struct spdk_nvme_host_id hostid = {};
struct spdk_nvme_ctrlr *ctrlr;
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
struct nvme_ns *nvme_ns1, *nvme_ns2;
const int STRING_SIZE = 32;
const char *attached_names[STRING_SIZE];
@ -2265,16 +2265,16 @@ test_bdev_unregister(void)
spdk_delay_us(1000);
poll_threads();
nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);
nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
nvme_ns1 = nvme_bdev_ctrlr->namespaces[0];
nvme_ns1 = nvme_ctrlr->namespaces[0];
SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
bdev1 = nvme_ns1->bdev;
SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
nvme_ns2 = nvme_bdev_ctrlr->namespaces[1];
nvme_ns2 = nvme_ctrlr->namespaces[1];
SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
bdev2 = nvme_ns2->bdev;
@ -2288,12 +2288,12 @@ test_bdev_unregister(void)
CU_ASSERT(nvme_ns1->bdev == NULL);
CU_ASSERT(nvme_ns2->bdev == NULL);
nvme_bdev_ctrlr->destruct = true;
_nvme_bdev_ctrlr_destruct(nvme_bdev_ctrlr);
nvme_ctrlr->destruct = true;
_nvme_ctrlr_destruct(nvme_ctrlr);
poll_threads();
CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
static void

View File

@ -110,7 +110,7 @@ find_controller(const struct spdk_nvme_transport_id *trid)
static void
free_controller(struct spdk_nvme_ctrlr *ctrlr)
{
CU_ASSERT(!nvme_bdev_ctrlr_get(&ctrlr->trid));
CU_ASSERT(!nvme_ctrlr_get(&ctrlr->trid));
LIST_REMOVE(ctrlr, list);
spdk_nvme_ctrlr_free_io_qpair(ctrlr->admin_qpair);
free(ctrlr->chunk_info);
@ -197,11 +197,11 @@ nvme_ctrlr_populate_namespace_done(struct nvme_async_probe_ctx *ctx,
ns->ctrlr->ref++;
}
static struct nvme_bdev_ctrlr *
static struct nvme_ctrlr *
create_nvme_bdev_controller(const struct spdk_nvme_transport_id *trid, const char *name)
{
struct spdk_nvme_ctrlr *ctrlr;
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
struct nvme_ctrlr_trid *trid_entry;
uint32_t nsid;
int rc;
@ -209,48 +209,48 @@ create_nvme_bdev_controller(const struct spdk_nvme_transport_id *trid, const cha
ctrlr = find_controller(trid);
SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
SPDK_CU_ASSERT_FATAL(!nvme_bdev_ctrlr_get(trid));
SPDK_CU_ASSERT_FATAL(!nvme_ctrlr_get(trid));
nvme_bdev_ctrlr = calloc(1, sizeof(*nvme_bdev_ctrlr));
SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);
nvme_ctrlr = calloc(1, sizeof(*nvme_ctrlr));
SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
rc = pthread_mutex_init(&nvme_bdev_ctrlr->mutex, NULL);
rc = pthread_mutex_init(&nvme_ctrlr->mutex, NULL);
SPDK_CU_ASSERT_FATAL(rc == 0);
nvme_bdev_ctrlr->namespaces = calloc(ctrlr->ns_count, sizeof(struct nvme_ns *));
SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr->namespaces != NULL);
nvme_ctrlr->namespaces = calloc(ctrlr->ns_count, sizeof(struct nvme_ns *));
SPDK_CU_ASSERT_FATAL(nvme_ctrlr->namespaces != NULL);
trid_entry = calloc(1, sizeof(struct nvme_ctrlr_trid));
SPDK_CU_ASSERT_FATAL(trid_entry != NULL);
trid_entry->trid = *trid;
nvme_bdev_ctrlr->ctrlr = ctrlr;
nvme_bdev_ctrlr->num_ns = ctrlr->ns_count;
nvme_bdev_ctrlr->ref = 1;
nvme_bdev_ctrlr->connected_trid = &trid_entry->trid;
nvme_bdev_ctrlr->name = strdup(name);
nvme_ctrlr->ctrlr = ctrlr;
nvme_ctrlr->num_ns = ctrlr->ns_count;
nvme_ctrlr->ref = 1;
nvme_ctrlr->connected_trid = &trid_entry->trid;
nvme_ctrlr->name = strdup(name);
for (nsid = 0; nsid < ctrlr->ns_count; ++nsid) {
nvme_bdev_ctrlr->namespaces[nsid] = calloc(1, sizeof(struct nvme_ns));
SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr->namespaces[nsid] != NULL);
nvme_ctrlr->namespaces[nsid] = calloc(1, sizeof(struct nvme_ns));
SPDK_CU_ASSERT_FATAL(nvme_ctrlr->namespaces[nsid] != NULL);
nvme_bdev_ctrlr->namespaces[nsid]->id = nsid + 1;
nvme_bdev_ctrlr->namespaces[nsid]->ctrlr = nvme_bdev_ctrlr;
nvme_bdev_ctrlr->namespaces[nsid]->type = NVME_NS_OCSSD;
nvme_ctrlr->namespaces[nsid]->id = nsid + 1;
nvme_ctrlr->namespaces[nsid]->ctrlr = nvme_ctrlr;
nvme_ctrlr->namespaces[nsid]->type = NVME_NS_OCSSD;
bdev_ocssd_populate_namespace(nvme_bdev_ctrlr, nvme_bdev_ctrlr->namespaces[nsid], NULL);
bdev_ocssd_populate_namespace(nvme_ctrlr, nvme_ctrlr->namespaces[nsid], NULL);
}
while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
spdk_io_device_register(nvme_bdev_ctrlr, io_channel_create_cb,
spdk_io_device_register(nvme_ctrlr, io_channel_create_cb,
io_channel_destroy_cb, 0, name);
TAILQ_INSERT_TAIL(&g_nvme_bdev_ctrlrs, nvme_bdev_ctrlr, tailq);
TAILQ_INSERT_TAIL(&g_nvme_ctrlrs, nvme_ctrlr, tailq);
TAILQ_INIT(&nvme_bdev_ctrlr->trids);
TAILQ_INSERT_HEAD(&nvme_bdev_ctrlr->trids, trid_entry, link);
TAILQ_INIT(&nvme_ctrlr->trids);
TAILQ_INSERT_HEAD(&nvme_ctrlr->trids, trid_entry, link);
return nvme_bdev_ctrlr;
return nvme_ctrlr;
}
static struct nvme_request *
@ -530,29 +530,29 @@ create_bdev(const char *ctrlr_name, const char *bdev_name, uint32_t nsid)
}
static void
delete_nvme_bdev_controller(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)
delete_nvme_bdev_controller(struct nvme_ctrlr *nvme_ctrlr)
{
uint32_t nsid;
nvme_bdev_ctrlr->destruct = true;
nvme_ctrlr->destruct = true;
for (nsid = 0; nsid < nvme_bdev_ctrlr->num_ns; ++nsid) {
bdev_ocssd_depopulate_namespace(nvme_bdev_ctrlr->namespaces[nsid]);
for (nsid = 0; nsid < nvme_ctrlr->num_ns; ++nsid) {
bdev_ocssd_depopulate_namespace(nvme_ctrlr->namespaces[nsid]);
}
nvme_bdev_ctrlr_release(nvme_bdev_ctrlr);
nvme_ctrlr_release(nvme_ctrlr);
spdk_delay_us(1000);
while (spdk_thread_poll(g_thread, 0, 0) > 0) {}
CU_ASSERT(TAILQ_EMPTY(&g_nvme_bdev_ctrlrs));
CU_ASSERT(TAILQ_EMPTY(&g_nvme_ctrlrs));
}
static void
test_create_controller(void)
{
struct spdk_nvme_ctrlr *ctrlr;
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
struct spdk_nvme_transport_id trid = { .traddr = "00:00:00" };
struct spdk_ocssd_geometry_data geometry = {};
struct spdk_bdev *bdev;
@ -579,7 +579,7 @@ test_create_controller(void)
};
ctrlr = create_controller(&trid, ns_count, &geometry);
nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
nvme_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
for (nsid = 1; nsid <= ns_count; ++nsid) {
snprintf(namebuf, sizeof(namebuf), "%sn%"PRIu32, controller_name, nsid);
@ -591,10 +591,10 @@ test_create_controller(void)
CU_ASSERT_TRUE(bdev->zoned);
}
delete_nvme_bdev_controller(nvme_bdev_ctrlr);
delete_nvme_bdev_controller(nvme_ctrlr);
/* Verify that after deletion the bdevs can still be created */
nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
nvme_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
for (nsid = 1; nsid <= ns_count; ++nsid) {
snprintf(namebuf, sizeof(namebuf), "%sn%"PRIu32, controller_name, nsid);
@ -606,7 +606,7 @@ test_create_controller(void)
CU_ASSERT_TRUE(bdev->zoned);
}
delete_nvme_bdev_controller(nvme_bdev_ctrlr);
delete_nvme_bdev_controller(nvme_ctrlr);
free_controller(ctrlr);
}
@ -615,7 +615,7 @@ static void
test_device_geometry(void)
{
struct spdk_nvme_ctrlr *ctrlr;
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
struct spdk_nvme_transport_id trid = { .traddr = "00:00:00" };
const char *controller_name = "nvme0";
const char *bdev_name = "nvme0n1";
@ -640,7 +640,7 @@ test_device_geometry(void)
};
ctrlr = create_controller(&trid, 1, &geometry);
nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
nvme_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
rc = create_bdev(controller_name, bdev_name, 1);
CU_ASSERT_EQUAL(rc, 0);
@ -655,7 +655,7 @@ test_device_geometry(void)
CU_ASSERT_EQUAL(bdev->max_open_zones, geometry.maxocpu);
CU_ASSERT_EQUAL(bdev->write_unit_size, geometry.ws_opt);
delete_nvme_bdev_controller(nvme_bdev_ctrlr);
delete_nvme_bdev_controller(nvme_ctrlr);
free_controller(ctrlr);
}
@ -687,7 +687,7 @@ static void
test_lba_translation(void)
{
struct spdk_nvme_ctrlr *ctrlr;
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
struct spdk_nvme_transport_id trid = { .traddr = "00:00:00" };
const char *controller_name = "nvme0";
const char *bdev_name = "nvme0n1";
@ -711,11 +711,11 @@ test_lba_translation(void)
};
ctrlr = create_controller(&trid, 1, &geometry);
nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);
nvme_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr->namespaces[0] != NULL);
ocssd_ns = bdev_ocssd_get_ns_from_nvme(nvme_bdev_ctrlr->namespaces[0]);
SPDK_CU_ASSERT_FATAL(nvme_ctrlr->namespaces[0] != NULL);
ocssd_ns = bdev_ocssd_get_ns_from_nvme(nvme_ctrlr->namespaces[0]);
rc = create_bdev(controller_name, bdev_name, 1);
CU_ASSERT_EQUAL(rc, 0);
@ -749,7 +749,7 @@ test_lba_translation(void)
CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 68, 0, 1, 0));
CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_ns, lba), bdev->zone_size + 68);
delete_nvme_bdev_controller(nvme_bdev_ctrlr);
delete_nvme_bdev_controller(nvme_ctrlr);
free_controller(ctrlr);
geometry = (struct spdk_ocssd_geometry_data) {
@ -766,11 +766,11 @@ test_lba_translation(void)
};
ctrlr = create_controller(&trid, 1, &geometry);
nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);
nvme_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr->namespaces[0] != NULL);
ocssd_ns = bdev_ocssd_get_ns_from_nvme(nvme_bdev_ctrlr->namespaces[0]);
SPDK_CU_ASSERT_FATAL(nvme_ctrlr->namespaces[0] != NULL);
ocssd_ns = bdev_ocssd_get_ns_from_nvme(nvme_ctrlr->namespaces[0]);
rc = create_bdev(controller_name, bdev_name, 1);
CU_ASSERT_EQUAL(rc, 0);
@ -807,7 +807,7 @@ test_lba_translation(void)
CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_ns, lba),
bdev->zone_size * geometry.num_pu * geometry.num_grp + 68);
delete_nvme_bdev_controller(nvme_bdev_ctrlr);
delete_nvme_bdev_controller(nvme_ctrlr);
free_controller(ctrlr);
}
@ -878,7 +878,7 @@ static void
test_get_zone_info(void)
{
struct spdk_nvme_ctrlr *ctrlr;
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
struct nvme_ctrlr *nvme_ctrlr;
struct spdk_nvme_transport_id trid = { .traddr = "00:00:00" };
const char *controller_name = "nvme0";
const char *bdev_name = "nvme0n1";
@ -907,7 +907,7 @@ test_get_zone_info(void)
};
ctrlr = create_controller(&trid, 1, &geometry);
nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
nvme_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
rc = create_bdev(controller_name, bdev_name, 1);
CU_ASSERT_EQUAL(rc, 0);
@ -919,7 +919,7 @@ test_get_zone_info(void)
SPDK_CU_ASSERT_FATAL(ch != NULL);
ctrlr_ch = spdk_io_channel_get_ctx(ch);
ctrlr_ch->ctrlr = nvme_bdev_ctrlr;
ctrlr_ch->ctrlr = nvme_ctrlr;
ctrlr_ch->qpair = (struct spdk_nvme_qpair *)0x1;
bdev_io = alloc_ocssd_io();
@ -1033,7 +1033,7 @@ test_get_zone_info(void)
g_chunk_info_cpl = (struct spdk_nvme_cpl) {};
g_zone_info_status = true;
delete_nvme_bdev_controller(nvme_bdev_ctrlr);
delete_nvme_bdev_controller(nvme_ctrlr);
free(bdev_io);
free(ch);